blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f75415b1c72053fc87600193f00e473c7bf954b2 | 0e94b21a64e01b992cdc0fff274af8d77b2ae430 | /pytorch/pytorch_11.py | 543d14d0138a272ccdee73aae7aae9665c94940a | [] | no_license | yangnaGitHub/LearningProcess | 1aed2da306fd98f027dcca61309082f42b860975 | 250a8b791f7deda1e716f361a2f847f4d12846d3 | refs/heads/master | 2020-04-15T16:49:38.053846 | 2019-09-05T05:52:04 | 2019-09-05T05:52:04 | 164,852,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,777 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 3 16:08:22 2019
@author: natasha_yang
@e-mail: ityangna0402@163.com
"""
#大的数据可以压缩传输下载之后再解压还原
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
import numpy as np
#### Prepare the data
train_data = torchvision.datasets.MNIST(
    root='./mnist',  # directory where the dataset is stored / downloaded to
    train=True,
    # Convert PIL images to torch.FloatTensor of shape (C, H, W) and
    # scale pixel values into [0.0, 1.0].
    transform=torchvision.transforms.ToTensor(),
    download=False  # set to True on the first run to fetch the dataset
)

BATCH_SIZE = 64
train_loader = Data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
#AutoEncoder=>[encoder, decoder]=压缩后得到压缩的特征值,再从压缩的特征值解压成原图片
class AutoEncode(nn.Module):
    """Symmetric fully connected autoencoder for 28x28 MNIST digits.

    The encoder compresses a flattened image (784 values) down to a
    3-dimensional code; the decoder mirrors the encoder and maps the code
    back to pixel space, squashing outputs into [0, 1] with a sigmoid so
    they are comparable to normalized pixel intensities.
    """

    def __init__(self):
        super(AutoEncode, self).__init__()
        # Encoder: 784 -> 128 -> 64 -> 12 -> 3, Tanh between layers.
        self.encoder = nn.Sequential(
            nn.Linear(28 * 28, 128), nn.Tanh(),
            nn.Linear(128, 64), nn.Tanh(),
            nn.Linear(64, 12), nn.Tanh(),
            nn.Linear(12, 3),
        )
        # Decoder: mirror image of the encoder, 3 -> 12 -> 64 -> 128 -> 784.
        self.decoder = nn.Sequential(
            nn.Linear(3, 12), nn.Tanh(),
            nn.Linear(12, 64), nn.Tanh(),
            nn.Linear(64, 128), nn.Tanh(),
            nn.Linear(128, 28 * 28), nn.Sigmoid(),
        )

    def forward(self, x_input):
        """Return ``(code, reconstruction)`` for a batch of flattened images."""
        code = self.encoder(x_input)
        reconstruction = self.decoder(code)
        return code, reconstruction
autoencoder = AutoEncode()

#### Optimizer and loss
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=0.005)
loss_func = nn.MSELoss()

N_TEST_IMG = 5  # number of digits to visualize
f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2))  # 2 rows x 5 columns
# First N_TEST_IMG training images, flattened and scaled into [0, 1].
view_data = train_data.train_data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor)/255.

# First row of the figure: the original digits.
for index in range(N_TEST_IMG):
    a[0][index].imshow(np.reshape(view_data.data.numpy()[index], (28, 28)), cmap='gray')
    a[0][index].set_xticks(())
    a[0][index].set_yticks(())
for epoch in range(10):
    for step, (batch_x, batch_y) in enumerate(train_loader):
        # Input and reconstruction target are the same flattened batch:
        # the autoencoder learns to reproduce its own input.
        batch_x, batch_x_d = Variable(batch_x.view(-1, 28*28)), Variable(batch_x.view(-1, 28*28))
        #batch_y = Variable(batch_y)

        encode, decode = autoencoder(batch_x)
        loss = loss_func(decode, batch_x_d)  # pixel-wise reconstruction error
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Every 100 steps: log the loss and refresh the reconstruction row.
        if 0 == step % 100:
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.numpy())
            _, decoded_data = autoencoder(view_data)
            # Second row of the figure: current reconstructions.
            for index in range(N_TEST_IMG):
                a[1][index].clear()
                a[1][index].imshow(np.reshape(decoded_data.data.numpy()[index], (28, 28)), cmap='gray')
                a[1][index].set_xticks(())
                a[1][index].set_yticks(())
            plt.draw()
            plt.pause(0.05)
plt.ioff()
plt.show()
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm

# Visualize the 3-D latent codes of the first 200 training digits,
# colouring each label marker by its digit class.
view_data = train_data.train_data[:200].view(-1, 28*28).type(torch.FloatTensor)/255.
encoded_data, _ = autoencoder(view_data)
fig = plt.figure(2)
ax = Axes3D(fig)
DX, DY, DZ = encoded_data.data[:, 0].numpy(), encoded_data.data[:, 1].numpy(), encoded_data.data[:, 2].numpy()
values = train_data.train_labels[:200].numpy()
# BUG FIX: the z coordinate must come from DZ (the third latent axis).
# The original code zipped DX twice, so x values were plotted on the z axis.
for dx, dy, dz, dv in zip(DX, DY, DZ, values):
    cb = cm.rainbow(int(255*dv/9))  # one colour per digit class 0-9
    ax.text(dx, dy, dz, dv, backgroundcolor=cb)
ax.set_xlim(DX.min(), DX.max())
ax.set_ylim(DY.min(), DY.max())
ax.set_zlim(DZ.min(), DZ.max())
plt.show()
| [
"ityangna0402@163.com"
] | ityangna0402@163.com |
251b9960cc43b1d787b4b9b6168c4e25c2192138 | c4bb6b11e9bec547bca18cf3f1c2ac0bcda6d0b0 | /halld/test/resource.py | 17cc4c6b89c88ace5893dbaa716b05ec7a8980e4 | [] | no_license | ox-it/halld | 43e8fb38a12977bc4f2323a18fdae42683c5a494 | 917012265548b76941658a56ec83dfe2cafc8e4a | refs/heads/master | 2021-01-16T01:01:26.355534 | 2015-05-05T20:36:08 | 2015-05-05T20:36:08 | 16,612,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,645 | py | import json
import unittest
from rest_framework.test import force_authenticate
from .base import TestCase
from .. import models
from .. import response_data
class ResourceListTestCase(TestCase):
    """Exercises the ``/snake`` resource-list endpoint, including the
    ``defunct``/``extant`` query-string filters."""

    def create_resources(self):
        """Create one defunct and one extant ``snake`` resource.

        The second resource gets a Source attached and is then asserted
        extant, so the first (source-less) one stays defunct.
        """
        # Will be defunct by default.
        self.defunct_resource = models.Resource.create(self.superuser, 'snake')
        self.extant_resource = models.Resource.create(self.superuser, 'snake')
        models.Source.objects.create(resource=self.extant_resource,
                                     type_id='science',
                                     data={'foo': 'bar'},
                                     author=self.superuser,
                                     committer=self.superuser)
        self.assertEqual(self.extant_resource.extant, True)

    def testGetResourceList(self):
        """An anonymous GET returns a ResourceList response object."""
        request = self.factory.get('/snake')
        force_authenticate(request, self.anonymous_user)
        response = self.resource_list_view(request, 'snake')
        self.assertIsInstance(response.data, response_data.ResourceList)

    def testDefunctResources(self):
        """Filtering for defunct-only returns just the defunct resource."""
        self.create_resources()
        request = self.factory.get('/snake?defunct=on&extant=off')
        # NOTE(review): the other tests use force_authenticate() here;
        # assigning request.user directly may bypass DRF authentication --
        # confirm this difference is intentional.
        request.user = self.anonymous_user
        response = self.resource_list_view(request, 'snake')
        self.assertIsInstance(response.data, response_data.ResourceList)
        self.assertEqual(response.data['paginator'].count, 1)
        self.assertEqual(next(response.data.resource_data)['self']['href'],
                         self.defunct_resource.href)

    def testExtantResources(self):
        """The default listing contains only the extant resource."""
        self.create_resources()
        request = self.factory.get('/snake')
        force_authenticate(request, self.anonymous_user)
        response = self.resource_list_view(request, 'snake')
        self.assertIsInstance(response.data, response_data.ResourceList)
        self.assertEqual(response.data['paginator'].count, 1)
        self.assertEqual(next(response.data.resource_data)['self']['href'],
                         self.extant_resource.href)
class ResourceDetailTestCase(TestCase):
    """Checks the single-resource detail endpoint."""

    def testViewResource(self):
        """An anonymous GET on a resource returns its stored data."""
        res = models.Resource.create(self.superuser, 'snake')
        res.data = {'title': 'Python'}
        res.save(regenerate=False)

        detail_request = self.factory.get(
            '/snake/' + res.identifier,
            headers={'Accept': 'application/hal+json'})
        force_authenticate(detail_request, self.anonymous_user)

        detail_response = self.resource_detail_view(
            detail_request, 'snake', res.identifier)
        self.assertEqual(detail_response.data.data.get('title'), 'Python')
| [
"alexander.dutton@it.ox.ac.uk"
] | alexander.dutton@it.ox.ac.uk |
12b6cb094a4322a405a3ee6b7bc100899285342d | ade82efdb6dfaa402ea5999698de10b29ba142c0 | /30DaysOfCode/day26-nested-logic.py | 57968d3c73a6952bb69ae3f066ae5174146ab56d | [] | no_license | markronquillo/programming-challenges-and-algorithms | 057747a3a9311d61b9a8f7bf6048ea95bb533a2e | 0deb686069cd82bd0c7d0bf7e03aabd7f1359da3 | refs/heads/master | 2021-01-12T05:18:23.637189 | 2017-12-02T14:45:29 | 2017-12-02T14:45:29 | 77,906,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py |
act_date = input()
exp_date = input()
def compute(act_date, exp_date):
    """Return the library fine for a returned book (HackerRank "day 26").

    Both dates are ``"day month year"`` strings.  Fine rules:
      * returned on or before the due date             -> 0 Hackos
      * same year and month, after the due day         -> 15 per late day
      * same year, after the due month                 -> 500 per late month
      * any year after the due year                    -> flat 10000

    Bug fix: the original charged a per-day fine when the book was returned
    in an *earlier* year but with a later day of the same month (e.g.
    returned 10/4/2014, due 5/4/2015), instead of 0.
    """
    act_day, act_month, act_year = (int(x) for x in act_date.split(' '))
    exp_day, exp_month, exp_year = (int(x) for x in exp_date.split(' '))

    if act_year > exp_year:
        return 10000
    if act_year < exp_year:
        # Returned in an earlier year: never fined, whatever the month/day.
        return 0
    if act_month > exp_month:
        return 500 * (act_month - exp_month)
    if act_month < exp_month:
        # Earlier month of the same year: on time even if the day is later.
        return 0
    if act_day > exp_day:
        return 15 * (act_day - exp_day)
    return 0
print(compute(act_date, exp_date))
| [
"markronquillo23@gmail.com"
] | markronquillo23@gmail.com |
43352cd049ced563696e7de3b487e343123b42d3 | 42f6b269f1baa87e295bd2281fb5a7e975b21ac7 | /04.For_Loop/03.More_Exercises/probe.py | 9909f6841f03786bf4604e4523fdbc5ad29f3626 | [] | no_license | Tuchev/Python-Basics---september---2020 | 54dab93adb41aa998f2c043f803f4751bbc8c758 | 8899cc7323611dd6eed00aa0550071a49dc72704 | refs/heads/main | 2023-06-19T11:48:11.607069 | 2021-07-15T13:33:02 | 2021-07-15T13:33:02 | 386,295,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | n = int(input())
even_sum = 0
odd_sum = 0
# Read n numbers; 1-based even positions feed even_sum, odd ones odd_sum.
# Bug fix: the original `if` line was missing its trailing colon
# (SyntaxError), so the script could not run at all.
for number in range(1, n + 1):
    current_number = int(input())
    if number % 2 == 0:
        even_sum += current_number
    else:
        odd_sum += current_number
| [
"noreply@github.com"
] | Tuchev.noreply@github.com |
01d3dbef5745f091babf67730a13213939cded7b | 245b92f4140f30e26313bfb3b2e47ed1871a5b83 | /airflow/providers/google_vendor/googleads/v12/services/types/bidding_seasonality_adjustment_service.py | 9627a896feddaeef267b36c368273ea14a1ac92e | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ephraimbuddy/airflow | 238d6170a0e4f76456f00423124a260527960710 | 3193857376bc2c8cd2eb133017be1e8cbcaa8405 | refs/heads/main | 2023-05-29T05:37:44.992278 | 2023-05-13T19:49:43 | 2023-05-13T19:49:43 | 245,751,695 | 2 | 1 | Apache-2.0 | 2021-05-20T08:10:14 | 2020-03-08T04:28:27 | null | UTF-8 | Python | false | false | 7,069 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from airflow.providers.google_vendor.googleads.v12.enums.types import (
response_content_type as gage_response_content_type,
)
from airflow.providers.google_vendor.googleads.v12.resources.types import (
bidding_seasonality_adjustment as gagr_bidding_seasonality_adjustment,
)
from google.protobuf import field_mask_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
# proto-plus module descriptor: registers the message classes below under
# the vendored v12 services package and controls their marshalling.
__protobuf__ = proto.module(
    package="airflow.providers.google_vendor.googleads.v12.services",
    marshal="google.ads.googleads.v12",
    manifest={
        "MutateBiddingSeasonalityAdjustmentsRequest",
        "BiddingSeasonalityAdjustmentOperation",
        "MutateBiddingSeasonalityAdjustmentsResponse",
        "MutateBiddingSeasonalityAdjustmentsResult",
    },
)
class MutateBiddingSeasonalityAdjustmentsRequest(proto.Message):
    r"""Request message for
    [BiddingSeasonalityAdjustmentService.MutateBiddingSeasonalityAdjustments][google.ads.googleads.v12.services.BiddingSeasonalityAdjustmentService.MutateBiddingSeasonalityAdjustments].

    Attributes:
        customer_id (str):
            Required. ID of the customer whose
            seasonality adjustments are being modified.
        operations (Sequence[google.ads.googleads.v12.services.types.BiddingSeasonalityAdjustmentOperation]):
            Required. The list of operations to perform
            on individual seasonality adjustments.
        partial_failure (bool):
            If true, successful operations will be
            carried out and invalid operations will return
            errors. If false, all operations will be carried
            out in one transaction if and only if they are
            all valid. Default is false.
        validate_only (bool):
            If true, the request is validated but not
            executed. Only errors are returned, not results.
        response_content_type (google.ads.googleads.v12.enums.types.ResponseContentTypeEnum.ResponseContentType):
            The response content type setting. Determines
            whether the mutable resource or just the
            resource name should be returned post mutation.
    """

    # ``number=`` values are protobuf field tags; they must stay in sync
    # with the wire format and must not be renumbered.
    customer_id = proto.Field(proto.STRING, number=1,)
    operations = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="BiddingSeasonalityAdjustmentOperation",
    )
    partial_failure = proto.Field(proto.BOOL, number=3,)
    validate_only = proto.Field(proto.BOOL, number=4,)
    response_content_type = proto.Field(
        proto.ENUM,
        number=5,
        enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
    )
class BiddingSeasonalityAdjustmentOperation(proto.Message):
    r"""A single operation (create, remove, update) on a seasonality
    adjustment.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            FieldMask that determines which resource
            fields are modified in an update.
        create (google.ads.googleads.v12.resources.types.BiddingSeasonalityAdjustment):
            Create operation: No resource name is
            expected for the new seasonality adjustment.

            This field is a member of `oneof`_ ``operation``.
        update (google.ads.googleads.v12.resources.types.BiddingSeasonalityAdjustment):
            Update operation: The seasonality adjustment
            is expected to have a valid resource name.

            This field is a member of `oneof`_ ``operation``.
        remove (str):
            Remove operation: A resource name for the removed
            seasonality adjustment is expected, in this format:
            ``customers/{customer_id}/biddingSeasonalityAdjustments/{seasonality_adjustment_id}``

            This field is a member of `oneof`_ ``operation``.
    """

    update_mask = proto.Field(
        proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,
    )
    # ``create``/``update``/``remove`` share the ``operation`` oneof:
    # setting one of them clears the others.
    create = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="operation",
        message=gagr_bidding_seasonality_adjustment.BiddingSeasonalityAdjustment,
    )
    update = proto.Field(
        proto.MESSAGE,
        number=2,
        oneof="operation",
        message=gagr_bidding_seasonality_adjustment.BiddingSeasonalityAdjustment,
    )
    remove = proto.Field(proto.STRING, number=3, oneof="operation",)
class MutateBiddingSeasonalityAdjustmentsResponse(proto.Message):
    r"""Response message for seasonality adjustments mutate.

    Attributes:
        partial_failure_error (google.rpc.status_pb2.Status):
            Errors that pertain to operation failures in the partial
            failure mode. Returned only when partial_failure = true and
            all errors occur inside the operations. If any errors occur
            outside the operations (for example, auth errors), we return
            an RPC level error.
        results (Sequence[google.ads.googleads.v12.services.types.MutateBiddingSeasonalityAdjustmentsResult]):
            All results for the mutate.
    """

    # Protobuf field tags (``number=``) must stay stable for wire
    # compatibility.
    partial_failure_error = proto.Field(
        proto.MESSAGE, number=3, message=status_pb2.Status,
    )
    results = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message="MutateBiddingSeasonalityAdjustmentsResult",
    )
class MutateBiddingSeasonalityAdjustmentsResult(proto.Message):
    r"""The result for the seasonality adjustment mutate.

    Attributes:
        resource_name (str):
            Returned for successful operations.
        bidding_seasonality_adjustment (google.ads.googleads.v12.resources.types.BiddingSeasonalityAdjustment):
            The mutated bidding seasonality adjustment with only mutable
            fields after mutate. The field will only be returned when
            response_content_type is set to "MUTABLE_RESOURCE".
    """

    resource_name = proto.Field(proto.STRING, number=1,)
    # Populated only when the request set response_content_type to
    # MUTABLE_RESOURCE (see the request message's docstring).
    bidding_seasonality_adjustment = proto.Field(
        proto.MESSAGE,
        number=2,
        message=gagr_bidding_seasonality_adjustment.BiddingSeasonalityAdjustment,
    )
| [
"noreply@github.com"
] | ephraimbuddy.noreply@github.com |
2727043746f5618d29fb60b6cdf111942c7228a5 | 82762d776e2400948af54ca2e1bdf282885d922c | /581. 最短无序连续子数组.py | 8bc3cb543d1ee49d622254c2d89f338667666d2e | [] | no_license | dx19910707/LeetCode | f77bab78bcba2d4002c9662c122b82fc3c9caa46 | 624975f767f6efa1d7361cc077eaebc344d57210 | refs/heads/master | 2020-03-17T02:50:46.546878 | 2019-06-25T09:22:13 | 2019-06-25T09:22:13 | 133,208,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | class Solution(object):
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums2 = sorted(nums)
if nums == nums2:
return 0
i = 0
while nums[i] == nums2[i]:
i += 1
j = -1
while nums[j] == nums2[j]:
j -= 1
return len(nums[i:j])+1 | [
"dx19910707@qq.com"
] | dx19910707@qq.com |
6a28f4f34572bb5d2680de4aa8031dcde01e6e9f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02846/s115010620.py | 03e500ab3f57d88caf928fb9d1dd60d08d26c229 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | t1, t2 = map(int, input().split())
a1, a2 = map(int, input().split())
b1, b2 = map(int, input().split())
ave_a = t1 * a1 + t2 * a2
ave_b = t1 * b1 + t2 * b2
if ave_a < ave_b:
(a1, a2), (b1, b2) = (b1, b2), (a1, a2)
ave_a, ave_b = ave_b, ave_a
if ave_a == ave_b:
print('infinity')
exit()
half, all = t1 * (b1 - a1), ave_a - ave_b
ans = half // all * 2 + (half % all != 0)
print(max(0, ans))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8943983fe54f8f93c3c1901374f3d729d104c32a | 79f871eccb9e7c87d60575b7147ec464964b010d | /api/tests/core/test_resource_paginated_datasets.py | a3f9185c1e43d855fff960be9f90b0e3feccd716 | [] | no_license | lucassimon/upload-datasets | 2c0e4d2945204503e8eefda5934eb8ca03561d1b | 02f2b7324f389b1940ee60c886650cb04dd5e80e | refs/heads/master | 2023-01-12T07:14:13.175505 | 2019-12-08T21:41:17 | 2019-12-08T21:41:17 | 226,594,431 | 0 | 0 | null | 2023-01-05T02:32:22 | 2019-12-08T00:33:52 | Python | UTF-8 | Python | false | false | 1,225 | py | import pytest
from unittest import mock
from apps.core.models import Dataset
from .factories import DatasetFactory
from tests.utils import replace_id_to_take_snapshot
class TestDataset:
    """Tests the paginated ``/datasets/page/<n>`` listing endpoint."""

    def setup_method(self):
        # Reset the factory sequence so generated fixtures are deterministic
        # across test methods (the snapshot test depends on stable data).
        DatasetFactory.reset_sequence()

    def teardown_method(self):
        Dataset.objects.delete()

    def make_request(self, client, query=None):
        """GET the first page of datasets with the given test client."""
        url = "/datasets/page/1"
        return client.get(url, content_type="application/json")

    def test_should_response_dataset_paginated(self, auth, mongo, snapshot):
        DatasetFactory.create_batch(3)
        response = self.make_request(auth)
        assert response.status_code == 200
        res = response.json
        # Ids are replaced with a placeholder, presumably so the snapshot
        # stays stable across runs -- see replace_id_to_take_snapshot.
        items = replace_id_to_take_snapshot(response.json.get("data"))
        res["data"] = items
        snapshot.assert_match(res)

    @mock.patch("apps.core.repositories.Dataset.objects")
    def test_should_response_exception_when_an_error_raised(
        self, DatasetMock, auth, mongo
    ):
        # NOTE(review): ``side_effect`` only fires when ``Dataset.objects``
        # is *called*; if the repository only accesses attributes on it,
        # this patch may not raise as intended -- verify against the
        # repository implementation.
        DatasetMock.side_effect = Exception("Some error occurred")
        response = self.make_request(auth)
        assert response.status_code == 500
        assert response.json.get("description") == "Some error occurred"
| [
"lucassrod@gmail.com"
] | lucassrod@gmail.com |
17f33729a06fe96c12e6d2add6182124fa44708c | 184f13269249b08e5b62444ece10af8a3a35c9a5 | /python_nlp_explorations_chatbot_keywords_extraction/article_3_keyword_extraction_nlp_spacy/05_kw_extractor_spacy_linguistic_features.py | b09b4d1d37e43a4303f452073fb8f9226411fc4c | [
"MIT"
] | permissive | bflaven/BlogArticlesExamples | 3decf588098897b104d429054b8e44efec796557 | aca40bb33f1ad4e140ddd67d6bb39bdd029ef266 | refs/heads/master | 2023-09-04T16:57:57.498673 | 2023-09-01T13:14:12 | 2023-09-01T13:14:12 | 42,390,873 | 9 | 4 | MIT | 2023-03-02T22:39:06 | 2015-09-13T09:48:34 | HTML | UTF-8 | Python | false | false | 5,184 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
"""
cd /Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/_my_article_python-explorations/git_repo_python_explorations_nlp/article_3_keyword_extraction_nlp_spacy/
python 05_kw_extractor_spacy_linguistic_features.py
"""
import spacy
from collections import Counter
from string import punctuation
# download best-matching version of specific model for your spaCy installation
# python -m spacy download en_core_web_sm
# nlp = spacy.load("en_core_web_sm")
# doc = nlp("This is a sentence.")
# print(doc)
# download best-matching version of specific model for your spaCy installation
# python -m spacy download en_core_web_lg
nlp = spacy.load("en_core_web_lg")
# import en_core_web_lg
# nlp = en_core_web_lg.load()
def get_hotwords(text):
    """Collect candidate keywords from *text*.

    A token qualifies when it is tagged as a proper noun, adjective or
    noun and is neither a stop word nor punctuation.  The text is
    lower-cased before tagging, and duplicates are kept so callers can
    count frequencies afterwards.
    """
    keyword_pos = {'PROPN', 'ADJ', 'NOUN'}
    doc = nlp(text.lower())
    return [
        token.text
        for token in doc
        if token.text not in nlp.Defaults.stop_words
        and token.text not in punctuation
        and token.pos_ in keyword_pos
    ]
input_text = "My last post was a modest attempt in discovering Python potential with the help of Anaconda. Meanwhile, Artificial Intelligence (AI) was bewitched me 🙂 So, I gained confidence using Python in order to explore AI librairies such as NTLK, OpenCV or face_recognition from ageitgey (Adam Geitgey)." \
"You can find the files in my github account More on https://github.com/bflaven/BlogArticlesExamples/ in python_playing_with_facial_recognition" \
"This post is about a real issue that can be solved by IA. I believe that for any technical topic, rather than exploring AI documentation which in itself could be quite wastful. I always strive to tackle a real use case and apply as much as possible what I want to discover." \
"So, applying this principle, I have spotted facial recognition for pictures. Like many, I am iOS mobile Phone user and I saw facial recognition at work. I start to wonder: “How can apply those facial recognition principles in a backoffice?”. As I am currently a PO for a Backoffice, I am supposed to have a clear “vision” of my product but more than that I know perfectly its weaknesses, especially dealing with pictures." \
"The benefit of this simple question allows me not to stress about the details right away and instead quickly jot something down as a placeholder for a conversation between me, myself and I, a soliloquy. The intent was to not forget to ask all the right questions either upfront or somewhere down the road but do not prevent from starting a P.O.C." \
"Here is my shortlist that highlights some of the functional and nonfunctional concerns or requirements that I wanted to address. This list has driven all my decisions to obtain a pragmatic result without missing the goal." \
"Goal: How can apply those facial recognition principles in a Backoffice in order to improve image search results at the end?" \
"Short actions todolist:" \
"Browse unknown people images directory with Facial Recognition Script" \
"Detect known people faces among these images set them aside" \
"Insert these known people images in a MySQL database" \
"Requirement: This Spike will be all made in Python but I do not want to end up with an overzealous shopping list of requirements." \
"Let me explain the situation. In the Backoffice, which I am dealing with, is hosting thousand of “unqualified” images that contains faces of: Donald Trump, Xi Jinping, Angela Merkel, Boris Johnson, Emmanuel Macron, Vladimir Poutine, Recep Tayyip Erdoğan or less kwnown poeple in an european centric point of view: Macky Sall, Rodrigo Duterte, Ramzan Kadyrov, Hun Sen, Narendra Modi, Hassan Rohani, Stevo Pendarovski, Nicolás Maduro, Edgar Lungu..." \
"Remember that we still need a human intelligence to say who is who? For your information, Stevo Pendarovski, Стево Пендаровски, is president of North Macedonia, holding the office since 12 May 2019 and he looks a bit like me 🙂 or that is just the glasses." \
"The face of Stevo Pendarovski, Стево Пендаровски, president of North Macedonia" \
"Improve a CMS's photos library qualification with AI, facial recognition in python, to provide better images search results to users" \
"The idea is to increase the relevance of research and reduce the sample of images that will be retrieved with a traditionnal textual search based on their name. It will save time, money and resources but also improve user experience. So, user do not get no results at all or improper results in his/her image search." \
"The fact, no one wants to qualify each image by opening them one after the other, adding correct caption to improve indexation and by consequence future’s search results. We are talking about more than 1 500 000 pictures. Indeed, the wise choice is to leave it to a computer." \
# Extract candidate keywords from the sample article.
output = get_hotwords(input_text)
print ("\n --- output")
print(output)

# Top-5 most frequent candidates, formatted as hashtags.
print ("\n --- result for hashtags")
hashtags = [('#' + x[0]) for x in Counter(output).most_common(5)]
print(' '.join(hashtags))
| [
"bflaven@gmail.com"
] | bflaven@gmail.com |
3f9ffcbd11f2b59511325d196baa1f17d53eccc4 | ae69d0402e48f07752dbe9b37d3e814fb314f0ba | /scripts/rltest/main.py | ebb6d94ab31a37bbfecaf9971e9335802be7a4b1 | [] | no_license | mfkasim1/deepmk | e07600828ec929fc397516f90abbfd86f7ac56c2 | 392964790d67d0f02b981aaba2d887c3f5a14551 | refs/heads/master | 2023-04-26T07:57:30.286946 | 2021-05-25T08:08:26 | 2021-05-25T08:08:26 | 147,033,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | import deepmk
import torch
import gym
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import deepmk.rl.dataloaders as rlloader
from deepmk.rl.trainers import QLearn
from deepmk.rl.actors import QNet
from deepmk.rl import train, show
# set up the training components
# Environment: the classic CartPole balancing task.
env = gym.make("CartPole-v0")

# Q-network: 4 observation features -> 100 hidden units -> one Q-value
# per discrete action.
model = nn.Sequential(
    nn.Linear(4, 100),
    nn.Sigmoid(),
    nn.Linear(100, env.action_space.n)
)
# Epsilon-greedy actor over the Q-network.
actor = QNet(model, epsilon=0.1)
optimizer = optim.SGD(model.parameters(),
    lr=1e-2, momentum=0.01)
rldataloader = rlloader.ReplayMemoryLoader(batch_size=10, shuffle=True)
# Q-learning trainer (replay-memory loader currently disabled).
trainer = QLearn(actor, optimizer, gamma=0.99,
    # rldataloader=rldataloader
    )
# scheduler = lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)

# Train, validating every 20 episodes and saving weights to disk.
model = train(env, trainer, model, actor,
    reward_preproc=lambda x:x, scheduler=None, num_episodes=10000,
    val_every=20, val_episodes=10, verbose=1, plot=1,
    save_wts_to="cartpole1.pkl")

# show(env, model, actor, load_wts_from="cartpole.pkl")
| [
"firman.kasim@gmail.com"
] | firman.kasim@gmail.com |
736d0d735cacf199d75cdf98f81f5ed5fd8aa6fb | 380a47268c5975473a2e7c38c747bc3bdbd981b1 | /benchmark/third_party/DeepSpeed/deepspeed/autotuning/tuner/cost_model.py | 0cdcef6483b4417e033f6e130c81eba7dc7b7d47 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | FMInference/FlexGen | 07aa9b1918c19b02077e13ad07e76840843810dd | d34f7b4b43ed87a374f394b0535ed685af66197b | refs/heads/main | 2023-07-24T02:29:51.179817 | 2023-07-21T22:38:31 | 2023-07-21T22:38:31 | 602,270,517 | 6,821 | 411 | Apache-2.0 | 2023-07-07T22:59:24 | 2023-02-15T21:18:53 | Python | UTF-8 | Python | false | false | 1,723 | py | from .utils import *
try:
import xgboost as xgb
except ImportError:
xgb = None
class XGBoostCostModel():
    """Wraps an XGBoost booster used to predict configuration cost.

    ``loss_type`` selects the training objective: ``"reg"`` fits a plain
    regression, ``"rank"`` fits a pairwise ranking objective.  Targets
    are scaled into [0, 1] by their maximum before fitting.
    """

    def __init__(self, loss_type, num_threads=None, log_interval=25, upper_model=None):
        assert xgb is not None, "missing requirements, please install deepspeed w. 'autotuning_ml' extra."

        self.loss_type = loss_type

        if loss_type == "reg":
            objective = "reg:linear"
        elif loss_type == "rank":
            objective = "rank:pairwise"
        else:
            raise RuntimeError("Invalid loss type: " + loss_type)

        # Shared booster hyper-parameters; only the objective differs
        # between the two loss types.
        self.xgb_params = {
            "max_depth": 3,
            "gamma": 0.0001,
            "min_child_weight": 1,
            "subsample": 1.0,
            "eta": 0.3,
            "lambda": 1.0,
            "alpha": 0,
            "objective": objective,
        }

        self.xgb_params["verbosity"] = 0
        if num_threads:
            self.xgb_params["nthread"] = num_threads

    def fit(self, xs, ys):
        """Train the booster on feature rows *xs* and observed costs *ys*."""
        features = np.array(xs, dtype=np.float32)
        targets = np.array(ys, dtype=np.float32)
        # Normalize targets into [0, 1]; the 1e-9 floor guards against an
        # all-zero maximum.
        targets = targets / max(np.max(targets), 1e-9)

        shuffled = np.random.permutation(len(features))
        dtrain = xgb.DMatrix(features[shuffled], targets[shuffled])

        self.bst = xgb.train(self.xgb_params, dtrain)

    def predict(self, xs):
        """Return predicted (normalized) costs for feature rows *xs*."""
        return self.bst.predict(xgb.DMatrix(xs))
| [
"sqy1415@gmail.com"
] | sqy1415@gmail.com |
63601989f71d582a2eec18528e01b6995527a9de | 2aba62d66c2c622bdc148cef451da76cae5fd76c | /exercise/learn_python_dm2039/ch30/ch30_6.py | 2d0948851697dcf6dd8ee2c7187bb6e577979064 | [] | no_license | NTUT-109AB8011/crawler | 6a76de2ab1848ebc8365e071e76c08ca7348be62 | a703ec741b48d3af615a757fed7607b1f8eb66a6 | refs/heads/master | 2023-03-26T22:39:59.527175 | 2021-03-30T03:29:22 | 2021-03-30T03:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # ch30_6.py
import datetime
deltaTime = datetime.timedelta(days=100)
timeNow = datetime.datetime.now()
print("現在時間是 : ", timeNow)
print("100天後是 : ", timeNow + deltaTime)
| [
"terranandes@gmail.com"
] | terranandes@gmail.com |
513aefbd73b1950b907fd41d6a90aed13b4154f3 | 71d3555e12cb53ea81b097dfdf760d4aae8830d9 | /triton_transformer/transformer.py | cec34fb30038a73fac4b467207a3d7c094c4c56a | [
"MIT"
] | permissive | armheb/triton-transformer | 35cc60d8842503a71f43545007ff83197a93dc43 | 21b8490848ff00906b4e688e22017508580e5ec7 | refs/heads/main | 2023-08-27T14:37:36.483284 | 2021-10-04T21:39:35 | 2021-10-04T21:39:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,166 | py | import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from triton_transformer.layernorm import layernorm
from triton_transformer.softmax import softmax
from triton_transformer.cross_entropy import cross_entropy_fn
from triton_transformer.bmm import fused_relu_squared
from triton_transformer.dropout import dropout_fn
from triton_transformer.utils import exists, default
# helpers classes
class Attention(nn.Module):
    """Pre-norm multi-head self-attention with optional Triton kernels.

    ``use_triton`` given at construction time sets the default backend for
    the layernorm / softmax / dropout helpers; the ``forward`` argument of
    the same name can override it per call.
    """

    def __init__(
        self,
        dim,
        dim_head = 64,
        heads = 8,
        dropout = 0.,
        use_triton = False
    ):
        super().__init__()

        self.use_triton = use_triton
        self.heads = heads
        # 1/sqrt(dim_head) scaling, applied to the queries before the
        # dot product.
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * heads

        self.dropout = dropout

        self.norm = nn.LayerNorm(dim)
        # Single projection producing queries, keys and values in one matmul.
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Linear(inner_dim, dim)

    def forward(self, x, mask = None, use_triton = None):
        use_triton = default(use_triton, self.use_triton)
        h = self.heads

        x = layernorm(x, self.norm.weight, self.norm.bias, use_triton = use_triton)

        q, k, v = self.to_qkv(x).chunk(3, dim = -1)
        # Fold the head dimension into the batch: (b, n, h*d) -> (b*h, n, d).
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))

        q = q * self.scale
        sim = einsum('b i d, b j d -> b i j', q, k)

        if exists(mask):
            # True positions in ``mask`` are filled with the most negative
            # finite value so softmax gives them ~zero weight.
            mask_value = -torch.finfo(sim.dtype).max
            sim = sim.masked_fill(mask, mask_value)

        attn = softmax(sim, use_triton = use_triton)
        attn = dropout_fn(attn, self.dropout, use_triton = use_triton)

        out = einsum('b i j, b j d -> b i d', attn, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
        out = self.to_out(out)
        # A second dropout is applied to the output projection as well.
        return dropout_fn(out, self.dropout, use_triton = use_triton)
class FeedForward(nn.Module):
    """Pre-norm feedforward block using a fused ReLU-squared activation.

    The input projection is kept as raw weight/bias parameters so it can
    be combined with the activation in a single ``fused_relu_squared``
    call; the output projection is a plain Linear layer.
    """

    def __init__(
        self,
        dim,
        mult = 4,
        dropout = 0.,
        use_triton = False
    ):
        super().__init__()
        self.use_triton = use_triton
        hidden_dim = dim * mult
        self.dropout = dropout

        self.norm = nn.LayerNorm(dim)
        self.proj_in_weight = nn.Parameter(torch.randn(dim, hidden_dim))
        self.proj_in_bias = nn.Parameter(torch.randn(hidden_dim))
        self.proj_out = nn.Linear(hidden_dim, dim)

    def forward(self, x, use_triton = None):
        use_triton = default(use_triton, self.use_triton)

        normed = layernorm(x, self.norm.weight, self.norm.bias, use_triton = use_triton)
        hidden = fused_relu_squared(normed, self.proj_in_weight, self.proj_in_bias, use_triton = use_triton)
        hidden = dropout_fn(hidden, self.dropout, use_triton = use_triton)
        return self.proj_out(hidden)
# main class
class Transformer(nn.Module):
    """Token-level Transformer (embedding -> depth x [attention, FF] -> logits)
    whose elementwise ops can be dispatched to Triton kernels."""
    def __init__(
        self,
        *,
        dim,
        num_tokens,
        max_seq_len,
        depth,
        causal = False,
        heads = 8,
        dim_head = 64,
        ff_dropout = 0.,
        attn_dropout = 0.,
        use_triton = False
    ):
        super().__init__()
        self.max_seq_len = max_seq_len
        self.token_emb = nn.Embedding(num_tokens, dim)
        # learned absolute positional embedding, one vector per position
        self.pos_emb = nn.Embedding(max_seq_len, dim)
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                Attention(dim, heads = heads, dim_head = dim_head, dropout = attn_dropout, use_triton = use_triton),
                FeedForward(dim, dropout = ff_dropout, use_triton = use_triton)
            ]))
        self.norm = nn.LayerNorm(dim)
        self.to_logits = nn.Linear(dim, num_tokens)
        # mask
        self.use_triton = use_triton
        self.causal = causal
        # upper triangle (above the diagonal) is True = blocked, so each
        # position may only attend to itself and earlier positions;
        # not persisted in the state dict since it is deterministic
        mask = torch.ones(max_seq_len, max_seq_len, dtype = torch.bool).triu(1) if causal else None
        self.register_buffer('mask', mask, persistent = False)
    def forward(
        self,
        x,
        mask = None,
        *,
        labels = None,
        use_triton = None
    ):
        """Return logits for token ids x (batch, seq); if `labels` is given,
        return the cross-entropy loss instead (ignore_index = 0)."""
        use_triton = default(use_triton, self.use_triton)
        n, device = x.shape[1], x.device
        # embed token and add positional embedding
        x = self.token_emb(x)
        pos_emb = self.pos_emb(torch.arange(n, device = device))
        x = x + rearrange(pos_emb, 'n d -> () n d')
        # generate mask, depending on whether autoregressive or not
        if self.causal:
            # slice the precomputed causal buffer to the current length;
            # any user-supplied mask is ignored in the causal case
            mask = self.mask[:n, :n]
            mask = rearrange(mask, 'i j -> () i j')
        elif exists(mask):
            # incoming mask is a keep-mask (b, n); outer product pairs it up,
            # then invert so True marks blocked attention entries
            mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
            mask = ~mask
        # go through layers
        for attn, ff in self.layers:
            x = attn(x, mask = mask, use_triton = use_triton) + x
            x = ff(x, use_triton = use_triton) + x
        x = layernorm(x, self.norm.weight, self.norm.bias, use_triton = use_triton)
        logits = self.to_logits(x)
        if not exists(labels):
            return logits
        loss = cross_entropy_fn(logits, labels, ignore_index = 0, use_triton = use_triton)
        return loss
| [
"lucidrains@gmail.com"
] | lucidrains@gmail.com |
class Solution:
    def sumSubarrayMins(self, A: List[int]) -> int:
        """Return the sum of min(b) over every contiguous subarray b of A,
        modulo 10**9 + 7 (LeetCode 907)."""
        MOD = 10 ** 9 + 7
        n = len(A)
        # left[i] / right[i] count how far A[i]'s "minimum span" extends:
        # strictly greater elements on the left, greater-or-equal on the
        # right, so equal values are credited to exactly one position.
        left = [0] * n
        right = [0] * n
        stack = []
        for i in range(n):
            while stack and A[stack[-1]] > A[i]:
                stack.pop()
            left[i] = i - (stack[-1] if stack else -1)
            stack.append(i)
        stack = []
        for i in range(n - 1, -1, -1):
            while stack and A[stack[-1]] >= A[i]:
                stack.pop()
            right[i] = (stack[-1] if stack else n) - i
            stack.append(i)
        # A[i] is the minimum of left[i] * right[i] subarrays
        return sum(v * left[i] * right[i] for i, v in enumerate(A)) % MOD
"shjiang@ucdavis.edu"
] | shjiang@ucdavis.edu |
3eadf25d4ee4c8cd157ea0d95e9b6ef32ddf51ea | 0062a19abf334cdb8e7927270106b9ca496ba42e | /boxOffice/admin.py | 88bcf3e8dcb677f3c83572f05a05697efb6f6936 | [] | no_license | Dawinia/movie_backend | 177099d35c5a8f6e89851a3e1e662563d24c242a | 650aa4d7540bc02639a0832069cc540e6c3df3bc | refs/heads/master | 2022-04-22T01:35:54.886991 | 2020-04-23T17:21:30 | 2020-04-23T17:21:30 | 258,153,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | from django.contrib import admin
from .models import BoxOffice, MovieInfo
# Register your models here.
class BoxOfficeAdmin(admin.ModelAdmin):
    """Admin change-form layout for BoxOffice records."""
    # Group the edit-page fields into titled sections; the first (None)
    # section carries the untitled top-of-form fields.
    fieldsets = [
        (None, {'fields': ['yearRate', 'crawlDate']}),
        ('movieInfo', {'fields': ['movieId', 'movieName', 'releaseInfo']}),
        ('boxOfficeInfo', {'fields': ['boxRate', 'boxInfo', 'splitBoxInfo', 'sumBoxInfo', 'splitSumBoxInfo']}),
        ('showInfo', {'fields': ['showInfo', 'showView', 'showRate', 'seatRate']})
    ]
# BoxOffice gets the customized admin above; MovieInfo uses the default admin.
admin.site.register(BoxOffice, BoxOfficeAdmin)
admin.site.register(MovieInfo)
| [
"dawinialo@163.com"
] | dawinialo@163.com |
56c198953033a0f78f55db2c3afa95d0de6787e9 | 0c084555ac64297faacf2a62df80b7caf3fc8822 | /mygene.info/src/hub/dataload/sources/entrez/unigene_upload.py | cc9caeafb5aaa0749dddab7917463902f7283c54 | [
"Apache-2.0"
] | permissive | NCATS-Tangerine/MyGeneBeacon | ae59b3a49142fe8da57d421a3494000c61cbc35e | 5c70d986d522ee9d9b709784afe992e315a5b76e | refs/heads/master | 2021-05-13T18:05:44.020677 | 2018-01-09T18:58:06 | 2018-01-09T18:58:06 | 116,851,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | from .parser import Gene2UnigeneParser
import biothings.hub.dataload.uploader as uploader
class EntrezUnigeneUploader(uploader.MergerSourceUploader):
    """Uploader for the Entrez gene-to-UniGene mapping (BioThings sub-source
    "entrez_unigene" of the "entrez" datasource)."""
    name = "entrez_unigene"
    main_source = "entrez"
    def load_data(self, data_folder):
        """Parse gene2unigene data from *data_folder* and return the docs."""
        self.parser = Gene2UnigeneParser(data_folder)
        # include every species, not just the default subset
        self.parser.set_all_species()
        gene2unigene = self.parser.load()
        return gene2unigene
    @classmethod
    def get_mapping(klass):
        """Elasticsearch field mapping for the 'unigene' field.

        NOTE(review): "string" plus a custom analyzer is a legacy-ES style
        mapping (pre-5.x "text"/"keyword") -- confirm against the deployed
        Elasticsearch version.
        """
        mapping = {
            "unigene": {
                "type": "string",
                "analyzer": "string_lowercase"
            }
        }
        return mapping
| [
"lhannest@sfu.ca"
] | lhannest@sfu.ca |
cd01e7265b05f24764a2257ba093d8968d37c3f9 | 5d08402e56855d55d0f14ff7642f27e01cadcced | /listing/migrations/0011_auto_20210112_0908.py | dcc94ef6a06111b4ef4ed8b9ac93d7f9c3d52654 | [] | no_license | crowdbotics-apps/test-23821 | 783d757965931f35819343bf57627265dfa5f6e5 | 5cf465eabbd921055d31ac38b54693aa581713a5 | refs/heads/master | 2023-02-12T15:23:18.370076 | 2021-01-14T00:46:16 | 2021-01-14T00:46:16 | 329,440,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | # Generated by Django 2.2.17 on 2021-01-12 09:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('listing', '0010_merge_20210112_0906'),
]
operations = [
migrations.CreateModel(
name='ListingPlan',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('slug', models.SlugField(max_length=30, unique=True)),
('seller_price', models.FloatField(blank=True, null=True)),
('buyer_percentage', models.FloatField(blank=True, null=True)),
('min', models.FloatField(blank=True, null=True)),
('max', models.FloatField(blank=True, null=True)),
],
),
migrations.RemoveField(
model_name='listing',
name='listing_type',
),
migrations.AddField(
model_name='listing',
name='plan',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='listing.ListingPlan'),
),
]
| [
"dawarsardar786@gmail.com"
] | dawarsardar786@gmail.com |
9119d3d91838e00e3ab736b6c6f7daa0efe4acd9 | 70cf354a5eb38ddaf6e594b162706f3b38925647 | /02-03/todolist/todolist/settings.py | ebbe5f45ca6c4125016ff65607df10be584ea861 | [] | no_license | hanifmisbah/PythonDjango-Practice-Coding | 0051c73c67ddfa9d49e47b1fd77b798b253275bb | e15de23fd1d7f4b118f7149d6d6ff3b25f898b82 | refs/heads/master | 2022-12-25T00:13:09.855630 | 2020-10-05T10:50:13 | 2020-10-05T10:50:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py | """
Django settings for todolist project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '70w@1hcxqg0*em	a9&jx330n*$+=cmckhwwv!f0!yl9tql=_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'task',
# 'delete'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todolist.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todolist.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"hanifmisbah97@gmail.com"
] | hanifmisbah97@gmail.com |
0ae12e49362282d60fe111f7b96ed887d37a6046 | 4c2da39f554f6fb5a92c6ceb886fa5305e085b8c | /mylar/torrent/clients/qbittorrent.py | 6cb1c28065af55d5e0b3ebc6952df6db6b4e290b | [] | no_license | ferretcomp/mylar | c48520f69d43e92145bdcc9c17fd3ef735bf9b0b | 2b2687361b3decadc65b10f1b8319dad165698ea | refs/heads/master | 2021-04-15T08:15:09.923411 | 2018-01-17T20:55:24 | 2018-01-17T20:55:24 | 94,567,775 | 0 | 0 | null | 2017-06-16T17:41:55 | 2017-06-16T17:41:55 | null | UTF-8 | Python | false | false | 5,014 | py | import os
import mylar
import base64
import time
from mylar import logger, helpers
from lib.qbittorrent import client
class TorrentClient(object):
    """Thin wrapper around the qBittorrent WebUI API used by mylar to add
    .torrent files, detect duplicates and report per-torrent metadata."""
    def __init__(self):
        # Cached connection sentinel; only consulted by connect().
        self.conn = None
    def connect(self, host, username, password):
        """Create and authenticate a qBittorrent client for *host*.

        Returns the authenticated client on success, {'status': False} when
        the host is missing or the client cannot be created, and None
        (implicitly) on a login failure -- callers treat that as best-effort.
        """
        if self.conn is not None:
            # Bugfix: previously returned `self.connect` (the bound method
            # object) instead of the cached connection.
            return self.conn
        if not host:
            return {'status': False}
        try:
            logger.info(host)
            self.client = client.Client(host)
        except Exception as e:
            logger.error('Could not create qBittorrent Object' + str(e))
            return {'status': False}
        else:
            try:
                self.client.login(username, password)
            except Exception as e:
                logger.error('Could not connect to qBittorrent ' + host)
            else:
                return self.client
    def find_torrent(self, hash):
        """Return True if a torrent with this info-hash is already loaded."""
        logger.debug('Finding Torrent hash: ' + hash)
        torrent_info = self.get_torrent(hash)
        if torrent_info:
            return True
        else:
            return False
    def get_torrent(self, hash):
        """Return the client's info dict for *hash*, or False on failure."""
        logger.debug('Getting Torrent info hash: ' + hash)
        try:
            torrent_info = self.client.get_torrent(hash)
        except Exception as e:
            logger.error('Could not get torrent info for ' + hash)
            return False
        else:
            logger.info('Successfully located information for torrent')
            return torrent_info
    def load_torrent(self, filepath):
        """Add the .torrent file at *filepath* to qBittorrent.

        Returns a dict describing the added torrent with 'status': True, or
        {'status': False} if it already exists or could not be added.
        Requires an authenticated client (falls through to None otherwise).
        """
        logger.info('filepath to torrent file set to : ' + filepath)
        if self.client._is_authenticated is True:
            logger.info('Checking if Torrent Exists!')
            torrent_hash = self.get_the_hash(filepath)
            logger.debug('Torrent Hash (load_torrent): "' + torrent_hash + '"')
            logger.debug('FileName (load_torrent): ' + str(os.path.basename(filepath)))
            # Check if torrent already added
            if self.find_torrent(torrent_hash):
                logger.info('load_torrent: Torrent already exists!')
                # should set something here to denote that it's already loaded, so
                # the failed-download checker doesn't run and re-download multiple
                # copies of the same issues that are already downloaded
                return {'status': False}
            else:
                logger.info('Torrent not added yet, trying to add it now!')
                try:
                    # Context manager fixes the file-handle leak in the
                    # original (the handle was never closed).
                    with open(filepath, 'rb') as torrent_content:
                        self.client.download_from_file(torrent_content, category=str(mylar.QBITTORRENT_LABEL))
                except Exception as e:
                    logger.debug('Torrent not added')
                    return {'status': False}
                else:
                    logger.debug('Successfully submitted for add. Verifying item is now on client.')
                if mylar.QBITTORRENT_STARTONLOAD:
                    logger.info('attempting to start')
                    startit = self.client.force_start(torrent_hash)
                    logger.info('startit returned:' + str(startit))
                else:
                    logger.info('attempting to pause torrent incase it starts')
                    try:
                        startit = self.client.pause(torrent_hash)
                        logger.info('startit paused:' + str(startit))
                    except Exception:
                        logger.warn('Unable to pause torrent - possibly already paused?')
                try:
                    time.sleep(5) # wait 5 in case it's not populated yet.
                    tinfo = self.get_torrent(torrent_hash)
                except Exception as e:
                    logger.warn('Torrent was not added! Please check logs')
                    return {'status': False}
                else:
                    # NOTE(review): get_torrent returns False (not raises) on
                    # failure, so tinfo['total_size'] below can still TypeError
                    # -- confirm intended handling upstream.
                    logger.info('Torrent successfully added!')
                    filelist = self.client.get_torrent_files(torrent_hash)
                    #logger.info(filelist)
                    if len(filelist) == 1:
                        to_name = filelist[0]['name']
                    else:
                        to_name = tinfo['save_path']
                    torrent_info = {'hash': torrent_hash,
                                    'files': filelist,
                                    'name': to_name,
                                    'total_filesize': tinfo['total_size'],
                                    'folder': tinfo['save_path'],
                                    'time_started': tinfo['addition_date'],
                                    'label': mylar.QBITTORRENT_LABEL,
                                    'status': True}
                    #logger.info(torrent_info)
                    return torrent_info
    def get_the_hash(self, filepath):
        """Return the BitTorrent info-hash of *filepath* as uppercase hex
        (SHA-1 of the bencoded 'info' dictionary)."""
        import hashlib
        import bencode
        # Open torrent file; the context manager closes it promptly
        # (the original leaked the handle, and imported StringIO unused).
        with open(filepath, "rb") as torrent_file:
            metainfo = bencode.decode(torrent_file.read())
        info = metainfo['info']
        thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
        logger.debug('Hash: ' + thehash)
        return thehash
| [
"evilhero@gmail.com"
] | evilhero@gmail.com |
fcaf80bdda5cd0b6b0ce65c9fb6246ac8ad6e44f | dbc0202e6a002fae4d86a0b16649af0cc1577b4e | /app/newsh.py | 489d0f8eff1e569c83b5d60dd3031a701929a349 | [] | no_license | huangchao20/CheckPackage | c8613efd844e7ac20cfaf090590b341d1241702f | c2ddddd2578a2b15044573261520f29b3f110450 | refs/heads/master | 2020-04-16T21:56:48.858233 | 2019-01-23T00:39:16 | 2019-01-23T00:39:16 | 165,946,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import os
import re
def createNewSh(str1):
    """Begin assembling the shell script at path *str1*.

    NOTE(review): currently a stub -- it logs the path, checks that *str1*
    is an existing file, and does nothing else (the branch body is `pass`).
    """
    print("开始组建sh脚本")
    print("str1=[%s]" % str1)
    if os.path.isfile(str1):
        pass
def openfile(dpath, filename):
    """
    :function: insert the install command into the sh script
    :param dpath: directory containing the script; its last path component
        (split on backslashes, so Windows-style paths are assumed) becomes
        the first argument of the generated install command
    :param filename: script file name inside dpath
    :return: None (rewrites *filename* in place)
    """
    if os.path.splitext(filename)[1] == ".sh":
        # flag ensures the install line is inserted at most once when no
        # start marker is found
        flag = True
        # NOTE(review): "satrtflag" looks like a typo of "startflag" -- but it
        # must match the marker actually present in the scripts, so confirm
        # before changing the literal.
        startflag = "satrtflag"
        install = "install "
        os.chdir(dpath)
        # NOTE(review): hardcoded temp name; the original file is renamed to
        # this and never renamed back or deleted.
        nfilename = "22222222222.sh"
        os.rename(filename, nfilename)
        with open(nfilename, "r") as f:
            with open(filename, 'w+') as fn:
                for dd in f:
                    # builds e.g. tmp = 'install XQ-2018-791 TR_45871_X_20181210.sh'
                    tmp = install + dpath.split("\\").pop() + " " + filename + "\n"
                    if startflag in dd:
                        print(tmp)
                        fn.write(tmp)
                    elif install in dd and flag == True:
                        print(tmp)
                        fn.write(tmp)
                        flag = False
                    else:
                        fn.write(dd)
    else:
        # Non-.sh files are simply overwritten at the start with a test
        # string ("massage" is presumably a typo of "message").
        os.chdir(dpath)
        with open(filename, 'r+') as f:
            tmp = 'test massage'
            f.write(tmp)
if __name__ == '__main__':
dpath = "F:\\黄小宝的宝\\测试目录"
filename = "1111111111.sh" | [
"842713855@qq.com"
] | 842713855@qq.com |
a460369a6cb9776a2850ebfd39f7e10664457c89 | e5d5fa28999bcc6c642bb42dda93afd38e272b81 | /LeetCode/28. Find the Index of the First Occurrence in a String/solve4.py | 6bdbe92c17e5dbadf39ec79dae682ed9224d6700 | [] | no_license | chiahsun/problem_solving | cd3105969983d16d3d5d416d4a0d5797d4b58e91 | 559fafa92dd5516058bdcea82a438eadf5aa1ede | refs/heads/master | 2023-02-05T06:11:27.536617 | 2023-01-26T10:51:23 | 2023-01-26T10:51:23 | 30,732,382 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | class Solution:
def strStr(self, haystack: str, needle: str) -> int:
N, prev, pos = len(needle), -1, -1
fallback = [-1] * N
for i in range(1, N):
while prev >= 0 and needle[i] != needle[prev+1]:
prev = fallback[prev]
if needle[i] == needle[prev+1]:
prev += 1
fallback[i] = prev
for i, c in enumerate(haystack):
while pos >= 0 and needle[pos+1] != c:
pos = fallback[pos]
if pos >= 0 or needle[0] == c:
pos += 1
if pos == N-1:
return i-N+1
return -1
| [
"chiahsun0814@gmail.com"
] | chiahsun0814@gmail.com |
b6930afe6a1a7a309ead3c407c3baa734f28d9c1 | 951500339f5887590fbf83a900dc633887402580 | /examples/06-classes.py | 38f77b461d46ceb82464e6b518e669ca42ae5a97 | [] | no_license | Tanwuld/skillbox-chat | abfd4b6a888c84ee6274ace42e295508594887ef | 4218bebe40167549bee5e6ee45f5b9623f84ef03 | refs/heads/master | 2020-09-29T04:47:45.834886 | 2019-12-12T15:59:37 | 2019-12-12T15:59:37 | 226,955,034 | 1 | 0 | null | 2019-12-12T15:59:39 | 2019-12-09T19:52:57 | Python | UTF-8 | Python | false | false | 1,253 | py | # Created by Artem Manchenkov
# artyom@manchenkoff.me
#
# Copyright © 2019
#
# Объектно-ориентированное программирование, использование классов и объектов
#
# A simple class with attribute annotations only
# (note: bare class-level annotations declare types but create no attributes)
class Person:
    first_name: str
    last_name: str
    age: int
person1 = Person()
person1.first_name = 'John'
person1.last_name = 'Doe'
person1.age = 43
print(person1.first_name)
# A simple class with a constructor
class Person:
    first_name: str
    last_name: str
    age: int
    def __init__(self, first_name: str, last_name: str, age: int = 0):
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
person1 = Person('John', 'Doe', 43)
print(person1.first_name)
# A class with methods
class Person:
    first_name: str
    last_name: str
    age: int
    def __init__(self, first_name: str, last_name: str, age: int = 0):
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
    def info(self):
        # print a one-line human-readable summary of this person
        print(f"Person: {self.first_name} {self.last_name}, age: {self.age}")
person1 = Person('John', 'Doe', 43)
person1.info()
| [
"artyom@manchenkoff.me"
] | artyom@manchenkoff.me |
ef3b7fb16a5b900b2a8336fd5516cad4bdbbb9dd | c2163c653ba589ea610733250230ab4b57ab7d6a | /doc/uses/EPSCs-and-IPSCs/smooth histogram method/04.py | 6d67cefc7771ee5cfdad63b64fa73f9b4ad96701 | [
"MIT"
] | permissive | harshadsbs/SWHLab | 6ff57f816f252da888f5827e9ea677e696e69038 | a86c3c65323cec809a4bd4f81919644927094bf5 | refs/heads/master | 2021-05-20T12:24:55.229806 | 2018-12-14T03:18:38 | 2018-12-14T03:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,568 | py | """
MOST OF THIS CODE IS NOT USED
ITS COPY/PASTED AND LEFT HERE FOR CONVENIENCE
"""
import os
import sys
# in case our module isn't installed (running from this folder)
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../') # helps spyder get docs
import swhlab
import matplotlib.pyplot as plt
import numpy as np
import warnings # suppress VisibleDeprecationWarning warning
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def kernel_gaussian(size=100, sigma=None):
    """Return a *size*-point Gaussian kernel normalized to unit area.

    When *sigma* is omitted it defaults to size/10; an explicit sigma is
    truncated to an integer (matching the original behavior).
    """
    if sigma is None:
        sigma = size/10
    else:
        sigma = int(sigma)
    offsets = np.arange(size) - size/2
    weights = np.exp(-(offsets ** 2) / (2 * sigma ** 2))
    return weights / sum(weights)
def analyzeSweep(abf):
    """Summarize one sweep's phasic currents from a histogram of the
    baseline-subtracted trace.

    Returns [centerVal, EPSC, IPSC]: the current value at the histogram peak,
    and the summed (peak-normalized, center-rolled) histogram mass below /
    above the peak -- used as proxies for excitatory / inhibitory charge.
    Assumes *abf* exposes sweepYsmartbase() and pointsPerSec (swhlab ABF).
    """
    Y=abf.sweepYsmartbase()
    # skip the first 0.5 s of the sweep
    # NOTE(review): the float slice index relies on legacy NumPy behavior
    # (this file suppresses VisibleDeprecationWarning); modern NumPy
    # requires an int here.
    Y=Y[abf.pointsPerSec*.5:]
    # create a 1 Kbin histogram with bins centered around 3x the SD from the mean
    AV,SD=np.average(Y),np.std(Y)
    B1,B2=AV-SD*3,AV+SD*3
    nBins=1000
    hist, bin_edges = np.histogram(Y, density=False, bins=nBins, range=(B1,B2))
    # smooth with a Gaussian 1/5 the histogram width
    histSmooth=np.convolve(hist,kernel_gaussian(nBins/5),mode='same')
    histSmooth=histSmooth/max(histSmooth) # normalize to a peak of 1
    centerI=np.where(histSmooth==max(histSmooth))[0][0] # calculate center
    histSmooth=np.roll(histSmooth,int(nBins/2-centerI)) # roll data so center is in middle
    centerVal=bin_edges[centerI]
    # after rolling, the lower half is current more negative than the mode
    # (EPSC-like) and the upper half more positive (IPSC-like)
    EPSC=np.sum(histSmooth[:int(len(histSmooth)/2)])
    IPSC=np.sum(histSmooth[int(len(histSmooth)/2):])
    return [centerVal,EPSC,IPSC]
if __name__=="__main__":
abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf"
abf=swhlab.ABF(abfFile)
abf.kernel=abf.kernel_gaussian(sizeMS=500) # needed for smart base
Xs,centerVals,EPSCs,IPSCs=[],[],[],[]
for sweep in abf.setsweeps():
print("analyzing sweep",sweep)
centerVal,EPSC,IPSC=analyzeSweep(abf)
Xs.append(abf.sweepStart/60.0)
centerVals.append(centerVal)
EPSCs.append(EPSC)
IPSCs.append(IPSC)
plt.figure(figsize=(10,10))
plt.subplot(211)
plt.grid()
plt.plot(Xs,EPSCs,'r',alpha=.8,lw=2,label="excitation")
plt.plot(Xs,IPSCs,'b',alpha=.8,lw=2,label="inhibition")
plt.ylabel("power (sum norm half)")
plt.xlabel("experiment time (min)")
plt.margins(0,.1)
plt.subplot(212)
plt.grid()
plt.plot(Xs,centerVals,'g',alpha=.8,lw=2)
plt.ylabel("shift WRT baseline")
plt.xlabel("experiment time (min)")
plt.axhline(0,color='k',ls='--')
plt.margins(0,.1)
plt.show()
print("DONE")
| [
"swharden@gmail.com"
] | swharden@gmail.com |
9e46dea8c726e6b556c22797629ff6ce5462f2a9 | 9aaa39f200ee6a14d7d432ef6a3ee9795163ebed | /Algorithm/Python/507. Perfect Number.py | ec25990fcf6a4ba827dcadc6c1d1b4da2a527a0f | [] | no_license | WuLC/LeetCode | 47e1c351852d86c64595a083e7818ecde4131cb3 | ee79d3437cf47b26a4bca0ec798dc54d7b623453 | refs/heads/master | 2023-07-07T18:29:29.110931 | 2023-07-02T04:31:00 | 2023-07-02T04:31:00 | 54,354,616 | 29 | 16 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # -*- coding: utf-8 -*-
# @Author: WuLC
# @Date: 2017-03-31 23:48:21
# @Last modified by: WuLC
# @Last Modified time: 2017-03-31 23:49:23
# @Email: liangchaowu5@gmail.com
# naive solution
class Solution(object):
    def checkPerfectNumber(self, num):
        """
        :type num: int
        :rtype: bool

        Return True if num equals the sum of its proper positive divisors
        (e.g. 28 = 1 + 2 + 4 + 7 + 14).

        Fixes vs. the original: `xrange` (Python-2-only name) and the
        missing `math` import are gone, and a perfect-square num no longer
        counts its square root twice.
        """
        if num <= 1:
            return False
        total = 1  # 1 is a proper divisor of every num > 1
        divisor = 2
        # Enumerate divisor pairs (d, num // d) up to sqrt(num).
        while divisor * divisor <= num:
            if num % divisor == 0:
                total += divisor
                partner = num // divisor
                if partner != divisor:  # don't double-count a square root
                    total += partner
            divisor += 1
        return total == num
"liangchaowu5@gmail.com"
] | liangchaowu5@gmail.com |
fb18b89e34d8324bd64c7a65ddc675258ea78b59 | d838bed08a00114c92b73982a74d96c15166a49e | /docs/data/learn/Bioinformatics/input/ch6_code/src/Stepik.6.3.ExerciseBreak.CountUniqueReversalsFor100SyntenyBlocks.py | ebdc61515fc02e393e9c4f69178b127ff28b5644 | [] | no_license | offbynull/offbynull.github.io | 4911f53d77f6c59e7a453ee271b1e04e613862bc | 754a85f43159738b89dd2bde1ad6ba0d75f34b98 | refs/heads/master | 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # Poorly worded question. Here's my crack at rewording it: What's the maximum number of unique reversals possible on a
# permutation of length 100? So for example, assume you have a permutation of length 2: [+A, +B]...
#
# reverse 1: [+A, +B] to [-A ,+B]
# reverse 2: [+A, +B] to [+A, -B]
# reverse range 1-2: [+A, +B] to [-B, -A]
#
# That's it. Any permutation of length 2 will have a max of 3 unique reversals possible.
#
# Now apply the logic to a permutation of length 100.
total = 0
block_count = 100
# A reversal is identified by its start/end block pair with start <= end, so
# each choice of `start` contributes `start` distinct reversals; the total is
# simply 1 + 2 + ... + block_count (the inner counting loop collapsed, as the
# original comment suggested).
for start in range(1, block_count + 1):
    total += start
print(f'{total}')
| [
"offbynull@gmail.com"
] | offbynull@gmail.com |
a1232a7431c67b8eab70fd33da37c300d8418e45 | 000002c39ac5c00f1f70d7667d772d3acbe95680 | /batchtest.py | 6b500f8323c871c4a0d69f28526dc462ca91a247 | [] | no_license | ag8/shapes | ab0dcfa1d46c412faf63c421edec9a0165eb5090 | c6b38eca3a50b8a31ab7ccec11158e4a99fb628b | refs/heads/master | 2020-12-03T06:36:40.435743 | 2017-07-07T01:23:23 | 2017-07-07T01:23:23 | 95,675,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | from __future__ import print_function
import tensorflow as tf
f = ["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8"]
l = ["l1", "l2", "l3", "l4", "l5", "l6", "l7", "l8"]
fv = tf.constant(f)
lv = tf.constant(l)
rsq = tf.RandomShuffleQueue(10, 0, [tf.string, tf.string], shapes=[[],[]])
do_enqueues = rsq.enqueue_many([fv, lv])
gotf, gotl = rsq.dequeue()
print("Getting batch")
iB, lB = tf.train.batch([gotf, gotl], batch_size=6, num_threads=4, capacity=2 * 3, dynamic_pad=True)
print("Got batch")
with tf.Session() as sess:
tf.global_variables_initializer().run(session=sess)
tf.train.start_queue_runners(sess=sess)
sess.run(do_enqueues)
for i in xrange(4):
one_f, one_l = sess.run([gotf, gotl])
one_l = one_l + '3434'
print("F: ", one_f, "L: ", one_l) | [
"andrew2000g@gmail.com"
] | andrew2000g@gmail.com |
f7eadefbc3b67fe920ef3ab321a31c5a0f3b62e9 | 67e817ca139ca039bd9eee5b1b789e5510119e83 | /Tree/[662]Maximum Width of Binary Tree.py | de96b2b77ecccee00510b6deba357ba2222af7b4 | [] | no_license | dstch/my_leetcode | 0dc41e7a2526c2d85b6b9b6602ac53f7a6ba9273 | 48a8c77e81cd49a75278551048028c492ec62994 | refs/heads/master | 2021-07-25T21:30:41.705258 | 2021-06-06T08:58:29 | 2021-06-06T08:58:29 | 164,360,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | # Given a binary tree, write a function to get the maximum width of the given tr
# ee. The maximum width of a tree is the maximum width among all levels.
#
# The width of one level is defined as the length between the end-nodes (the le
# ftmost and right most non-null nodes in the level, where the null nodes between
# the end-nodes are also counted into the length calculation.
#
# It is guaranteed that the answer will in the range of 32-bit signed integer.
#
#
# Example 1:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \ \
# 5 3 9
#
# Output: 4
# Explanation: The maximum width existing in the third level with the length 4 (
# 5,3,null,9).
#
#
# Example 2:
#
#
# Input:
#
# 1
# /
# 3
# / \
# 5 3
#
# Output: 2
# Explanation: The maximum width existing in the third level with the length 2 (
# 5,3).
#
#
# Example 3:
#
#
# Input:
#
# 1
# / \
# 3 2
# /
# 5
#
# Output: 2
# Explanation: The maximum width existing in the second level with the length 2
# (3,2).
#
#
# Example 4:
#
#
# Input:
#
# 1
# / \
# 3 2
# / \
# 5 9
# / \
# 6 7
# Output: 8
# Explanation:The maximum width existing in the fourth level with the length 8 (
# 6,null,null,null,null,null,null,7).
#
#
#
# Constraints:
#
#
# The given binary tree will have between 1 and 3000 nodes.
#
# Related Topics Tree
# 👍 2131 👎 380
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> int:
        """Return the maximum width over all levels of the tree (LeetCode 662).

        Nodes are numbered heap-style (root = 1, children of i are 2i and
        2i + 1); a level's width is last index - first index + 1, which
        counts the null gaps between the end nodes.
        """
        # res[level] collects heap indices in DFS preorder; since the left
        # subtree is visited first, each level's list is in increasing
        # order, so res[level][0] / res[level][-1] are its end nodes.
        res = []
        result = 0
        def func(node, level, index):
            if node:
                nonlocal result
                if len(res) == level:
                    res.append([])
                res[level].append(index)
                func(node.left, level + 1, index * 2)
                func(node.right, level + 1, index * 2 + 1)
                # width of this node's level given everything visited so far
                temp = res[level][-1] - res[level][0] + 1
                if temp > result:
                    result = temp
        func(root, 0, 1)
        return result
# leetcode submit region end(Prohibit modification and deletion)
| [
"dstch@163.com"
] | dstch@163.com |
aaac2a8d988a604a4d19efaa994359ce8a18e87f | aaa6ae528d66e711f41699d6b6ee79fa059be4f8 | /satchmo/shipping/modules/tieredweightzone/migrations/0002_auto_20190417_1857.py | 693f7226b1d816c86b07f97d97552a1cbf02d836 | [
"BSD-2-Clause"
] | permissive | ToeKnee/jelly-roll | c23e1eac1c2983ede4259bd047578c404a8c72e0 | c2814749c547349ff63415bdc81f53eb1215c7c0 | refs/heads/master | 2020-05-21T22:34:00.399719 | 2020-02-03T20:20:02 | 2020-02-03T20:20:02 | 33,657,967 | 0 | 1 | null | 2015-07-21T20:36:13 | 2015-04-09T08:37:28 | Python | UTF-8 | Python | false | false | 1,617 | py | # Generated by Django 2.1.7 on 2019-04-17 18:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tieredweightzone', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='carrier',
name='delivery',
field=models.CharField(default='', max_length=200, verbose_name='Delivery Days'),
preserve_default=False,
),
migrations.AddField(
model_name='carrier',
name='description',
field=models.CharField(default='', max_length=200, verbose_name='Description'),
preserve_default=False,
),
migrations.AddField(
model_name='carrier',
name='method',
field=models.CharField(default='', help_text='i.e. US Mail', max_length=200, verbose_name='Method'),
preserve_default=False,
),
migrations.AddField(
model_name='carrier',
name='name',
field=models.CharField(default='', max_length=50, verbose_name='Carrier'),
preserve_default=False,
),
migrations.AddField(
model_name='zone',
name='description',
field=models.CharField(default='', max_length=200, verbose_name='Description'),
preserve_default=False,
),
migrations.AddField(
model_name='zone',
name='name',
field=models.CharField(default='', max_length=50, verbose_name='Zone'),
preserve_default=False,
),
]
| [
"tony@ynottony.net"
] | tony@ynottony.net |
a9c83352baf1af5398777f9338af863c8f4e6112 | bc25016fdae676eb7b000e59b8e823da6fefe157 | /servo/stm32uart.py | d0758b97afd2735e62bd0b36b9621b37d3a6cf0f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mmind/servo-hdctools | b275d98e8c71b46cfc326aad774ce83b29b55d58 | c7d50190837497dafc45f6efe18bf01d6e70cfd2 | refs/heads/master | 2020-06-24T20:41:19.110569 | 2016-11-28T13:04:07 | 2016-11-28T13:04:07 | 74,622,430 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,724 | py | # Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allow creation of uart/console interface via stm32 usb endpoint."""
import errno
import exceptions
import logging
import os
import pty
import select
import sys
import termios
import threading
import time
import tty
import usb
import stm32usb
import uart
class SuartError(Exception):
  """Error raised by the Suart uart/console wrapper.

  Attributes:
    msg: human-readable description of the failure
    value: numeric status code (0 when no status applies)
  """
  def __init__(self, msg, value=0):
    """Record the message and status code, forwarding both to Exception.

    Args:
      msg: string, message describing error in detail
      value: integer, value of error when non-zero status returned. Default=0
    """
    self.msg = msg
    self.value = value
    super(SuartError, self).__init__(msg, value)
class Suart(uart.Uart):
  """Provide interface to stm32 serial usb endpoint.

  Bridges a stm32 USB bulk read/write endpoint pair to a local PTY so that
  ordinary terminal tools can talk to the device's uart.
  """
  def __init__(self, vendor=0x18d1, product=0x501a, interface=0,
               serialname=None, ftdi_context=None):
    """Suart constructor.

    Initializes stm32 USB stream interface.

    Args:
      vendor: usb vendor id of stm32 device
      product: usb product id of stm32 device
      interface: interface number of stm32 device to use
      serialname: n/a. Defaults to None.
      ftdi_context: n/a. Defaults to None.

    Raises:
      SuartError: If init fails
    """
    super(Suart, self).__init__()
    self._logger = logging.getLogger('Suart')
    self._logger.debug('')
    self._logger.debug('Suart opening %04x:%04x, intf %d, sn: %s' % (
        vendor, product, interface, serialname))
    # Low-level USB transport wrapper; exposes read/write endpoints.
    self._susb = stm32usb.Susb(vendor=vendor, product=product,
        interface=interface, serialname=serialname, logger=self._logger)
    self._logger.debug("Set up stm32 uart")
  def __del__(self):
    """Suart destructor."""
    self._logger.debug('')
  def run_rx_thread(self):
    # Pump device -> PTY: forward bytes read from the USB IN endpoint to the
    # PTY master. Runs forever on a daemon thread started by run().
    self._logger.debug('rx thread started on %s' % self.get_pty())
    ep = select.epoll()
    ep.register(self._ptym, select.EPOLLHUP)
    while True:
      events = ep.poll(0)
      # Check if the pty is connected to anything, or hungup.
      if not events:
        try:
          r = self._susb._read_ep.read(64, self._susb.TIMEOUT_MS)
          if r:
            os.write(self._ptym, r)
        except Exception as e:
          # If we miss some characters on pty disconnect, that's fine.
          # ep.read() also throws USBError on timeout, which we discard.
          # NOTE(review): `exceptions.OSError` implies a Python 2 target;
          # confirm before porting.
          if type(e) not in [exceptions.OSError, usb.core.USBError]:
            self._logger.debug("rx %s: %s" % (self.get_pty(), e))
      else:
        time.sleep(.1)
  def run_tx_thread(self):
    # Pump PTY -> device: forward bytes typed into the PTY to the USB OUT
    # endpoint. Mirror image of run_rx_thread().
    self._logger.debug("tx thread started on %s" % self.get_pty())
    ep = select.epoll()
    ep.register(self._ptym, select.EPOLLHUP)
    while True:
      events = ep.poll(0)
      # Check if the pty is connected to anything, or hungup.
      if not events:
        try:
          r = os.read(self._ptym, 64)
          if r:
            self._susb._write_ep.write(r, self._susb.TIMEOUT_MS)
        except Exception as e:
          self._logger.debug("tx %s: %s" % (self.get_pty(), e))
      else:
        time.sleep(.1)
  def run(self):
    """Creates pthreads to poll stm32 & PTY for data.
    """
    self._logger.debug('')
    m, s = os.openpty()
    self._ptyname = os.ttyname(s)
    self._logger.debug("PTY name: %s" % self._ptyname)
    self._ptym = m
    self._ptys = s
    os.fchmod(s, 0o660)
    # Change the owner and group of the PTY to the user who started servod.
    try:
      uid = int(os.environ.get('SUDO_UID', -1))
    except TypeError:
      uid = -1
    try:
      gid = int(os.environ.get('SUDO_GID', -1))
    except TypeError:
      gid = -1
    os.fchown(s, uid, gid)
    # Raw mode: no line buffering or character translation on the PTY.
    tty.setraw(self._ptym, termios.TCSADRAIN)
    # Generate a HUP flag on pty slave fd.
    os.fdopen(s).close()
    self._logger.debug('stm32 uart pty is %s' % self.get_pty())
    # Daemon threads so they don't keep the process alive on exit.
    self._rx_thread = threading.Thread(target=self.run_rx_thread, args=[])
    self._rx_thread.daemon = True
    self._rx_thread.start()
    self._tx_thread = threading.Thread(target=self.run_tx_thread, args=[])
    self._tx_thread.daemon = True
    self._tx_thread.start()
    self._logger.debug('stm32 rx and tx threads started.')
  def get_uart_props(self):
    """Get the uart's properties.

    Returns:
      dict where:
        baudrate: integer of uarts baudrate
        bits: integer, number of bits of data Can be 5|6|7|8 inclusive
        parity: integer, parity of 0-2 inclusive where:
          0: no parity
          1: odd parity
          2: even parity
        sbits: integer, number of stop bits. Can be 0|1|2 inclusive where:
          0: 1 stop bit
          1: 1.5 stop bits
          2: 2 stop bits
    """
    self._logger.debug('')
    # Fixed line settings: the USB bridge always presents 115200 8n1.
    return {'baudrate': 115200,
            'bits': 8,
            'parity': 0,
            'sbits': 1}
  def set_uart_props(self, line_props):
    """Set the uart's properties. Note that Suart cannot set properties
    and will fail if the properties are not the default 115200,8n1.

    Args:
      line_props: dict where:
        baudrate: integer of uarts baudrate
        bits: integer, number of bits of data ( prior to stop bit)
        parity: integer, parity of 0-2 inclusive where
          0: no parity
          1: odd parity
          2: even parity
        sbits: integer, number of stop bits. Can be 0|1|2 inclusive where:
          0: 1 stop bit
          1: 1.5 stop bits
          2: 2 stop bits

    Raises:
      SuartError: If requested line properties are not the default.
    """
    self._logger.debug('')
    curr_props = self.get_uart_props()
    # Only the fixed defaults are accepted; anything else is an error.
    for prop in line_props:
      if line_props[prop] != curr_props[prop]:
        raise SuartError("Line property %s cannot be set from %s to %s" % (
            prop, curr_props[prop], line_props[prop]))
    return True
  def get_pty(self):
    """Gets path to pty for communication to/from uart.

    Returns:
      String path to the pty connected to the uart
    """
    self._logger.debug('')
    return self._ptyname
def test():
  """Manual smoke test: open the stm32 uart bridge and serve it forever."""
  debug = True
  fmt = '%(asctime)s - %(name)s - %(levelname)s'
  loglevel = logging.INFO
  if debug:
    loglevel = logging.DEBUG
    fmt += ' - %(filename)s:%(lineno)d:%(funcName)s'
  fmt += ' - %(message)s'
  logging.basicConfig(level=loglevel, format=fmt)

  logger = logging.getLogger(os.path.basename(sys.argv[0]))
  logger.info('Start')

  sobj = Suart()
  sobj.run()
  logging.info('%s' % sobj.get_pty())

  # run() is a thread so just busy wait to mimic server
  while True:
    # ours sleeps to eleven!
    time.sleep(11)
if __name__ == '__main__':
  try:
    test()
  except KeyboardInterrupt:
    # Ctrl-C is the expected way to stop the endless serving loop.
    sys.exit(0)
| [
"chrome-bot@chromium.org"
] | chrome-bot@chromium.org |
5252b25fafef0707d008c0d81a0299eea6cd5383 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/tensorflow/python/kernel_tests/where_op_test.py | 17575da6f1bf2c226a67419b4bc8156f70f6dedc | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 5,579 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
  """Tests for array_ops.where: index extraction (1-arg) and select (3-arg)."""

  def _testWhere(self, x, truth, expected_err_re=None):
    """Evaluate tf.where(x) and compare against `truth`.

    Args:
      x: numpy array whose True/nonzero coordinates are extracted.
      truth: expected [num_true, rank(x)] int64 coordinate array.
      expected_err_re: if set, expect an op error matching this regex
        instead of a successful evaluation.
    """
    with self.test_session(use_gpu=True):
      ans = array_ops.where(x)
      # The number of true elements is unknown statically, hence None.
      self.assertEqual([None, x.ndim], ans.get_shape().as_list())
      if expected_err_re is None:
        tf_ans = ans.eval()
        self.assertAllClose(tf_ans, truth, atol=1e-10)
      else:
        with self.assertRaisesOpError(expected_err_re):
          ans.eval()

  def testWrongNumbers(self):
    """The 3-arg form requires both branches; one-sided calls must raise."""
    with self.test_session(use_gpu=True):
      with self.assertRaises(ValueError):
        array_ops.where([False, True], [1, 2], None)
      with self.assertRaises(ValueError):
        array_ops.where([False, True], None, [1, 2])

  def testBasicVec(self):
    x = np.asarray([True, False])
    truth = np.asarray([[0]], dtype=np.int64)
    self._testWhere(x, truth)
    x = np.asarray([False, True, False])
    truth = np.asarray([[1]], dtype=np.int64)
    self._testWhere(x, truth)
    x = np.asarray([False, False, True, False, True])
    truth = np.asarray([[2], [4]], dtype=np.int64)
    self._testWhere(x, truth)

  def testRandomVec(self):
    x = np.random.rand(1000000) > 0.5
    truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
    self._testWhere(x, truth)

  def testBasicMat(self):
    x = np.asarray([[True, False], [True, False]])
    # Ensure RowMajor mode
    truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
    self._testWhere(x, truth)

  def testBasic3Tensor(self):
    x = np.asarray([[[True, False], [True, False]],
                    [[False, True], [False, True]],
                    [[False, False], [False, True]]])
    # Ensure RowMajor mode
    truth = np.asarray(
        [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
    self._testWhere(x, truth)

  def _testRandom(self, dtype, expected_err_re=None):
    """Random 0/1 tensor cast to `dtype`, checked against np.where."""
    shape = [127, 33, 53]
    # Fix: a previous revision first built an unused complex-valued tensor
    # here and immediately overwrote it; the dead statement is removed.
    x = (np.random.randn(*shape) > 0).astype(dtype)
    truth = np.where(np.abs(x) > 0)  # Tuples of indices by axis.
    truth = np.vstack(truth).T  # Convert to [num_true, indices].
    self._testWhere(x, truth, expected_err_re)

  def testRandomBool(self):
    # np.bool was a deprecated alias of the builtin `bool` and was removed
    # in NumPy 1.24; np.bool_ keeps the identical casting behavior.
    self._testRandom(np.bool_)

  def testRandomInt32(self):
    self._testRandom(np.int32)

  def testRandomInt64(self):
    self._testRandom(np.int64)

  def testRandomFloat(self):
    self._testRandom(np.float32)

  def testRandomDouble(self):
    self._testRandom(np.float64)

  def testRandomComplex64(self):
    self._testRandom(np.complex64)

  def testRandomComplex128(self):
    self._testRandom(np.complex128)

  def testRandomUint8(self):
    self._testRandom(np.uint8)

  def testRandomInt8(self):
    self._testRandom(np.int8)

  def testRandomInt16(self):
    self._testRandom(np.int16)

  def testThreeArgument(self):
    """3-arg where must match np.where elementwise selection."""
    x = np.array([[-2, 3, -1], [1, -3, -3]])
    np_val = np.where(x > 0, x * x, -x)
    with self.test_session(use_gpu=True):
      tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
    self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
  """Microbenchmark of tf.where over input sizes, densities, and devices."""
  def benchmarkWhere(self):
    # Sweep rows m, columns n, expected density p of true values, and device.
    for (m, n, p, use_gpu) in itertools.product(
        [10],
        [10, 100, 1000, 10000, 100000, 1000000],
        [0.01, 0.5, 0.99],
        [False, True]):
      name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          # Boolean input with roughly p * m * n true entries.
          x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
          v = resource_variable_ops.ResourceVariable(x)
          op = array_ops.where(v)
        with session.Session() as sess:
          v.initializer.run()
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          # Input volume (the formula assumes one byte per bool element).
          gb_processed_input = m * n / 1.0e9
          # approximate size of output: m*n*p int64s for each axis.
          gb_processed_output = 2 * 8 * m * n * p / 1.0e9
          gb_processed = gb_processed_input + gb_processed_output
          throughput = gb_processed / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()
if __name__ == "__main__":
  # Runs the unit tests (and the benchmark when --benchmarks is passed).
  test.main()
| [
"thomas.warfel@pnnl.gov"
] | thomas.warfel@pnnl.gov |
9cb4ecfd020a49a4c42e9f0db47935b6e84e0704 | 13a4df75e81ee4330a197340a300ec0755247a93 | /Strings/4.balanceStrings.py | 7e038243f1e6a517488ccb561510ce15d9197e87 | [] | no_license | ltfafei/py_Leetcode_study | d22955380bf9f134bc9cb215fea73ec4f9ea94cf | 0fd1bca56a621001cf9093f60941c4bfed4c79a5 | refs/heads/master | 2023-07-13T18:15:59.098314 | 2021-08-30T15:11:17 | 2021-08-30T15:11:17 | 363,597,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | #!/usr/bin/python
# Env: python3
# Rewrite by afei_0and1
'''
4.1、平衡字符串
现在输入一个只包含L和R的字符串,并其中L与R的个数是相等的。符合这种输入条件的字符串称之为”平衡字符串“。
要求通过编程对输入的平衡字符串进行分割,尽可能多的分割出平衡字符串子串,并将可以得到的子串数量返回。
例如:输入:RLLRRRL,将返回结果:3,其可以分割成:RL、LLRR和RL;输入:LLLRRR将返回结果:1,因为其只能分割
出LLLRRR。
'''
def balanceStrings(string):
    """Count the balanced substrings an L/R string splits into.

    Scans left to right, closing a piece whenever the number of "L"s seen
    equals the number of "R"s seen since the last piece, then resets both
    counters.  (Note: the tie check runs after every character, so characters
    other than "L"/"R" while the counters are tied also close a piece --
    this mirrors the original scan exactly.)
    """
    pieces = 0       # balanced substrings found so far
    l_count = 0      # "L"s since the last cut
    r_count = 0      # "R"s since the last cut
    for ch in string:
        if ch == "L":
            l_count += 1
        if ch == "R":
            r_count += 1
        # Counts tied: everything since the last cut forms a balanced piece.
        if r_count == l_count:
            pieces += 1
            l_count = 0
            r_count = 0
    return pieces
print(balanceStrings("RLLLRRRL"))
'''
Output result:
3
'''
'''
4.2、分割回文字符串
要求输入一个字符串,将此字符串分割成一些子串,使得每个子串都是回文字符串(单字符的字符串也属于
回文字符串)。要求通过编程将所有的分割结果返回。例如:输入字符串“abb”,返回
[
["a", "b", "b"], ["a", "bb"]
]这个二维列表作为答案(列表中元素位置可以变动)。
'''
def isPlalind(string):
    """Return True if `string` is a palindrome (reads the same reversed)."""
    return string == string[::-1]


# start: index where the unconsumed suffix begins; string: the full input
# l: palindromic pieces chosen so far; res: collector for complete splits
def cut_plalindString(start, string, l, res):
    """Recursively enumerate every split of string[start:] into palindromes."""
    if start > len(string) - 1:
        # The whole string is consumed: record a copy of this split.
        res.append(list(l))
        return
    # Try every prefix of the remaining suffix that is a palindrome.
    for index in range(start + 1, len(string) + 1):
        piece = string[start:index]
        if isPlalind(piece):
            cut_plalindString(index, string, l + [piece], res)


def func(string):
    """Return all ways to partition `string` into palindromic substrings."""
    splits = []
    cut_plalindString(0, string, [], splits)
    return splits
print(func("abb"))
'''
Output result:
[['a', 'b', 'b'], ['a', 'bb']]
''' | [
"m18479685120@163.com"
] | m18479685120@163.com |
854703fda0ce649f68979599bf1e07ee0f3ca0ee | 40125ea7386e269bbae2425a318a3fd2e8571cb3 | /src/ie/urls_library.py | d191538ba4416aa1650b93c462b5b0788bef5722 | [
"MIT"
] | permissive | compressore/moc | bb160a308562e6e57fe4300a8d8a6ee00a59e785 | 8e05e3e60d2d2c7534e0c659b6ed0743e9189f6b | refs/heads/master | 2023-02-11T18:13:44.096427 | 2021-01-06T11:08:02 | 2021-01-06T11:08:02 | 327,283,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | """ Project-specific URL Configuration
See urls.py for more information
"""
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from core.admin import admin_site
# Which PROJECT_LIST entry this URLconf serves; that app is mounted at "/".
current_site = "library"
urlpatterns = [
    path("admin/", admin_site.urls),
    path("tinymce/", include("tinymce.urls")),
]
# Mount every project app: the current site at the root, every other app
# under its configured prefix from settings.PROJECT_LIST.
for key,value in settings.PROJECT_LIST.items():
    if key == current_site:
        # This makes the current site be mounted on the root directory
        get_path = ""
    else:
        get_path = value["url"]
    urlpatterns += [
        path(get_path, include(key+".urls")),
    ]
| [
"paul@penguinprotocols.com"
] | paul@penguinprotocols.com |
aca32633a7afe077270b8ec6cb5ecd7dd189ccc3 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/sacremoses/__init__.py | 48cc9af42c69614012407cbaee0083bb18bc67f6 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4b560216eecc87542d164c228258494730d7eeb75b4bddbd9bf242ff0b88cfb8
size 196
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
c664d8e4031d11d62a0818bb29281830bbbd6ece | a6194c0c20dc09a115f54ebd6a02fbbb55e206e8 | /dqn_agent/q_network.py | 3970d8bc020f04f36e19d8113c5b9155449e1612 | [] | no_license | Rowing0914/tf_agent_investigation | c1149c1b7371c070ef8513e7bf0fd63a48d33cee | dbce3862abf12a21115e67e6391314f8d866b658 | refs/heads/master | 2020-08-13T13:53:58.249071 | 2019-10-14T07:56:40 | 2019-10-14T07:56:40 | 214,979,265 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | from __future__ import absolute_import, division, print_function
import gin, tensorflow as tf
from tf_agents.networks import encoding_network, network
def validate_specs(action_spec, observation_spec):
    """Check that `action_spec` describes one scalar (or length-1) action.

    `observation_spec` is accepted for signature compatibility but unused.
    """
    del observation_spec
    specs = tf.nest.flatten(action_spec)
    if len(specs) > 1:
        raise ValueError("Network only supports action_specs with a single action.")
    if specs[0].shape not in [(), (1,)]:
        raise ValueError("Network only supports action_specs with shape in [(), (1,)])")
@gin.configurable
class QNetwork(network.Network):
    """Q-network: an EncodingNetwork trunk plus a linear Q-value head
    emitting one value per discrete action."""
    def __init__(self,
                 input_tensor_spec,
                 action_spec,
                 preprocessing_layers=None,
                 preprocessing_combiner=None,
                 conv_layer_prarams=None,
                 fc_layer_params=(75, 40),
                 dropout_layer_params=None,
                 activation_fn=tf.keras.activations.relu,
                 kernel_initializer=None,
                 batch_squash=True,
                 dtype=tf.float32,
                 name="QNetwork"):
        # NOTE(review): "conv_layer_prarams" is a typo for "conv_layer_params",
        # but it is a keyword callers may already use, so it is kept as-is.
        validate_specs(action_spec, input_tensor_spec)
        action_spec = tf.nest.flatten(action_spec)[0]
        # Discrete action range [minimum, maximum] -> number of Q outputs.
        num_actions = action_spec.maximum - action_spec.minimum + 1
        encoder_input_tensor_spec = input_tensor_spec
        # Shared trunk that maps observations to a flat feature vector.
        encoder = encoding_network.EncodingNetwork(encoder_input_tensor_spec,
                                                   preprocessing_layers=preprocessing_layers,
                                                   preprocessing_combiner=preprocessing_combiner,
                                                   conv_layer_params=conv_layer_prarams,
                                                   fc_layer_params=fc_layer_params,
                                                   dropout_layer_params=dropout_layer_params,
                                                   activation_fn=activation_fn,
                                                   kernel_initializer=kernel_initializer,
                                                   batch_squash=batch_squash,
                                                   dtype=dtype)
        # NOTE(review): minval == maxval == 0.03 makes this effectively a
        # constant initializer; upstream tf_agents uses minval=-0.03 --
        # confirm whether this is intentional.
        q_value_layer = tf.keras.layers.Dense(num_actions,
                                              activation=None,
                                              kernel_initializer=tf.compat.v1.initializers.random_uniform(minval=0.03,
                                                                                                          maxval=0.03),
                                              bias_initializer=tf.compat.v1.initializers.constant(-0.2),
                                              dtype=dtype)
        super(QNetwork, self).__init__(input_tensor_spec=input_tensor_spec,
                                       state_spec=(),
                                       name=name)
        self._encoder = encoder
        self._q_value_layer = q_value_layer
    def call(self, observation, step_type=None, network_state=()):
        """Return (q_values, network_state) for a batch of observations."""
        state, network_state = self._encoder(observation, step_type=step_type, network_state=network_state)
        return self._q_value_layer(state), network_state
"kosakaboat@gmail.com"
] | kosakaboat@gmail.com |
49e463ea9d2c04cee39afcce3719c6f9c650dad7 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/netapp/volumes/replications/reverse.py | 8cd797fd2d4a34131e35fbbbbdc0477a66f1efbb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 3,224 | py | # -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reverse a Cloud NetApp Volume Replication's direction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.netapp.volumes.replications import client as replications_client
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.netapp import flags
from googlecloudsdk.command_lib.netapp.volumes.replications import flags as replications_flags
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Reverse(base.Command):
  """Reverse a Cloud NetApp Volume Replication's direction."""

  # Which gcloud release track's API client this command uses.
  _RELEASE_TRACK = base.ReleaseTrack.GA

  detailed_help = {
      'DESCRIPTION': """\
          Reverse a Cloud NetApp Volume Replication.
          """,
      'EXAMPLES': """\
          The following command reverses a Replication named NAME using the required arguments:

              $ {command} NAME --location=us-central1 --volume=vol1

          To reverse a Replication named NAME asynchronously, run the following command:

              $ {command} NAME --location=us-central1 --volume=vol1 --async
          """,
  }

  @staticmethod
  def Args(parser):
    """Register the replication resource, --volume and --async flags."""
    concept_parsers.ConceptParser(
        [
            flags.GetReplicationPresentationSpec(
                'The Replication to reverse direction.'
            )
        ]
    ).AddToParser(parser)
    replications_flags.AddReplicationVolumeArg(parser, reverse_op=True)
    flags.AddResourceAsyncFlag(parser)

  def Run(self, args):
    """Reverse a Cloud NetApp Volume Replication's direction in the current project."""
    replication_ref = args.CONCEPTS.replication.Parse()
    # --volume is a separate flag, so its presence is enforced manually.
    if args.CONCEPTS.volume.Parse() is None:
      raise exceptions.RequiredArgumentException(
          '--volume', 'Requires a volume to reverse replication of'
      )
    client = replications_client.ReplicationsClient(self._RELEASE_TRACK)
    result = client.ReverseReplication(
        replication_ref, args.async_)
    if args.async_:
      # Async mode: tell the user how to poll for completion.
      command = 'gcloud {} netapp volumes replications list'.format(
          self.ReleaseTrack().prefix
      )
      log.status.Print(
          'Check the status of the reversed replication by listing all'
          ' replications:\n  $ {} '.format(command)
      )
    return result
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class ReverseBeta(Reverse):
  """Reverse a Cloud NetApp Volume Replication's direction."""

  # Beta-track variant; all behavior is inherited from Reverse.
  _RELEASE_TRACK = base.ReleaseTrack.BETA
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
8c8158918e991fc5c7b0286a8c393cab6a256e10 | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-py/tests/testdir_algos/rf/pyunit_custom_metrics_pubdev_5088.py | 2a91952de3807564ef0b33cc1b40b49fbe8907f1 | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 1,637 | py | import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from tests.pyunit_utils import CustomMaeFunc, CustomRmseFunc,\
assert_correct_custom_metric, regression_model, multinomial_model, binomial_model
from h2o.estimators.random_forest import H2ORandomForestEstimator
# Custom model metrics fixture
def custom_mae_mm():
    """Upload the custom MAE metric implementation to H2O and return its handle."""
    metric_ref = h2o.upload_custom_metric(
        CustomMaeFunc, func_name="mae", func_file="mm_mae.py")
    return metric_ref
def custom_rmse_mm():
    """Upload the custom RMSE metric implementation to H2O and return its handle."""
    metric_ref = h2o.upload_custom_metric(
        CustomRmseFunc, func_name="rmse", func_file="mm_rmse.py")
    return metric_ref
# Test that the custom model metric is computed
# and compare them with implicit custom metric
def test_custom_metric_computation_regression():
    """DRF regression model with a custom MAE metric must match the builtin."""
    model, test_frame = regression_model(H2ORandomForestEstimator, custom_mae_mm())
    assert_correct_custom_metric(model, test_frame, "mae", "Regression on prostate")
def test_custom_metric_computation_binomial():
    """DRF binomial model with a custom RMSE metric must match the builtin."""
    model, test_frame = binomial_model(H2ORandomForestEstimator, custom_rmse_mm())
    assert_correct_custom_metric(model, test_frame, "rmse", "Binomial on prostate")
def test_custom_metric_computation_multinomial():
    """DRF multinomial model with a custom RMSE metric must match the builtin."""
    model, test_frame = multinomial_model(H2ORandomForestEstimator, custom_rmse_mm())
    assert_correct_custom_metric(model, test_frame, "rmse", "Multinomial on iris")
# Tests to invoke in this suite
__TESTS__ = [
    test_custom_metric_computation_binomial,
    test_custom_metric_computation_regression,
    test_custom_metric_computation_multinomial
]
if __name__ == "__main__":
    # Standalone run: pyunit_utils wraps each test with H2O cluster handling.
    for func in __TESTS__:
        pyunit_utils.standalone_test(func)
else:
    # Imported by a suite runner (cluster assumed to be managed elsewhere).
    for func in __TESTS__:
        func()
| [
"noreply@github.com"
] | Winfredemalx54.noreply@github.com |
0cb7704f335aa1089c00787ef0e0221b9dfc20c3 | b316c1d1e57ca197b0b24625b5ceede12905a979 | /tango_with_django_project/tango_with_django_project/urls.py | 67adf6c79e5fb3c4505ed3c1b74f796dcb90e178 | [] | no_license | JayWelborn/Rango | 962ed888e33c591074c80cbf07f77edca2d4d821 | 41c832c9bc791f910b948fe9026cd41fc12cf129 | refs/heads/master | 2021-01-22T22:16:35.134729 | 2017-09-22T19:05:02 | 2017-09-22T19:05:02 | 92,766,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | """tango_with_django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url, include
from django.conf.urls.static import static
from rango import views
urlpatterns = [
    url(r'^rango/', include('rango.urls')),
    url(r'^admin/', admin.site.urls),
    # Listed before the registration include below so the custom view takes
    # precedence over django-registration's default register URL.
    url(r'^accounts/register/$',
        views.MyRegistrationView.as_view(),
        name='registration_register'),
    url(r'^accounts/', include('registration.backends.simple.urls')),
    url(r'^$', views.index, name='index')
]
# Serve user-uploaded media through Django itself during development only.
if settings.DEBUG:
    urlpatterns += static(
        settings.MEDIA_URL,
        document_root=settings.MEDIA_ROOT
    )
| [
"jesse.welborn@gmail.com"
] | jesse.welborn@gmail.com |
19e82b23b4f97c9a1c1bd88fe98d27f73b01476b | 3faf4b9fb76145b2326446bc6bc190a5712b3b62 | /Algorithms/0695 Max Area of Island.py | cdc720cba3b793eb17af5a9da0ce13bc619edfd6 | [] | no_license | cravo123/LeetCode | b93c18f3e4ca01ea55f4fdebceca76ccf664e55e | 4c1288c99f78823c7c3bac0ceedd532e64af1258 | refs/heads/master | 2021-07-12T11:10:26.987657 | 2020-06-02T12:24:29 | 2020-06-02T12:24:29 | 152,670,206 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | # Solution 1, DFS
class Solution:
    def dfs(self, i, j, grid, m, n):
        """Flood-fill the island containing (i, j), zeroing visited cells.

        Returns the number of cells in the region. Assumes grid[i][j] == 1.
        """
        grid[i][j] = 0
        size = 1
        for x, y in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
            if 0 <= x < m and 0 <= y < n and grid[x][y] == 1:
                size += self.dfs(x, y, grid, m, n)
        return size

    def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
        """Return the area of the largest 4-connected island of 1s.

        Note: the grid is consumed (visited cells are zeroed in place).
        """
        rows = len(grid)
        cols = len(grid[0]) if grid else 0
        best = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 1:
                    best = max(best, self.dfs(r, c, grid, rows, cols))
        return best
# Solution 1.1, another DFS implementation
class Solution:
    def dfs(self, i, j, A, m, n):
        """Return the size of the 1-region at (i, j); 0 for water/out of range.

        Visited cells are zeroed in place so they are counted only once.
        """
        inside = 0 <= i < m and 0 <= j < n
        if not inside or A[i][j] == 0:
            return 0
        A[i][j] = 0
        return (1
                + self.dfs(i - 1, j, A, m, n)
                + self.dfs(i + 1, j, A, m, n)
                + self.dfs(i, j - 1, A, m, n)
                + self.dfs(i, j + 1, A, m, n))

    def maxAreaOfIsland(self, grid: 'List[List[int]]') -> 'int':
        """Largest 4-connected island area; the grid is consumed in place."""
        m = len(grid)
        n = len(grid[0]) if grid else 0
        return max(
            (self.dfs(i, j, grid, m, n) for i in range(m) for j in range(n)),
            default=0)
| [
"cc3630@columbia.edu"
] | cc3630@columbia.edu |
c3f9cd2060490f4c7f83b91a605c42694ee81a49 | ee92057a8ebc91ba90d8055a9bece25d24211499 | /kattis/maximum-points-you-can-obtain-from-cards/maximum-points-you-can-obtain-from-cards.py | 5705a97eb87d0bfc290539344a5fe68a41a69367 | [] | no_license | KendrickAng/competitive-programming | ce0a4f44f592f295c2f8cd7e854139f18fb8853a | f9768a2020f801b8e4787cc853398b8258a0bf09 | refs/heads/master | 2022-05-29T07:21:32.607089 | 2022-04-24T16:35:14 | 2022-04-24T16:35:14 | 254,402,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | class Solution:
def maxScore(self, cardPoints: List[int], k: int) -> int:
# calculate prefix sums
n = len(cardPoints)
prefix = [0]
for i in range(n):
tmp = prefix[-1] + cardPoints[i]
prefix.append(tmp)
#print(prefix)
# move fixed sliding window
left = k
right = n
ans = -1
while left >= 0:
leftSum = prefix[left]
rightSum = prefix[n] - prefix[right]
points = leftSum + rightSum
#print(f"{leftSum} {rightSum} {points}")
ans = max(ans, points)
left -= 1
right -= 1
return ans
"""
1 2 3 4 5 6 1, k = 3
1 6 5
9 7 7 9 7 7 9, k = 7
take all
idea: fixed sliding window, length k
1. calculate prefix sums
2. move fixed window (size n - k) from rightmost to leftmost
""" | [
"kendrick.wh@outlook.com"
] | kendrick.wh@outlook.com |
37b64b825968043ad9deba9d9b7b60f106080bfb | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/coderByte_20200518233550.py | ef8c2488afe45ffa8446a7d96de9b95d1352174d | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py |
def QuestionsMarks(str):
    """CoderByte "Questions Marks" challenge.

    Scan the string left to right: for every pair of consecutive digits that
    sums to 10, there must be exactly three '?' characters between them.
    Return "true" if at least one such pair exists and every such pair
    satisfies the rule; otherwise return "false".

    Fix: the previous draft referenced undefined names (s, numbers, others)
    and contained the typo int(j[i]); it could never run.

    Note: the parameter is named `str` (shadowing the builtin) to preserve
    the original signature.
    """
    prev_digit = None   # last digit seen so far
    marks_since = 0     # number of '?' seen since that digit
    found_pair = False  # at least one pair summing to 10 was seen
    for ch in str:
        if ch == '?':
            marks_since += 1
        elif ch.isdigit():
            if prev_digit is not None and prev_digit + int(ch) == 10:
                if marks_since != 3:
                    return 'false'
                found_pair = True
            prev_digit = int(ch)
            marks_since = 0
    return 'true' if found_pair else 'false'

# keep this function call here
QuestionsMarks("acc?7??sss?3rr1??????5")
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
ddaa9fc9222adb6cea73a896b2bad36c63421164 | 17993dcca87d490bc9841437309f309a5592ab38 | /Codes/linear_regression/exercise/face_solution.py | 3ee098e36b1d89f939fdf469515fb53525213a62 | [] | no_license | dreamlikexin/machine_learning | bc86ea15ef8552ad1be78a5bc65fb74a2cdb274e | 850e87025270847210b6ad188d2da181983a72c7 | refs/heads/master | 2022-01-16T09:51:20.538340 | 2019-06-19T16:27:26 | 2019-06-19T16:27:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression

# Demo: predict the lower half of a face from its upper half with a linear model.
faces = fetch_olivetti_faces()
flattened = faces.images.reshape((len(faces.images), -1))
n_pixels = flattened.shape[1]

# Upper half of each image is the input, lower half is the regression target.
X = flattened[:, :(n_pixels + 1) // 2]
y = flattened[:, n_pixels // 2:]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)

model = LinearRegression().fit(X_train, y_train)
y_pred = model.predict(X_test)

# Compare ground truth with the reconstruction for one held-out face.
image_shape = (64, 64)
face_idx = 5
true_face = np.hstack((X_test[face_idx], y_test[face_idx]))
pred_face = np.hstack((X_test[face_idx], y_pred[face_idx]))
plt.figure(0)
plt.imshow(true_face.reshape(image_shape), interpolation="nearest")
plt.figure(1)
plt.imshow(pred_face.reshape(image_shape), interpolation="nearest")
plt.show()
| [
"wanglei@wanglei-mbp.local"
] | wanglei@wanglei-mbp.local |
1a7a8fdd1634fdda99eea85a2692323fb7209a0f | a411a55762de11dc2c9d913ff33d2f1477ac02cf | /lte/gateway/python/integ_tests/s1aptests/test_ipv6_non_nat_dp_ul_tcp.py | 43041ea8822b77bd9e137145804ac43a2c93d85f | [
"BSD-3-Clause"
] | permissive | magma/magma | 0dc48c1513d9968bd05fb7589f302c192b7c0f94 | 0e1d895dfe625681229e181fbc2dbad83e13c5cb | refs/heads/master | 2023-09-04T09:31:56.140395 | 2023-08-29T13:54:49 | 2023-08-29T13:54:49 | 170,803,235 | 1,219 | 525 | NOASSERTION | 2023-09-07T17:45:42 | 2019-02-15T04:46:24 | C++ | UTF-8 | Python | false | false | 4,053 | py | """
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import time
import unittest
import s1ap_types
from integ_tests.s1aptests import s1ap_wrapper
from s1ap_utils import MagmadUtil
class TestIpv6NonNatDpUlTcp(unittest.TestCase):
    """Integration Test: TestAttachDetachNonNatDpUlTcp"""
    def __init__(self, method_name: str) -> None:
        """Initialize unittest class"""
        super().__init__(methodName=method_name)
        self.magma_utils = MagmadUtil(None)
    def setUp(self):
        """Initialize before test case execution"""
        # The gateway must be in non-NAT (IPv6) mode before the S1AP wrapper
        # is created.
        self.magma_utils.disable_nat(ip_version=6)
        self._s1ap_wrapper = s1ap_wrapper.TestWrapper(ip_version=6)
    def tearDown(self):
        """Cleanup after test case execution"""
        self._s1ap_wrapper.cleanup()
        # Restore the default NAT configuration for subsequent tests.
        self.magma_utils.enable_nat(ip_version=6)
    def test_ipv6_non_nat_dp_ul_tcp(self):
        """Basic attach/detach and UL TCP ipv6 data test with a single UE"""
        num_ues = 1
        # Bearer/QoS profile for the IPv6-only "magma" APN.
        magma_apn = {
            "apn_name": "magma",  # APN-name
            "qci": 9,  # qci
            "priority": 15,  # priority
            "pre_cap": 1,  # preemption-capability
            "pre_vul": 0,  # preemption-vulnerability
            "mbr_ul": 200000000,  # MBR UL
            "mbr_dl": 100000000,  # MBR DL
            "pdn_type": 1,  # PDN Type 0-IPv4,1-IPv6,2-IPv4v6
        }
        wait_for_s1 = True
        ue_ips = ["fdee::"]
        apn_list = [magma_apn]
        self._s1ap_wrapper.configUEDevice(num_ues, [], ue_ips)
        req = self._s1ap_wrapper.ue_req
        ue_id = req.ue_id
        print(
            "************************* Running End to End attach for ",
            "UE id ",
            req.ue_id,
        )
        self._s1ap_wrapper.configAPN(
            "IMSI" + "".join([str(j) for j in req.imsi]),
            apn_list,
            default=False,
        )
        # Now actually complete the attach
        self._s1ap_wrapper.s1_util.attach(
            ue_id,
            s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
            s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
            s1ap_types.ueAttachAccept_t,
            pdn_type=2,
        )
        # Wait on EMM Information from MME
        self._s1ap_wrapper._s1_util.receive_emm_info()
        # Receive Router Advertisement message
        apn = "magma"
        response = self._s1ap_wrapper.s1_util.get_response()
        assert response.msg_type == s1ap_types.tfwCmd.UE_ROUTER_ADV_IND.value
        router_adv = response.cast(s1ap_types.ueRouterAdv_t)
        print(
            "********** Received Router Advertisement for APN-%s"
            " bearer id-%d" % (apn, router_adv.bearerId),
        )
        # The address arrives as a fixed-size, NUL-padded character array.
        ipv6_addr = "".join([chr(i) for i in router_adv.ipv6Addr]).rstrip(
            "\x00",
        )
        print("********** UE IPv6 address: ", ipv6_addr)
        # Raises ValueError if the advertised address is malformed.
        ipaddress.ip_address(ipv6_addr)
        self._s1ap_wrapper.s1_util.update_ipv6_address(ue_id, ipv6_addr)
        print("Sleeping for 5 secs")
        time.sleep(5)
        print(
            "************************* Running UE uplink (TCP) for UE id ",
            req.ue_id,
        )
        with self._s1ap_wrapper.configUplinkTest(req, duration=1) as test:
            test.verify()
        print(
            "************************* Running UE detach for UE id ",
            req.ue_id,
        )
        # Now detach the UE
        self._s1ap_wrapper.s1_util.detach(
            req.ue_id,
            s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value,
            wait_for_s1,
        )
if __name__ == "__main__":
    # Allow running this integration test directly via the unittest runner.
    unittest.main()
| [
"noreply@github.com"
] | magma.noreply@github.com |
f26cfb2eec843676ac2c9085e502fcee2b1f71ce | 43842089122512e6b303ebd05fc00bb98066a5b2 | /dynamic_programming/213_house_robber_ii.py | 2b579f56defc49ef1f5790ac7012f6ace3142db0 | [] | no_license | mistrydarshan99/Leetcode-3 | a40e14e62dd400ddb6fa824667533b5ee44d5f45 | bf98c8fa31043a45b3d21cfe78d4e08f9cac9de6 | refs/heads/master | 2022-04-16T11:26:56.028084 | 2020-02-28T23:04:06 | 2020-02-28T23:04:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py |
"""
213. House Robber II
Medium
627
18
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed. All houses at this place are arranged in a circle. That means the first house is the neighbor of the last one. Meanwhile, adjacent houses have security system connected and it will automatically contact the police if two adjacent houses were broken into on the same night.
Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of money you can rob tonight without alerting the police.
Example 1:
Input: [2,3,2]
Output: 3
Explanation: You cannot rob house 1 (money = 2) and then rob house 3 (money = 2),
because they are adjacent houses.
Example 2:
Input: [1,2,3,1]
Output: 4
Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).
Total amount you can rob = 1 + 3 = 4.
"""
def robbery(houses):
    """Return the most money robbable from *houses* arranged in a circle.

    Adjacent houses cannot both be robbed, and because the street is
    circular the first and last house are also adjacent.  Amounts are
    non-negative integers (LeetCode 213).

    input houses: list[int]
    output total: int
    """
    # With three houses or fewer every pair is (circularly) adjacent, so at
    # most one house can be robbed; an empty street yields 0.
    if len(houses) <= 3:
        return max(houses or [0])

    def _rob_row(row):
        """Classic linear house-robber DP in O(1) extra space."""
        # ``skip``/``take`` are the best totals up to the previous house,
        # excluding / including it respectively.
        skip = take = 0
        for money in row:
            # Either keep the previous best, or rob this house on top of
            # the best total that excludes the previous house.
            skip, take = take, max(take, skip + money)
        return take

    # Breaking the circle: any valid plan omits the first house or the last
    # house, so solve both linear sub-problems and keep the better one.
    return max(_rob_row(houses[1:]), _rob_row(houses[:-1]))


houses = [1, 3, 1, 3, 100]
print(robbery(houses))
| [
"maplesuger@hotmail.com"
] | maplesuger@hotmail.com |
24e705f289cc6429a70f3dce754c533a3befe52e | 991d0b40a9ddb5ea6a72e3c018a74c6135792909 | /freenodejobs/admin/views.py | 9d6fb0fbcfa91721fbec85b8e0219cf6646f0888 | [
"MIT"
] | permissive | freenode/freenodejobs | 9afe699713efb915f5008c1ee2299a25604ab351 | 235388c88ac6f984f36cd20074542e21369bcc8b | refs/heads/master | 2021-06-11T05:09:14.255759 | 2019-10-22T13:46:44 | 2019-10-22T13:46:44 | 128,451,423 | 4 | 5 | MIT | 2021-03-18T20:31:54 | 2018-04-06T19:16:05 | JavaScript | UTF-8 | Python | false | false | 2,306 | py | from django.contrib import messages
from django.shortcuts import render, redirect, get_object_or_404
from freenodejobs.jobs.enums import StateEnum
from freenodejobs.utils.paginator import AutoPaginator
from freenodejobs.utils.decorators import staff_required
from freenodejobs.jobs.models import Job
from .forms import ApproveForm, RejectForm, RemoveForm
@staff_required
def view(request, state_slug=''):
    """List jobs filtered by workflow state.

    An unknown (or empty) ``state_slug`` redirects to the default
    "waiting for approval" listing.
    """
    slug_key = state_slug.upper()
    if slug_key not in StateEnum.__members__:
        fallback = StateEnum.WAITING_FOR_APPROVAL.name.lower()
        return redirect('admin:view', fallback)
    state = StateEnum[slug_key]
    context = {
        'page': AutoPaginator(request, Job.objects.filter(state=state), 20).current_page(),
        'state': state,
        'by_state': Job.objects.by_state(),
    }
    return render(request, 'admin/view.html', context)
def _job_form_view(request, slug, form_class, template, success_message):
    """Shared implementation of the approve/reject/remove moderation views.

    Looks up the job by ``slug``; on a valid POST, saves ``form_class`` with
    the acting user, flashes ``success_message`` and redirects back to the
    listing.  Otherwise (GET, or an invalid POST) renders ``template`` with
    the (possibly bound) form so errors are shown.
    """
    job = get_object_or_404(Job, slug=slug)
    if request.method == 'POST':
        form = form_class(job, request.POST)
        if form.is_valid():
            form.save(request.user)
            messages.success(request, success_message)
            return redirect('admin:view')
    else:
        form = form_class(job)
    return render(request, template, {
        'job': job,
        'form': form,
    })


@staff_required
def approve(request, slug):
    """Approve a job posting."""
    return _job_form_view(
        request, slug, ApproveForm, 'admin/approve.html', "Job was approved.")


@staff_required
def reject(request, slug):
    """Reject a job posting."""
    return _job_form_view(
        request, slug, RejectForm, 'admin/reject.html', "Job was rejected.")


@staff_required
def remove(request, slug):
    """Remove a previously published job posting."""
    return _job_form_view(
        request, slug, RemoveForm, 'admin/remove.html', "Job was removed.")
| [
"chris@chris-lamb.co.uk"
] | chris@chris-lamb.co.uk |
86b92b8f212a1e0ccacfc949fefe1da3ffb5b062 | 6dd5bfe305bfc8d7fccf1f9bd6b3ec2250fc574c | /tensorflow_probability/python/bijectors/cholesky_outer_product_test.py | 12b437fe8c7502373507a9937c4af664181bef2f | [
"Apache-2.0"
] | permissive | snehil03july/probability | dd38cf7abba01b6702362476150d67092ce754b2 | 5f576230f1e261a823e20a49c442ff38c8f381d3 | refs/heads/master | 2020-03-28T10:24:46.378464 | 2018-09-08T22:34:22 | 2018-09-08T22:34:25 | 148,106,347 | 1 | 0 | Apache-2.0 | 2018-09-10T06:01:26 | 2018-09-10T06:01:26 | null | UTF-8 | Python | false | false | 6,045 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python import bijectors as tfb
class CholeskyOuterProductBijectorTest(tf.test.TestCase):
  """Tests the correctness of the Y = X @ X.T transformation."""
  def testBijectorMatrix(self):
    """Checks forward/inverse and both log-det-Jacobians on a 2x2 batch."""
    with self.test_session():
      bijector = tfb.CholeskyOuterProduct(validate_args=True)
      self.assertEqual("cholesky_outer_product", bijector.name)
      # Batch of two lower-triangular Cholesky factors.
      x = [[[1., 0], [2, 1]], [[np.sqrt(2.), 0], [np.sqrt(8.), 1]]]
      y = np.matmul(x, np.transpose(x, axes=(0, 2, 1)))
      # Fairly easy to compute differentials since we have 2x2.
      dx_dy = [[[2. * 1, 0, 0],
                [2, 1, 0],
                [0, 2 * 2, 2 * 1]],
               [[2 * np.sqrt(2.), 0, 0],
                [np.sqrt(8.), np.sqrt(2.), 0],
                [0, 2 * np.sqrt(8.), 2 * 1]]]
      # The Jacobian is triangular, so its inverse log-determinant is minus
      # the sum of the log of its diagonal entries.
      ildj = -np.sum(
          np.log(np.asarray(dx_dy).diagonal(
              offset=0, axis1=1, axis2=2)),
          axis=1)
      self.assertAllEqual((2, 2, 2), bijector.forward(x).get_shape())
      self.assertAllEqual((2, 2, 2), bijector.inverse(y).get_shape())
      self.assertAllClose(y, self.evaluate(bijector.forward(x)))
      self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
      self.assertAllClose(
          ildj,
          self.evaluate(
              bijector.inverse_log_det_jacobian(
                  y, event_ndims=2)), atol=0., rtol=1e-7)
      # Forward and inverse log-det-Jacobians must be negatives of each other.
      self.assertAllClose(
          self.evaluate(-bijector.inverse_log_det_jacobian(
              y, event_ndims=2)),
          self.evaluate(bijector.forward_log_det_jacobian(
              x, event_ndims=2)),
          atol=0.,
          rtol=1e-7)
  def testNoBatchStaticJacobian(self):
    """Jacobian check for an unbatched identity input with static shapes."""
    x = np.eye(2)
    bijector = tfb.CholeskyOuterProduct()
    # The Jacobian matrix is 2 * tf.eye(2), which has jacobian determinant 4.
    self.assertAllClose(
        np.log(4),
        self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=2)))
  def testNoBatchDynamicJacobian(self):
    """Same Jacobian check, fed through a shape-unknown placeholder."""
    x = np.eye(2)
    bijector = tfb.CholeskyOuterProduct()
    x_pl = tf.placeholder(tf.float32)
    with self.test_session():
      log_det_jacobian = bijector.forward_log_det_jacobian(x_pl, event_ndims=2)
      # The Jacobian matrix is 2 * tf.eye(2), which has jacobian determinant 4.
      self.assertAllClose(
          np.log(4),
          log_det_jacobian.eval({x_pl: x}))
  def testNoBatchStatic(self):
    """Round-trips an unbatched matrix with statically-known shapes."""
    x = np.array([[1., 0], [2, 1]])  # np.linalg.cholesky(y)
    y = np.array([[1., 2], [2, 5]])  # np.matmul(x, x.T)
    with self.test_session() as sess:
      y_actual = tfb.CholeskyOuterProduct().forward(x=x)
      x_actual = tfb.CholeskyOuterProduct().inverse(y=y)
      [y_actual_, x_actual_] = sess.run([y_actual, x_actual])
      self.assertAllEqual([2, 2], y_actual.get_shape())
      self.assertAllEqual([2, 2], x_actual.get_shape())
      self.assertAllClose(y, y_actual_)
      self.assertAllClose(x, x_actual_)
  def testNoBatchDeferred(self):
    """Round-trips an unbatched matrix through unknown-shape placeholders."""
    x = np.array([[1., 0], [2, 1]])  # np.linalg.cholesky(y)
    y = np.array([[1., 2], [2, 5]])  # np.matmul(x, x.T)
    with self.test_session() as sess:
      x_pl = tf.placeholder(tf.float32)
      y_pl = tf.placeholder(tf.float32)
      y_actual = tfb.CholeskyOuterProduct().forward(x=x_pl)
      x_actual = tfb.CholeskyOuterProduct().inverse(y=y_pl)
      [y_actual_, x_actual_] = sess.run([y_actual, x_actual],
                                        feed_dict={x_pl: x, y_pl: y})
      # Shapes are unknown at graph-construction time when fed dynamically.
      self.assertEqual(None, y_actual.get_shape())
      self.assertEqual(None, x_actual.get_shape())
      self.assertAllClose(y, y_actual_)
      self.assertAllClose(x, x_actual_)
  def testBatchStatic(self):
    """Round-trips a batch of matrices with statically-known shapes."""
    x = np.array([[[1., 0],
                   [2, 1]],
                  [[3., 0],
                   [1, 2]]])  # np.linalg.cholesky(y)
    y = np.array([[[1., 2],
                   [2, 5]],
                  [[9., 3],
                   [3, 5]]])  # np.matmul(x, x.T)
    with self.test_session() as sess:
      y_actual = tfb.CholeskyOuterProduct().forward(x=x)
      x_actual = tfb.CholeskyOuterProduct().inverse(y=y)
      [y_actual_, x_actual_] = sess.run([y_actual, x_actual])
      self.assertEqual([2, 2, 2], y_actual.get_shape())
      self.assertEqual([2, 2, 2], x_actual.get_shape())
      self.assertAllClose(y, y_actual_)
      self.assertAllClose(x, x_actual_)
  def testBatchDeferred(self):
    """Round-trips a batch of matrices through unknown-shape placeholders."""
    x = np.array([[[1., 0],
                   [2, 1]],
                  [[3., 0],
                   [1, 2]]])  # np.linalg.cholesky(y)
    y = np.array([[[1., 2],
                   [2, 5]],
                  [[9., 3],
                   [3, 5]]])  # np.matmul(x, x.T)
    with self.test_session() as sess:
      x_pl = tf.placeholder(tf.float32)
      y_pl = tf.placeholder(tf.float32)
      y_actual = tfb.CholeskyOuterProduct().forward(x=x_pl)
      x_actual = tfb.CholeskyOuterProduct().inverse(y=y_pl)
      [y_actual_, x_actual_] = sess.run([y_actual, x_actual],
                                        feed_dict={x_pl: x, y_pl: y})
      # Shapes are unknown at graph-construction time when fed dynamically.
      self.assertEqual(None, y_actual.get_shape())
      self.assertEqual(None, x_actual.get_shape())
      self.assertAllClose(y, y_actual_)
      self.assertAllClose(x, x_actual_)
if __name__ == "__main__":
  tf.test.main()
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
59225ea2848e31f2eec61a545e3c036792b4f7a2 | 6db515644769c94166d2023b05c1f5ea57d3df51 | /blog/migrations/0001_initial.py | 090c4757842dddd9a2cb5656d58f7707b24683fb | [] | no_license | toastding/footprint | fe7da2340e826438d1cb17d18a5b1bdf2018d8a0 | e9af8163706efdce8e5732e9dfaedd6ecb2fb445 | refs/heads/master | 2022-11-17T22:38:40.539558 | 2020-07-12T04:43:37 | 2020-07-12T04:43:37 | 278,774,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | # Generated by Django 3.0.8 on 2020-07-11 08:56
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; first migration of the
    # ``blog`` app.  Creates Blog, BlogComment and BlogAuthor and wires the
    # Blog -> BlogAuthor foreign key afterwards (Blog is created before
    # BlogAuthor exists, hence the separate AddField at the end).
    initial = True
    dependencies = [
        # Depends on whichever user model the project configured;
        # swappable_dependency resolves settings.AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Blog posts, newest first.
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField(help_text='Enter your blog text here.', max_length=2000)),
                ('post_date', models.DateField(default=datetime.date.today)),
            ],
            options={
                'ordering': ['-post_date'],
            },
        ),
        # Comments on a blog post; the comment's author may be deleted
        # (SET_NULL) but deleting the post removes its comments (CASCADE).
        migrations.CreateModel(
            name='BlogComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(help_text='Enter comment about blog here..', max_length=1000)),
                ('post_date', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
                ('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Blog')),
            ],
            options={
                'ordering': ['post_date'],
            },
        ),
        # One-to-one author profile attached to a user account.
        migrations.CreateModel(
            name='BlogAuthor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField(help_text='Enter your bio details here.', max_length=400)),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['user', 'bio'],
            },
        ),
        # Link each Blog to its BlogAuthor (nullable, author removal keeps
        # the post).
        migrations.AddField(
            model_name='blog',
            name='author',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='blog.BlogAuthor'),
        ),
    ]
| [
"ding02211995@gmail.com"
] | ding02211995@gmail.com |
9f08f99d478449a90db4b6dd7d4a9e87595f22d8 | 1316cd6763e784811c769c1de577235c921af0de | /Widgets/Striptool/setup.py | 3766198b8ae55d1005c727f74a195f038871b27f | [] | no_license | VELA-CLARA-software/Software | a6fb6b848584e5893fd6939a447d23134ce636cc | 2e2a88ac0b2b03a495c868d2e11e6481e05097c3 | refs/heads/master | 2023-02-05T07:40:58.260798 | 2023-01-27T09:39:09 | 2023-01-27T09:39:09 | 69,860,536 | 7 | 3 | null | 2021-04-07T14:17:07 | 2016-10-03T10:20:46 | Mathematica | UTF-8 | Python | false | false | 312 | py | from distutils.core import setup
# ``install_requires`` is a setuptools feature: plain ``distutils.core.setup``
# silently ignores it, so the declared dependencies were never enforced.
# Prefer setuptools when available and fall back to distutils (deprecated
# since Python 3.10) otherwise.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Package metadata for the Striptool PyQt4 strip-chart widget.
setup(
    name='Striptool',
    version='1.1',
    packages=[''],
    url='',
    license='',
    author='jkj62',
    author_email='james.jones@stfc.ac.uk',
    description='Strip tool widget for PyQt4',
    install_requires=['pyqtgraph>=0.10.0', 'numpy', 'peakutils>=1.0.3'],
)
| [
"james.jones@stfc.ac.uk"
] | james.jones@stfc.ac.uk |
a508e4bb0d77eab214532f5da0b2c7c0c380210a | c068f19f14749c7e29a5450871da81d0e4b57348 | /inasilentway/collection.py | d0443b44fd449af559bde16b95b272f8985e3139 | [] | no_license | davidmiller/inasilentway | 42b94adea60143a70e7fa82ecd8504fa6a1142f1 | c88ccd1cc0f79aa167f8a2ff082a20a043d64556 | refs/heads/master | 2022-12-10T11:47:51.052379 | 2021-01-22T12:41:33 | 2021-01-22T12:41:33 | 161,032,907 | 0 | 0 | null | 2022-12-08T01:29:25 | 2018-12-09T11:38:22 | HTML | UTF-8 | Python | false | false | 5,074 | py | # """
# Collection utilities
# """
# import pickle
# import time
# import ffs
# import discogs_client as discogs
# from inasilentway import utils
# HERE = ffs.Path.here()
# CACHE_PATH = HERE / '../data/collection.pickle'
# def get_collection():
# if CACHE_PATH:
# with open(CACHE_PATH.abspath, 'rb') as fh:
# return pickle.load(fh)
# else:
# print("No local record cache larry :(")
# def save_record_from_discogs_data(record):
# """
# Given a Discogs record instance, save it to our database
# """
# from inasilentway import models
# artists = []
# for artist in record.release.artists:
# art, _ = models.Artist.objects.get_or_create(discogs_id=artist.id)
# art.name = artist.name
# try:
# art.images = artist.images
# art.url = artist.url
# art.profile = artist.profile
# art.urls = artist.urls
# except discogs.exceptions.HTTPError:
# pass # 404 on the artist images happens sometimes apparently?
# art.save()
# artists.append(art)
# label, _ = models.Label.objects.get_or_create(
# discogs_id=record.release.labels[0].id)
# label.name = record.release.labels[0].name
# label.save()
# rec, _ = models.Record.objects.get_or_create(discogs_id=record.release.id)
# for artist in artists:
# rec.artist.add(artist)
# for genre in record.release.genres:
# g, _ = models.Genre.objects.get_or_create(name=genre)
# rec.genres.add(g)
# if record.release.styles:
# for style in record.release.styles:
# s, _ = models.Style.objects.get_or_create(name=style)
# rec.styles.add(s)
# api = discogs.Client(
# 'inasilentway',
# user_token='PYffmSZeeqHUYaMWMEjwGhqfSWtOBFcPOggoixmD'
# )
# api_release = api.release(rec.discogs_id)
# rec.thumb = api_release.thumb
# rec.label = label
# rec.title = record.release.title
# rec.year = record.release.year
# rec.images = record.release.images
# rec.country = record.release.country
# rec.notes = record.release.notes
# rec.formats = '{}, {}'.format(
# record.release.formats[0]['name'],
# ' '.join(record.release.formats[0]['descriptions'])
# )
# rec.url = record.release.url
# rec.status = record.release.status
# rec.save()
# # Tracks don't have an ID so kill them all
# models.Track.objects.filter(record=rec).delete()
# for track in record.release.tracklist:
# models.Track(
# record=rec,
# duration=track.duration,
# position=track.position,
# title=track.title
# ).save()
# return rec
# """
# Commandline entrypoints from ./shhh (python -m inasilentway )
# """
# def download(args):
# """
# Commandline entrypoint to download the collection if we
# have less records in it than in our local cache
# TODO: Convert to Django?
# """
# records = get_collection()
# if records is None:
# records = []
# api = discogs.Client('Inasilentway')
# me = api.user('thatdavidmiller')
# if len(records) != me.num_collection:
# print('fetching data...')
# records = [r for r in me.collection_folders[0].releases]
# print('fetched record data')
# print('{} records'.format(len(records)))
# with open(CACHE_PATH.abspath, 'wb') as fh:
# pickle.dump(records, fh)
# print('written record data to local cache')
# def load_django(args):
# """
# Commandline entrypoint to load our collection into Django
# """
# utils.setup_django()
# download(None)
# collection = get_collection()
# ADDED = 0
# def add_record(record):
# rec = save_record_from_discogs_data(record)
# print('Added {}'.format(rec.title))
# from inasilentway import models
# for record in collection:
# print('Looking at {}'.format(record.release.title))
# # TODO: Undo this when we've figured out how to not freak out
# # the discogs API limits
# if models.Record.objects.filter(discogs_id=record.release.id).exists():
# print(
# 'Found {} in local database, skipping'.format(
# record.release.title
# )
# )
# continue
# try:
# add_record(record)
# ADDED += 1
# if ADDED == 100:
# break
# except discogs.exceptions.HTTPError:
# print(
# "Got a quick requests warning, sleep for a bit and retry once"
# )
# time.sleep(60)
# add_record(record)
# ADDED += 1
# if ADDED == 100:
# break
# # Prevent HTTPError: 429: You are making requests too quickly.
# time.sleep(2)
# print('Count: {}'.format(models.Record.objects.count()))
| [
"david@deadpansincerity.com"
] | david@deadpansincerity.com |
8571d468f570736fbf2eb36541b40bad5f1b0fee | 1599f9a44e5ec492c019036195a769fa3ed9e04b | /tests/test_api/test_v1/test_views/test_index.py | 652ea7f07f022c95414ee40b62085c394159254c | [] | no_license | Mikaelia/AirBnB_clone_v3 | 63b70a2c8874e9d6d6e60ab3c6b2d1511136303a | f628759076ccdd2e5aecf61c8d079505b4d17412 | refs/heads/master | 2020-03-26T22:31:17.315057 | 2018-10-09T01:10:30 | 2018-10-09T01:10:30 | 145,465,466 | 0 | 0 | null | 2018-08-29T19:19:21 | 2018-08-20T20:09:48 | Python | UTF-8 | Python | false | false | 1,334 | py | #!/usr/bin/python3
"""
Unit Test for api v1 Flask App
"""
import inspect
import pep8
import web_flask
import unittest
from os import stat
import api
module = api.v1.views.index
class TestIndexDocs(unittest.TestCase):
    """Class for testing Hello Route docs"""
    # Evaluated once at class-definition time: every function defined in the
    # views/index module under test.
    all_funcs = inspect.getmembers(module, inspect.isfunction)
    def test_doc_file(self):
        """... documentation for the file"""
        actual = module.__doc__
        self.assertIsNotNone(actual)
    def test_all_function_docs(self):
        """... tests for ALL DOCS for all functions"""
        all_functions = TestIndexDocs.all_funcs
        for function in all_functions:
            # getmembers yields (name, object) pairs; [1] is the function.
            self.assertIsNotNone(function[1].__doc__)
    def test_pep8(self):
        """... tests if file conforms to PEP8 Style"""
        pep8style = pep8.StyleGuide(quiet=True)
        errors = pep8style.check_files(['api/v1/views/index.py'])
        self.assertEqual(errors.total_errors, 0, errors.messages)
    def test_file_is_executable(self):
        """... tests if file has correct permissions so user can execute"""
        file_stat = stat('api/v1/views/index.py')
        # st_mode is the first entry of the stat result; the octal-string
        # slicing below extracts the owner permission digit.
        # NOTE(review): this parsing is brittle (depends on the length of the
        # octal representation) -- confirm it behaves as intended.
        permissions = str(oct(file_stat[0]))
        actual = int(permissions[5:-2]) >= 5
        self.assertTrue(actual)
if __name__ == '__main__':
    # BUG FIX: ``unittest.main`` was previously referenced without being
    # called, so running this file directly executed no tests at all.
    unittest.main()
| [
"328@holbertonschool.com"
] | 328@holbertonschool.com |
f7a3f1a1e14e46defed815dd909775f9fd84d89e | 0d5de943909877c01b485d8a918d8bef0cf9e196 | /plugins/RemoveTriggerArea/PluginRemoveTriggerArea.py | db28cd82e00fc81726f38db0b6d1c935c1c47617 | [
"MIT"
] | permissive | baverman/scribes-goodies | 31e2017d81f04cc01e9738e96ceb19f872a3d280 | f6ebfe62e5103d5337929648109b4e610950bced | refs/heads/master | 2021-01-21T10:13:08.397980 | 2013-09-25T16:33:05 | 2013-09-25T16:33:05 | 854,207 | 2 | 1 | null | 2013-09-25T16:33:05 | 2010-08-22T03:12:39 | Python | UTF-8 | Python | false | false | 802 | py | from scribes.helpers import Trigger, TriggerManager
import subprocess
from gettext import gettext as _
# Module-level plugin metadata -- presumably read by the Scribes plugin
# loader to register this plugin (``class_name`` must match the class defined
# below); verify against the host editor's plugin API.
name = "Remove trigger area plugin"
authors = ["Anton Bobrov <bobrov@vl.ru>"]
version = 0.1
autoload = True
class_name = "TriggerAreaPlugin"
short_description = "Removes trigger area"
long_description = "Removes trigger area"
# Keyboard trigger bound to Ctrl+Alt+M; the localised strings are the
# trigger's description and menu category.
trigger = Trigger("show-full-view", "<ctrl><alt>m",
    _("Show editor's fullview"), _("Miscellaneous Operations"))
class TriggerAreaPlugin(object):
    """Scribes plugin that switches the editor to its full view when the
    "show-full-view" trigger (Ctrl+Alt+M) fires."""
    def __init__(self, editor):
        # Keep a reference to the host editor and register this object's
        # @trigger-decorated methods with the trigger manager.
        self.editor = editor
        self.triggers = TriggerManager(editor)
        self.triggers.connect_triggers(self)
    @trigger
    def activate(self, sender):
        """Handler for the "show-full-view" trigger."""
        self.editor.show_full_view()
        return False
    # Lifecycle hooks required by the Scribes plugin API; nothing to do here.
    def load(self): pass
    def unload(self): pass
| [
"bobrov@vl.ru"
] | bobrov@vl.ru |
ba725d409c9ca8e433e32f76419bbea9c92d6199 | 6cad5c613306789b9bd6387c2e7af02515b1c0ad | /django_document/inheritance/migrations/0002_auto_20171011_0700.py | 0ba7650807e9ff33a1802862fd91abed05693195 | [] | no_license | Isaccchoi/django_document_project | ead5eb7b2e932ae5401d5a3cdb3672d3dfd8f9f5 | 980f25c98f99994e6148af16ed82ae4f12d50870 | refs/heads/master | 2021-05-08T06:12:51.261138 | 2017-10-13T05:14:58 | 2017-10-13T05:14:58 | 106,355,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-11 07:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations.  Adds a School model and
    # gives both Student and Teacher an optional foreign key to it.
    dependencies = [
        ('inheritance', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='School',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        # Deleting a School cascades to its students and teachers; the link
        # itself is optional (blank/null allowed).
        migrations.AddField(
            model_name='student',
            name='school',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='inheritance.School'),
        ),
        migrations.AddField(
            model_name='teacher',
            name='school',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='inheritance.School'),
        ),
    ]
| [
"isaccchoi@naver.com"
] | isaccchoi@naver.com |
b76703edb54f9d342ff986b4ad2451ffd03e6498 | f281c9ecd48aedd30469cfbd556bc3319cd8419d | /sendmail/src/main.py | 0b5456e505640bb912ccd4735ffc2480b6fa88dd | [] | no_license | youerning/blog | 5d5edeb4f836d233a4119796f38fc4e33531714e | 59c3704cf5a77bba70a48a5d09db9b165ea59d4b | refs/heads/master | 2023-08-31T04:08:16.461923 | 2023-08-27T01:28:39 | 2023-08-27T01:28:39 | 114,074,235 | 183 | 105 | null | 2023-05-05T02:36:52 | 2017-12-13T04:35:00 | HTML | UTF-8 | Python | false | false | 6,488 | py | # -*- coding: UTF-8 -*-
# @author youerning
# @email 673125641@qq.com
import sys
import base64
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from collections import defaultdict
from io import BytesIO
from os import path
# 第三方库
from jinja2 import Template
from PIL import Image
# 发送邮件所需的信息
mail_to = "<收件人邮箱地址>"
smtp_host = "<邮件服务器>"
smtp_username = "<用户名>"
smtp_password = "<密码>"
subject = "演示邮件"
from_ = "邮件机器人"
# 用于发个收件人的逗号
COMMASPACE = ","
EMAIL_TEMPLATE = """<html>
<head>
<style type="text/css">
table
{
border-collapse: collapse;
margin: 0 auto;
text-align: center;
}
table td, table th
{
border: 1px solid #cad9ea;
color: #666;
height: 30px;
}
table thead th
{
background-color: #CCE8EB;
width: 100px;
}
table tr:nth-child(odd)
{
background: #fff;
}
table tr:nth-child(even)
{
background: #F5FAFA;
}
</style>
</head>
<body>
<p>一共有以下{{record_size}}条数据</p>
<table width="90%" class="table">
<thead>
<tr>
{% for label in labels %}
<th>{{label}}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for item in items %}
<tr>
{% for value in item %}
<td>{{value}}</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
</html>"""
EMAIL_IMAGE_TEMPLATE = """<html>
<head>
<title>Page Title</title>
</head>
<body>
<h3>这是一张图片</h3>
<p><img src="cid:{{image_name}}" height="112" width="200" ></p>
</body>
</html>
"""
EMAIL_ONLINE_IMAGE_TEMPLATE = """<html>
<head>
<title>Page Title</title>
</head>
<body>
<h3>这是一张图片</h3>
<p><img src="cid:{{image_name}}" ></p>
</body>
</html>
"""
def create_image_eamil_contant(fp):
    """Build a multipart/related message embedding the local image *fp*
    inline, referenced from the HTML part through its Content-ID.

    NOTE(review): the function name contains typos ("eamil contant") but is
    kept as-is because callers reference it.  Exits the process if *fp*
    does not exist.
    """
    tpl = Template(EMAIL_IMAGE_TEMPLATE)
    if not path.exists(fp):
        sys.exit("要发送的本地图片不存在")
    msg = MIMEMultipart("related")
    image_name = "demo"
    with open(fp, "rb") as rf:
        mime_image = MIMEImage(rf.read())
        # NOTE: the Content-ID value must be wrapped in angle brackets.
        mime_image.add_header("Content-ID", "<%s>" % image_name)
        msg.attach(mime_image)
    # Render the HTML body that references the image by its Content-ID.
    text = tpl.render(image_name=image_name)
    msg_alternative = MIMEMultipart("alternative")
    msg_alternative.attach(MIMEText(text, "html", "utf-8"))
    msg.attach(msg_alternative)
    return msg
def create_online_image_content():
    """Build a multipart/related message from the base64-encoded image in
    ``demo_base64.txt``: decode it, stack it vertically five times with PIL
    and embed the composed PNG inline via its Content-ID.

    Exits the process if the base64 file does not exist.
    """
    from PIL import Image
    tpl = Template(EMAIL_ONLINE_IMAGE_TEMPLATE)
    fp = "demo_base64.txt"
    if not path.exists(fp):
        sys.exit("要发送的base64编码的图片不存在")
    msg = MIMEMultipart("related")
    image_name = "demo"
    with open(fp, "rb") as rf:
        base64_data = rf.read()
        img_data = base64.b64decode(base64_data)
        # ``Image.open`` expects a file-like object with read/close methods,
        # but the decoded data is plain ``bytes``; BytesIO wraps it into a
        # suitable file-like object.
        img = Image.open(BytesIO(img_data))
        img_width, img_height = img.size
        repeat_times = 5
        # compose images
        ret_img = Image.new(img.mode, (img_width, img_height * repeat_times))
        for index in range(repeat_times):
            ret_img.paste(img, box=(0, index * img_height))
        # MIMEImage needs the *encoded* binary image data, not pixel arrays.
        img_bytes = BytesIO()
        # The format must be given explicitly: saving into a buffer gives PIL
        # no file name to infer the format from.
        ret_img.save(img_bytes, "png")
        mime_image = MIMEImage(img_bytes.getvalue())
        # NOTE: the Content-ID value must be wrapped in angle brackets.
        mime_image.add_header("Content-ID", "<%s>" % image_name)
        msg.attach(mime_image)
    # Render the HTML body that references the image by its Content-ID.
    text = tpl.render(image_name=image_name)
    msg_alternative = MIMEMultipart("alternative")
    msg_alternative.attach(MIMEText(text, "html", "utf-8"))
    msg.attach(msg_alternative)
    return msg
def create_html_content():
    """Build the demo HTML-table mail body (10 rows x 5 columns) and wrap it
    in a ``MIMEText`` part ready for sending."""
    n_rows, n_cols = 10, 5
    headers = ["label-%s" % col for col in range(n_cols)]
    rows = [["item-%s" % col for col in range(n_cols)] for _ in range(n_rows)]
    rendered = Template(EMAIL_TEMPLATE).render(
        record_size=n_rows, items=rows, labels=headers)
    return MIMEText(rendered, "html", "utf-8")
def send_email(msg, mail_to, smtp_host, smtp_username, smtp_password, subject, from_):
    """Fill in the Subject/From/To headers on *msg* and deliver it over SMTP.

    Args:
        msg: MIME message object to send (headers are set in place).
        mail_to: Recipient address, or a list of addresses.
        smtp_host: SMTP server to connect to.
        smtp_username: Account used both to log in and as envelope sender.
        smtp_password: Password for the account.
        subject: Subject line (UTF-8 encoded header).
        from_: Display name for the From header.

    Returns:
        True if the mail was handed to the server, False otherwise.
    """
    msg["Subject"] = Header(subject, "utf-8")
    msg["From"] = Header(from_, "utf-8")
    if not isinstance(mail_to, list):
        mail_to = [mail_to]
    msg["To"] = COMMASPACE.join(mail_to)
    client = None  # bound before ``try`` so the finally block is always safe
    try:
        print("准备连接smtp邮件服务器: %s" % smtp_host)
        client = smtplib.SMTP(smtp_host)
        print("连接成功")
        client.login(smtp_username, smtp_password)
        print("登录成功")
        print("通过邮箱[%s]发送邮件给 %s" % (smtp_username, COMMASPACE.join(mail_to)))
        client.sendmail(smtp_username, mail_to, msg.as_string())
        print("发送成功...")
        return True
    except Exception:
        # Previously the failure path fell through and implicitly returned
        # None; return an explicit False so callers can test the outcome.
        print("发送邮件失败")
        return False
    finally:
        # BUG FIX: if SMTP() itself failed, ``client`` used to be unbound
        # here and the finally block raised NameError, masking the original
        # error.
        if client is not None:
            client.quit()
def send_local_image_email():
    """Send the demo mail embedding the local file ``demo.jpg``."""
    msg = create_image_eamil_contant("demo.jpg")
    send_email(msg,mail_to, smtp_host, smtp_username, smtp_password, subject, from_)
def send_online_image_email():
    """Send the demo mail built from the base64-encoded image."""
    msg = create_online_image_content()
    send_email(msg,mail_to, smtp_host, smtp_username, smtp_password, subject, from_)
def send_html_content():
    """Send the demo mail containing the rendered HTML table."""
    msg = create_html_content()
    send_email(msg,mail_to, smtp_host, smtp_username, smtp_password, subject, from_)
def main():
    # Placeholder entry point; the demo functions above are invoked directly
    # from the ``__main__`` block instead.
    pass
if __name__ == "__main__":
# send_local_image_email()
# send_online_image_email()
send_html_content() | [
"673125641@qq.com"
] | 673125641@qq.com |
7b59d635ceceda572349f0002f30d490b644440d | 8f8d9428c68aa6bb1e6d131c505c217403979955 | /Python/0053. 螺旋三角.py | 442375ec79cdb6f9c5f503f591d033fb29160d4a | [] | no_license | yang4978/Huawei-OJ | fb3799c0f807b853fcfd4574b809fed5740fc6ea | ea3cccb2b070545574fadd64aecd38f73804361d | refs/heads/main | 2023-04-17T22:04:44.526675 | 2021-04-30T03:40:26 | 2021-04-30T03:40:26 | 337,046,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | # If you need to import additional packages or classes, please import here.
def func():
    """Read a side length ``n`` per line from stdin until EOF and print the
    corresponding inward-spiralling "triangle" of numbers for each one."""
    while True:
        try:
            n = int(input())
        except EOFError:
            break
        _print_triangle(_build_triangle(n))


def _build_triangle(n):
    """Return an n x n grid with 1..n*(n+1)//2 laid out along an inward
    spiral triangle; cells outside the triangle stay 0."""
    arr = [[0] * n for _ in range(n)]
    cnt = 1
    # Every three values of i complete one lap (top edge, diagonal edge,
    # left edge) of the triangle shrunk by one ring (ring index i//3).
    for i in range(n):
        if i % 3 == 0:
            # top edge of ring i//3, written left to right
            for j in range(i // 3, n - 1 - i // 3 * 2):
                arr[i // 3][j] = cnt
                cnt += 1
        elif i % 3 == 1:
            # diagonal edge, walking down and to the left
            for j in range(i // 3, n - i // 3 * 2):
                arr[j][n - j - 1 - i // 3] = cnt
                cnt += 1
        else:
            # left edge, walking upwards
            for j in range(n - i // 3 * 2 - 2, i // 3, -1):
                arr[j][i // 3] = cnt
                cnt += 1
    if n % 3 == 1:
        # For these sizes the spiral never reaches the centre cell; it gets
        # the final number of the 1..n*(n+1)//2 sequence.
        arr[n // 3][n // 3] = (1 + n) * n // 2
    return arr


def _print_triangle(arr):
    """Print each row's populated cells space-separated; the first column is
    always printed (even when 0), matching the original output format."""
    n = len(arr)
    for row in arr:
        for i in range(n):
            if i == 0:
                print(row[i], end='')
            elif row[i]:
                print('', row[i], end='')
        print('')


if __name__ == "__main__":
    func()
| [
"noreply@github.com"
] | yang4978.noreply@github.com |
30a24d477498b4413500a53839689643274b89e7 | 4fc73cbe9e79974cde44d674b7c810edc9b07cd2 | /puyopuyoall/feverclass.py | ebb79ed88aef6c45b799c740ea0ede9ca7610cc8 | [] | no_license | yuufujikata2/Games | 36fbcdfbba976cc6b1850fd5f950bf269f81248d | abc2177023653247ebe1abb9cab172db31d038dc | refs/heads/master | 2020-07-30T14:07:54.466545 | 2019-09-23T03:39:56 | 2019-09-23T03:39:56 | 210,257,943 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py |
class Fever():
    """Holds the state of "fever" mode: the stashed regular board, the fever
    gauge/timer and the chain size used to generate fever boards.

    NOTE(review): attribute names are romanised Japanese -- haichi = board
    layout, rensa = chain; confirm against the rest of the game code.
    """
    def __init__(self):
        # 15 rows x 8 columns of empty cells for the saved layout.
        self.haichi = [[0] * 8 for _ in range(15)]
        self.fegauge = 0      # fever gauge
        self.fetime = 15      # remaining fever time
        self.fehantei = None  # fever flag: None until first used, then bool
        self.ferensasuu = 5   # chain length of generated fever boards

    def feverin(self, field, ferensatane):
        """Enter fever mode: stash the current board and swap in a freshly
        generated chain board."""
        self.haichi = [row[:] for row in field.haichi]
        self.fehantei = True
        field.haichi = [row[:] for row in ferensatane.rensahaichi(self.ferensasuu)]
        self.fegauge = 0

    def fevertuduki(self, field, ferensatane):
        """Fever continues: load the next generated chain board."""
        field.haichi = [row[:] for row in ferensatane.rensahaichi(self.ferensasuu)]

    def feverout(self, field):
        """Leave fever mode: restore the stashed board and reset the timer."""
        field.haichi = [row[:] for row in self.haichi]
        self.fehantei = False
        self.fetime = 15
| [
"yuu.1201.soccer.f@gmail.com"
] | yuu.1201.soccer.f@gmail.com |
a67a7e3853351430549f23b2546bf23d0b9996aa | d6c84c8d2568fdbf898f0f477f0f666ad5769f53 | /polybot/planning.py | 64baebd4e075375fdba645e5bae56a431858ddad | [
"Apache-2.0"
] | permissive | cwang344/polybot-web-service | ae533535e834da10da3b4ddff1a458f89d9dbb89 | 0aebb082829e0a8b4ac4a6d15c5da3f4458ab143 | refs/heads/master | 2023-07-18T07:03:05.577043 | 2021-08-20T14:47:51 | 2021-08-20T14:47:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,617 | py | """Definition for the class that runs the optimization routine.
We describe the policy for starting new runs by implementing a
`Colmena <http://colmena.rtfd.org/>`_ Thinker class.
"""
import random
from pathlib import Path
from typing import Dict, Callable, Union
import requests
from colmena.redis.queue import ClientQueues, TaskServerQueues
from colmena.task_server import ParslTaskServer
from colmena.thinker import BaseThinker, result_processor
from colmena.models import Result
from parsl import Config, ThreadPoolExecutor
from pydantic import BaseModel, Field, AnyHttpUrl
from polybot.models import SampleTemplate
from polybot.robot import send_new_sample
class OptimizationProblem(BaseModel):
    """Define the optimization problem and any settings for the planning algorithm."""
    # Define the search space
    search_template_path: Union[AnyHttpUrl, Path] = Field(
        ..., description="Path to the sample template. Defines the input variables and the search space"
                         " for the optimization. Can be either a path on the local filesystem or a HTTP URL")
    # Options the planning algorithm
    planner_options: Dict = Field(default_factory=dict, description='Any options for the planning algorithm')
    # Define the optimization metric
    # TODO (wardlt): Should we dare cross into multi-objective optimization in this document or leave it up to
    #  the implementation of the Planner?
    output: str = Field(..., description="Output variable. Name of values within the `processed_outputs` dictionary")
    maximize: bool = Field(True, description="Whether to maximize (or minimize) the target function")

    @property
    def search_template(self) -> SampleTemplate:
        """Template that defines the sample search space.

        Note: when the path is a URL this re-fetches the template on every
        access; callers needing it repeatedly should keep a local reference.
        """
        if isinstance(self.search_template_path, str):
            # Pydantic stores HTTP URLs as (subclasses of) str, so a string
            # here means "fetch over the network"; a Path means a local file.
            reply = requests.get(self.search_template_path)
            return SampleTemplate.parse_obj(reply.json())
        else:
            return SampleTemplate.parse_file(self.search_template_path)

    class Config:
        # BUG FIX: the pydantic setting is ``extra`` (singular).  The old
        # ``extras = 'forbid'`` was silently ignored, so unknown fields were
        # accepted instead of being rejected as intended.
        extra = 'forbid'
class BasePlanner(BaseThinker):
    """Base class for planning algorithms based on the `Colmena BaseThinker
    <https://colmena.readthedocs.io/en/latest/how-to.html#creating-a-thinker-application>`_ class.

    Subclasses should provide the optimization specification to the initializer of this class
    so that it is available as the `opt_spec` attribute. Additional options to the planner
    should be set using keyword arguments to the initializer, so that we can define them in the
    :class:`OptimizationProblem` JSON document.

    There are no requirements on how you implement the planning algorithm, but you may at least want an agent
    waiting for results with the "robot" topic. For example,

    .. code-block:: python

        @result_processor(topic='robot')
        def robot_result_handler(self, _: Result):
            output = self.opt_spec.search_template.create_new_sample()
            send_new_sample(output)
    """
    def __init__(self, queues: ClientQueues, opt_spec: OptimizationProblem, daemon: bool = False):
        """
        Args:
            queues: Queues used to communicate with the task server
            opt_spec: Specification of the optimization problem
            daemon: Whether the thinker's worker threads run as daemons
        """
        super().__init__(queues, daemon=daemon)
        self.opt_spec = opt_spec
class RandomPlanner(BasePlanner):
    """Submit a randomly-selected point from the search space each time a new result is completed"""

    @result_processor(topic='robot')
    def robot_result_handler(self, _: Result):
        """Generate a new task to be run on the robot after one completes

        Args:
            _: Result that is not actually used for now.
        """
        # Start from a fresh sample and fill each input variable with a
        # uniformly-random pick from its list of acceptable values
        output = self.opt_spec.search_template.create_new_sample()
        choices_by_input = self.opt_spec.search_template.list_acceptable_input_values()
        for input_name in choices_by_input:
            output.inputs[input_name] = random.choice(choices_by_input[input_name])

        # Hand the filled-in sample off to the robot
        send_new_sample(output)
def _execute(f: Callable):
"""Debug function"""
return f()
def build_thread_pool_executor(queues: TaskServerQueues) -> ParslTaskServer:
    """Builds a task server that runs a single task on a local thread.

    This server is primarily meant for testing, and has a single task,
    "execute," that receives a Callable and executes it remotely.

    Args:
        queues: Queues to use to communicate
    Returns:
        A configured task server
    """
    # A single local worker thread is all the testing server needs
    parsl_config = Config(executors=[ThreadPoolExecutor(max_threads=1)])
    return ParslTaskServer(queues=queues, methods=[_execute], config=parsl_config)
| [
"ward.logan.t@gmail.com"
] | ward.logan.t@gmail.com |
0c5ed03200eea5fa07824fedb9ecfa87e7fe52e8 | a563a95e0d5b46158ca10d6edb3ca5d127cdc11f | /tccli/services/iotexplorer/v20190423/help.py | dff03e15e9ea50a3d1125332751988261fcb441f | [
"Apache-2.0"
] | permissive | SAIKARTHIGEYAN1512/tencentcloud-cli | e93221e0a7c70f392f79cda743a86d4ebbc9a222 | d129f1b3a943504af93d3d31bd0ac62f9d56e056 | refs/heads/master | 2020-08-29T09:20:23.790112 | 2019-10-25T09:30:39 | 2019-10-25T09:30:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,925 | py | # -*- coding: utf-8 -*-
DESC = "iotexplorer-2019-04-23"
INFO = {
"ModifyStudioProduct": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "ProductName",
"desc": "产品名称"
},
{
"name": "ProductDesc",
"desc": "产品描述"
},
{
"name": "ModuleId",
"desc": "模型ID"
}
],
"desc": "提供修改产品的名称和描述等信息的能力"
},
"DeleteStudioProduct": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
}
],
"desc": "提供删除某个项目下产品的能力"
},
"DescribeStudioProduct": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
}
],
"desc": "提供查看茶品详细信息的能力,包括产品的ID、数据协议、认证类型等重要参数"
},
"DescribeDeviceData": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名称"
}
],
"desc": "根据设备产品ID、设备名称,获取设备上报的属性数据。"
},
"CreateStudioProduct": {
"params": [
{
"name": "ProductName",
"desc": "产品名称"
},
{
"name": "CategoryId",
"desc": "产品分组模板ID"
},
{
"name": "ProductType",
"desc": "产品类型"
},
{
"name": "EncryptionType",
"desc": "加密类型"
},
{
"name": "NetType",
"desc": "连接类型"
},
{
"name": "DataProtocol",
"desc": "数据协议"
},
{
"name": "ProductDesc",
"desc": "产品描述"
},
{
"name": "ProjectId",
"desc": "产品的项目ID"
}
],
"desc": "为用户提供新建产品的能力,用于管理用户的设备"
},
"DescribeDevice": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名"
}
],
"desc": "用于查看某个设备的详细信息"
},
"SearchStudioProduct": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
},
{
"name": "ProductName",
"desc": "产品名称"
},
{
"name": "Limit",
"desc": "列表Limit"
},
{
"name": "Offset",
"desc": "列表Offset"
},
{
"name": "DevStatus",
"desc": "产品Status"
}
],
"desc": "提供根据产品名称查找产品的能力"
},
"GetProjectList": {
"params": [
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "个数限制"
}
],
"desc": "提供查询用户所创建的项目列表查询功能。"
},
"DescribeDeviceDataHistory": {
"params": [
{
"name": "MinTime",
"desc": "区间开始时间"
},
{
"name": "MaxTime",
"desc": "区间结束时间"
},
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名称"
},
{
"name": "FieldName",
"desc": "属性字段名称"
},
{
"name": "Limit",
"desc": "返回条数"
},
{
"name": "Context",
"desc": "检索上下文"
}
],
"desc": "获取设备在指定时间范围内上报的历史数据。"
},
"DeleteDevice": {
"params": [
{
"name": "ProductId",
"desc": "产品ID。"
},
{
"name": "DeviceName",
"desc": "设备名称。"
}
],
"desc": "删除设备"
},
"ModifyProject": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
},
{
"name": "ProjectName",
"desc": "项目名称"
},
{
"name": "ProjectDesc",
"desc": "项目描述"
}
],
"desc": "修改项目"
},
"CreateDevice": {
"params": [
{
"name": "ProductId",
"desc": "产品ID。"
},
{
"name": "DeviceName",
"desc": "设备名称。"
}
],
"desc": "创建设备"
},
"ListEventHistory": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名称"
},
{
"name": "Type",
"desc": "搜索的事件类型"
},
{
"name": "StartTime",
"desc": "起始时间, 为0 表示 当前时间 - 24h"
},
{
"name": "EndTime",
"desc": "结束时间, 为0 表示当前时间"
},
{
"name": "Context",
"desc": "搜索上下文, 用作查询游标"
},
{
"name": "Size",
"desc": "单次获取的历史数据项目的最大数量"
}
],
"desc": "获取设备的历史事件"
},
"GetDeviceList": {
"params": [
{
"name": "ProductId",
"desc": "需要查看设备列表的产品 ID"
},
{
"name": "Offset",
"desc": "分页偏移"
},
{
"name": "Limit",
"desc": "分页的大小,数值范围 10-100"
}
],
"desc": "用于查询某个产品下的设备列表"
},
"ReleaseStudioProduct": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DevStatus",
"desc": "产品DevStatus"
}
],
"desc": "产品开发完成并测试通过后,通过发布产品将产品设置为发布状态"
},
"ModifyModelDefinition": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "ModelSchema",
"desc": "数据模板定义"
}
],
"desc": "提供修改产品的数据模板的能力"
},
"DescribeProject": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
}
],
"desc": "查询项目详情"
},
"DescribeModelDefinition": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
}
],
"desc": "查询产品配置的数据模板信息"
},
"CreateProject": {
"params": [
{
"name": "ProjectName",
"desc": "项目名称"
},
{
"name": "ProjectDesc",
"desc": "项目描述"
}
],
"desc": "为用户提供新建项目的能力,用于集中管理产品和应用。"
},
"DeleteProject": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
}
],
"desc": "提供删除某个项目的能力"
},
"ControlDeviceData": {
"params": [
{
"name": "ProductId",
"desc": "产品ID"
},
{
"name": "DeviceName",
"desc": "设备名称"
},
{
"name": "Data",
"desc": "属性数据"
},
{
"name": "Method",
"desc": "请求类型"
},
{
"name": "DeviceId",
"desc": "设备ID,该字段有值将代替 ProductId/DeviceName"
}
],
"desc": "根据设备产品ID、设备名称,设置控制设备的属性数据。"
},
"GetStudioProductList": {
"params": [
{
"name": "ProjectId",
"desc": "项目ID"
},
{
"name": "DevStatus",
"desc": "产品DevStatus"
},
{
"name": "Offset",
"desc": "Offset"
},
{
"name": "Limit",
"desc": "Limit"
}
],
"desc": "提供查询某个项目下所有产品信息的能力。"
}
} | [
"tencentcloudapi@tencent.com"
] | tencentcloudapi@tencent.com |
84c6951f02125c18e80e44560c2b69348c7b7a14 | 45a0434de7cb5aaf51f372a9ea39c2e62528e8d7 | /decoder_hier_fsoftmax_v1.py | bbba0b4556ec3440adaff9eb6a5e5f96ff1e0890 | [] | no_license | hongtaowutj/Seq2Seq-Keyphrase-Generation | 44b5b24f3af7a85c24fc5ef231c53c1dac7e48ff | 6f2d08222b108b543b7628b32e98480f2e3a32b0 | refs/heads/master | 2020-03-27T10:43:09.941194 | 2018-07-23T07:21:35 | 2018-07-23T07:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,457 | py | import os
import sys
sys.path.append(os.getcwd())
import numpy as np
from datetime import datetime
import time
import tensorflow as tf
from keras.models import Model
import keras.backend as K
from keras.models import load_model
from keras.utils import to_categorical
from utils.data_connector import DataConnector
from utils.true_keyphrases import TrueKeyphrases
from utils.decoding_fullsoftmax import DecodingSoftmax
from models.hier_seq2seq import HierarchyFullSoftmax
def decoder(params):
    """Run beam-search decoding (inference) for the hierarchical full-softmax
    seq2seq keyphrase model and save the generated keyphrases to disk.

    :param params: dict of file-system paths (data/preprocessed/result/decode),
        the weights file name, and the model hyper-parameters (sequence
        lengths, layer dimensions, vocabulary size).
    """
    data_path = params['data_path']
    preprocessed_data = params['preprocessed_data']
    preprocessed_v2 = params['preprocessed_v2']
    model_path = params['model_path']  # NOTE(review): read but never used below
    result_path = params['result_path']
    decode_path = params['decode_path']
    file_name = params['file_name']
    weights = params['weights']
    encoder_length = params['encoder_length']
    decoder_length = params['decoder_length']
    max_sents = params['max_sents']
    embedding_dim = params['embedding_dim']
    birnn_dim = params['birnn_dim']
    rnn_dim = params['rnn_dim']
    vocab_size = params['vocab_size']
    '''
    Reading vocabulary dictionaries
    '''
    # index->word and word->index lookups produced during preprocessing
    indices_words_connector = DataConnector(preprocessed_v2, 'all_indices_words_sent_fsoftmax.pkl', data=None)
    indices_words_connector.read_pickle()
    indices_words = indices_words_connector.read_file
    words_indices_connector = DataConnector(preprocessed_v2, 'all_words_indices_sent_fsoftmax.pkl', data=None)
    words_indices_connector.read_pickle()
    words_indices = words_indices_connector.read_file
    # ground-truth keyphrase tokens for the test split
    y_test_true_connector = DataConnector(data_path, 'test_sent_output_tokens.npy', data=None)
    y_test_true_connector.read_numpys()
    y_test_true = y_test_true_connector.read_file
    # non-paired data set
    X_test_connector = DataConnector(preprocessed_data, 'X_test_pad_sent_fsoftmax.npy', data=None)
    X_test_connector.read_numpys()
    X_test = X_test_connector.read_file
    '''
    Decoder model for inference stage
    Return: generated keyphrases
    '''
    full_softmax = HierarchyFullSoftmax(encoder_length=encoder_length, decoder_length=decoder_length, max_sents=max_sents, embedding_dim=embedding_dim, birnn_dim=birnn_dim, rnn_dim=rnn_dim, vocab_size=vocab_size, filepath=result_path, filename=file_name, batch_train_iter=None, batch_val_iter=None, batch_size=None, steps_epoch=None, val_steps=None, epochs=None)
    # skeleton of model architecture
    full_softmax.train_hier_seq2seq()
    encoder_model = full_softmax.encoder_model
    # presumably loads the trained weights into the network — TODO confirm;
    # the returned model itself is not used below
    predict_softmax_model = full_softmax.predict_seq2seq(weights)
    decoder_model = full_softmax.create_decoder_model()
    # transform tokenized y_true (ground truth of keyphrases) into full sentences / keyphrases
    keyphrases_transform = TrueKeyphrases(y_test_true)
    keyphrases_transform.get_true_keyphrases()
    keyphrases_transform.get_stat_keyphrases()
    y_true = keyphrases_transform.y_true
    max_kp_num = keyphrases_transform.max_kp_num
    mean_kp_num = keyphrases_transform.mean_kp_num
    std_kp_num = keyphrases_transform.std_kp_num
    print("Maximum number of key phrases per document in corpus: %s" %max_kp_num)
    sys.stdout.flush()
    print("Average number of key phrases per document in corpus: %s" %mean_kp_num)
    sys.stdout.flush()
    print("Standard Deviation of number of key phrases per document in corpus: %s" %std_kp_num)
    sys.stdout.flush()
    # round up function for computing beam width
    def roundup(x):
        return x if x % 5 == 0 else x + 5 - x % 5
    # beam width = mean + 3*std keyphrases per document, rounded up to a multiple of 5
    beam_width = int(roundup(mean_kp_num + (3 * std_kp_num)))
    print("\nBeam width: %s\n" %beam_width)
    sys.stdout.flush()
    num_hypotheses = beam_width
    print(str(datetime.now()))
    sys.stdout.flush()
    t0 = time.time()  # NOTE(review): never read afterwards
    print("Start decoding...")
    sys.stdout.flush()
    inference_mode = DecodingSoftmax(encoder_model=encoder_model, decoder_model=decoder_model, indices_words=indices_words, words_indices=words_indices, enc_in_seq=None, decoder_length=decoder_length, rnn_dim=rnn_dim, beam_width=beam_width, num_hypotheses=num_hypotheses, filepath=decode_path, filename=file_name)
    t0_1 = time.time()
    print("Start beam decoding...")
    sys.stdout.flush()
    # decode only the first 500 test documents
    beam_keyphrases = inference_mode.beam_decoder(X_test[:500])
    beam_decode_connector = DataConnector(decode_path, 'beam_kp-%s.npy'%(file_name), beam_keyphrases)
    beam_decode_connector.save_numpys()
    t1_1 = time.time()
    print("Beam decoding is done in %.3fsec" % (t1_1 - t0_1))
    sys.stdout.flush()
| [
"i.nimah@tue.nl"
] | i.nimah@tue.nl |
b03e63e83a8119e43592f779267c3e1089933a42 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /vaufKtjX3gKoq9PeS_17.py | 188c5344f01609d7777059c7e888e3ce3952cdbd | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py |
def ohms_law(v, r, i):
  """Return the missing electrical quantity, rounded to two decimals.

  Exactly one of voltage ``v``, resistance ``r`` or current ``i`` should be
  passed as a falsy value (e.g. 0); that quantity is computed from the other
  two via Ohm's law. Returns 'Invalid' for any other combination.
  """
  known = (bool(v), bool(r), bool(i))
  if known == (False, True, True):
    return round(r * i, 2)      # v = i * r
  if known == (True, False, True):
    return round(v / i, 2)      # r = v / i
  if known == (True, True, False):
    return round(v / r, 2)      # i = v / r
  return 'Invalid'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
bba7fcd21ff8aad07745a06ba5cf7bec4ede0975 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/1601.py | 4a308e37f8e46ee749375c4c4f01d664c3823c76 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | #!/usr/bin/env python
import sys
def main(argv=None):
    """Solve the GCJ "Magic Trick" problem (Python 2): read all cases from
    stdin and print one result line per case.

    For each case the volunteer announces the row containing their card in
    two different 4x4 layouts; the card must be present in both rows.
    """
    if argv is None:
        argv = sys.argv
    T = int(sys.stdin.readline())
    for t in xrange(T):
        # First layout: keep only the row the volunteer pointed at
        vrow = int(sys.stdin.readline())
        cards1 = []
        for i in xrange(4):
            row = sys.stdin.readline()
            if i == vrow - 1:
                cards1 = map(int, row.split(" "))
        # Second layout, same idea
        vrow = int(sys.stdin.readline())
        cards2 = []
        for i in xrange(4):
            row = sys.stdin.readline()
            if i == vrow - 1:
                cards2 = map(int, row.split(" "))
        cards = cards1 + cards2
        uniqueCount = len(set(cards))
        outcome = ""
        if uniqueCount == 8:
            # the two announced rows share no card: the volunteer lied
            outcome = "Volunteer cheated!"
        elif uniqueCount <= 6:
            # more than one card is common to both rows: trick is ambiguous
            outcome = "Bad magician!"
        else: # We're good!
            # exactly one duplicate; after sorting it sits next to itself
            cards.sort()
            i = 0
            while outcome == "":
                if cards[i] == cards[i + 1]:
                    outcome = str(cards[i])
                i += 1
        print "Case #%d: %s" % (t + 1, outcome)

if __name__ == "__main__":
    sys.exit(main())
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c4fd6c0e6b2e2d046e2e8e8768f03f52b6748480 | bb7d5f000de07cc2c458a64fd275f3b14701dea2 | /examples/Lottery.py | 3c002d7320f7fe471e8334c122caa8f4f5162c79 | [
"MIT"
] | permissive | timmy61109/Introduction-to-Programming-Using-Python | 201d532a5c041ed045939c10909de0426a6e8be7 | bcbfd8d66173f5adfa1553103a692c02500e7896 | refs/heads/master | 2022-12-12T22:07:56.647918 | 2022-11-23T19:13:48 | 2022-11-23T19:18:46 | 210,872,428 | 0 | 6 | MIT | 2021-11-21T12:58:47 | 2019-09-25T14:58:14 | Python | UTF-8 | Python | false | false | 811 | py | import random
# Generate a two-digit lottery number in [0, 99]
lottery = random.randint(0, 99)

# Prompt the user to enter a guess
# SECURITY NOTE(review): eval() executes arbitrary user input; int(input(...))
# would be the safe choice. Kept as-is to preserve the original behavior.
guess = eval(input("Enter your lottery pick (two digits): "))

# Get digits from lottery
lotteryDigit1 = lottery // 10
lotteryDigit2 = lottery % 10

# Get digits from guess
guessDigit1 = guess // 10
guessDigit2 = guess % 10

print("The lottery number is", lottery)

# Check the guess: exact match > reversed digits > any single matching digit
if guess == lottery:
    print("Exact match: you win $10,000")
elif (guessDigit2 == lotteryDigit1 and \
    guessDigit1 == lotteryDigit2):
    print("Match all digits: you win $3,000")
elif (guessDigit1 == lotteryDigit1
      or guessDigit1 == lotteryDigit2
      or guessDigit2 == lotteryDigit1
      or guessDigit2 == lotteryDigit2):
    print("Match one digit: you win $1,000")
else:
    print("Sorry, no match")
| [
"38396747+timmy61109@users.noreply.github.com"
] | 38396747+timmy61109@users.noreply.github.com |
02f7ab0bee68cd454347bcc87f684b94c7b503e2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2111/60832/263214.py | 493b6cd70cdd6be8f9b3626e503bb4888b869bfb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | n = int(input())
x = 2
i = 1
# Scan upward from 2, counting candidates whose only prime factors are
# 2, 3 and 5 ("ugly numbers"); `n` is read from stdin on the line above.
# When the count reaches n the loop has advanced one step past the answer,
# hence the final x - 1.
while i < n:
    remainder = x
    for p in (5, 3, 2):
        while remainder % p == 0:
            remainder //= p
    if remainder == 1:
        i += 1
    x += 1
print(x - 1)
"1069583789@qq.com"
] | 1069583789@qq.com |
348dc2478aef4fefed9ef1a05d40b69aab88803f | 853c6a09af16fd4dd8a53efa8bde631e63315b59 | /BOJ BF/sum_subsequence.py | e1c9ee7e1ffa0b4065c89119d04a4ff13f4ba0ac | [] | no_license | Areum0921/Abox | 92840897b53e9bbab35c0e0aae5a017ab19a0500 | f4739c0c0835054afeca82484769e71fb8de47c8 | refs/heads/master | 2021-12-13T11:16:33.583366 | 2021-10-10T08:09:50 | 2021-10-10T08:09:50 | 176,221,995 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # 콤비네이션 안쓰고 리스트 안에 있는 숫자들을 조합하여 더할 수 있는 모든 경우의 값
# set 안쓰고 중복을 방지하고싶으면 False 배열을 만들고, 결과값 인덱스를 True 바꾸는 식으로 사용.
N=int(input())
s=list(map(int,input().split(" ")))
sum_list=[]
def dfs(x, y):
    # x: next index into the global list s; y: sum accumulated so far.
    # Enumerates all 2**N subsets by either skipping or taking s[x],
    # appending each completed subset sum to the global sum_list.
    if x == N:
        sum_list.append(y)
    else:
        dfs(x + 1, y)           # skip s[x]
        dfs(x + 1, y + s[x])    # take s[x]
dfs(0,0)
# 결과물 sum_list.sort()하면 None으로 나옴
# sorted(sum_list)하면 정상적으로 나옴
print(sorted(set(sum_list))) | [
"a90907@gmail.com"
] | a90907@gmail.com |
fe0604f50c4cfe8fa89dbadfb87823935a8e5e5f | 3591ab22e1cc0fc1362f909017a8aa5c2b53bd92 | /FundNoticeSpiders/BoYangInvestNotice.py | e5c41b22ec6938b39b38abc013bcd7137d30f716 | [] | no_license | Wnltc/ggscrapy | ef7e9559ce6140e7147f539778e25fc7f6cbee4c | bf929112e14b875a583803fe92980fe67129bdac | refs/heads/master | 2023-03-15T22:00:45.377540 | 2018-06-06T02:19:14 | 2018-06-06T02:19:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from urllib.parse import urljoin
from scrapy.utils.response import get_base_url
from FundNoticeSpiders import GGFundNoticeItem
from FundNoticeSpiders import GGFundNoticeSpider
class BoYangInvestNoticeSpider(GGFundNoticeSpider):
    """抓取博洋投资官网新闻公告列表的爬虫。"""
    name = 'FundNotice_BoYangInvestNotice'
    sitename = '博洋投资'
    entry = 'http://www.byfgroup.com/'
    # Seed request(s); 'ext' carries the current page number through the crawl
    ips = [{
        'url': 'http://www.byfgroup.com/news.jsp',
        'ref': 'http://www.byfgroup.com/',
        'ext': {'page': '1'}
    }]

    def parse_item(self, response):
        """Parse one listing page: yield an item per notice, then queue the next page."""
        ext = response.meta['ext']
        page = int(ext['page'])
        # Total page count is parsed from the "最后一页" (last page) link, e.g. pageDirect(12)
        total_page = response.xpath('//a[text()="最后一页"]/@href').re_first(r'pageDirect\((\d+)\)')
        if total_page is None or total_page == '':
            total_page = 0
        else:
            total_page = int(total_page)
        notices = response.xpath('//td[@class="newxxlist"]')
        # Publish dates sit in sibling cells, assumed in the same order as the notices
        years = response.xpath('//td[@class="newxxdate"]/text()').re(r'(\d+-\d+-\d+)')
        for row in notices:
            url = row.xpath('./a/@href').extract_first()
            url = urljoin(get_base_url(response), url)
            title = row.xpath('./a//text()').extract_first()
            publish_time = years.pop(0)  # pairs dates with notices by position
            publish_time = datetime.strptime(publish_time, '%Y-%m-%d')
            item = GGFundNoticeItem()
            item['sitename'] = self.sitename
            item['channel'] = self.channel
            item['url_entry'] = self.entry
            item['url'] = url
            item['title'] = title
            item['publish_time'] = publish_time
            yield item
        # Queue the next page until the last one; 'form' presumably carries POST
        # form data handled by the base spider — TODO confirm
        if page < total_page:
            self.ips.append({
                'url': 'http://www.byfgroup.com/news.jsp',
                'form': {'page': str(page+1)},
                'ref': response.url,
                'ext': {'page': str(page+1)}
            })
| [
"songxh@go-goal.com"
] | songxh@go-goal.com |
68a35022c085f4199fb83aa56e770d6c7eae54af | 3c119c145a00fbfc3b2d8841a3b9280fa4bc1da8 | /commons/utils/es_.py | 63a4ef747e86519b04ef4e015eb307990362ff25 | [] | no_license | disenQF/GoodsServer | 5cd54786d8c9b3444ffad38057fc62ebade87d3a | de7207b8fba0e60315ae7458978e210b325f305e | refs/heads/master | 2022-12-11T04:37:12.975547 | 2019-04-01T02:21:15 | 2019-04-01T02:21:15 | 177,748,032 | 3 | 1 | null | 2022-12-08T04:55:20 | 2019-03-26T08:45:59 | JavaScript | UTF-8 | Python | false | false | 2,861 | py | import pymysql
import requests
from pymysql.cursors import DictCursor
from FoodsAdminServer.settings import DATABASES
# 哪些数据添加到搜索引擎
sql = """
select c.name as cate_name,
f.id, f.name,f.price,f.image
from t_category c
join t_foods f on (c.id=f.category_id)
"""
HOST = 'localhost' # ES 的服务器地址
PORT = 9200 # ES RESTful 端口
URL_ = 'http://%s:%d' %(HOST, PORT)
def init_index(index_name):
    """创建名为 `index_name` 的 ES 索引(5 分片 / 1 副本);若已存在则跳过。"""
    url = '%s/%s' % (URL_, index_name)
    # GET on a missing index returns a body whose 'status' field is 404
    if requests.get(url).json().get('status') != 404:
        print('---- 索引已存在----')
        return
    settings_body = {
        'settings': {
            "number_of_shards": 5,
            "number_of_replicas": 1
        }
    }
    reply = requests.request('PUT', url, json=settings_body).json()
    if reply.get('acknowledged'):
        print('%s 索引创建成功' % index_name)
    else:
        print('%s 索引创建失败' % index_name)
def init_docs(index_name, type_name):
    """从 MySQL 读取菜品数据并逐条写入 ES 索引。

    Improvements over the original: reuses :func:`add_doc` for the per-row
    indexing (the loop body duplicated it verbatim), and always closes the
    database connection, which the original leaked.
    """
    config = DATABASES.get('default')
    del config['ENGINE']
    config['DB'] = config.pop('NAME')
    # pymysql expects lower-case keyword arguments
    config = {key.lower(): value for key, value in config.items()}
    db = pymysql.Connect(**config)
    try:
        with db.cursor(cursor=DictCursor) as c:
            c.execute(sql)
            for row_data in c.fetchall():
                # 添加索引文档 (same per-row logic and output as add_doc)
                add_doc(index_name, type_name, row_data)
    finally:
        db.close()
def update_doc(index_name, type_name, item):
    """用 `item` 整体覆盖索引中 id 为 item['id'] 的文档。"""
    doc_url = '%s/%s/%s/%s' % (URL_, index_name, type_name, item['id'])
    requests.request('PUT', doc_url, json=item)
def delete_doc(index_name, type_name, id_):
    """删除索引中 id 为 `id_` 的文档。

    The original contained a leftover debug ``print('-->', url)``; it has been
    removed for consistency with the other helpers, none of which log URLs.
    """
    url = URL_ + '/%s/%s/%s' % (index_name, type_name, id_)
    requests.request('DELETE', url)
def add_doc(index_name, type_name, item):
    """
    :param index_name:
    :param type_name:
    :param item: {'cate_name': , 'id': , 'name': , 'price':, 'image':}
    :return:
    """
    url = '%s/%s/%s/%s' % (URL_, index_name, type_name, item['id'])
    reply = requests.post(url, json=item).json()
    if reply.get('created'):
        template = '----添加 %s-> %s 成功---'
    else:
        template = '----添加 %s-> %s 失败---'
    print(template % (item['cate_name'], item['name']))
if __name__ == '__main__':
    # 一次性初始化:先创建索引,再从 MySQL 批量导入文档
    init_index('foods_site')
    init_docs('foods_site', 'foods')
"610039018@qq.com"
] | 610039018@qq.com |
eccfa6654ec3fb088a7177d281c098151f918970 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2491/58758/260329.py | 584afe2be11bf0d64307db2c872bcfd9c6c35ace | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | nums1 = eval(input())
nums2 = eval(input())  # NOTE(review): eval on user input is unsafe; kept for the judge's input format
nums1.sort()
nums2.sort()
# Multiset intersection preserving duplicates: for each element of the sorted
# first list, consume one matching occurrence from the second list.
# (The original used nums2.index() inside try/except and ignored the index —
# a plain membership test expresses the same thing without dead variables.)
ans = []
for value in nums1:
    if value in nums2:
        nums2.remove(value)
        ans.append(value)
print(ans)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
38cd18e7d9ad3e43327fd7219a1a4dd1428fc53e | 35dbd536a17d7127a1dd1c70a2903ea0a94a84c2 | /src/sentry/integrations/custom_scm/repository.py | 2667d60c50a84c2264e0af4de028c4fb0a2556f5 | [
"Apache-2.0",
"BUSL-1.1"
] | permissive | nagyist/sentry | efb3ef642bd0431990ca08c8296217dabf86a3bf | d9dd4f382f96b5c4576b64cbf015db651556c18b | refs/heads/master | 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 | BSD-3-Clause | 2022-12-16T19:13:54 | 2015-12-17T09:42:42 | Python | UTF-8 | Python | false | false | 1,797 | py | from django.http import Http404
from rest_framework.request import Request
from rest_framework.response import Response
from sentry.api.serializers import serialize
from sentry.models import Integration, Repository
from sentry.plugins.providers import IntegrationRepositoryProvider
class CustomSCMRepositoryProvider(IntegrationRepositoryProvider):
    """Repository provider for the Custom SCM integration."""
    name = "CustomSCM"
    repo_provider = "custom_scm"

    def repository_external_slug(self, repo):
        # For custom SCM repositories the name doubles as the external identifier
        return repo.name

    def dispatch(self, request: Request, organization, **kwargs):
        """
        Adding a repository to the Custom SCM integration is
        just two steps:
            1. Change the provider from `null` to 'integrations:custom_scm'
            2. Add the integration_id that is passed from the request

        We set the `identifier` to be the repo's id in our db
        when we call `get_repositories`. Normally this is the id or
        identifier in the other service (i.e. the GH repo id)
        """
        repo_id = request.data.get("identifier")
        integration_id = request.data.get("installation")
        try:
            # double check the repository_id passed is not
            # for an already 'claimed' repository
            repo = Repository.objects.get(
                organization_id=organization.id,
                id=repo_id,
                integration_id__isnull=True,
                provider__isnull=True,
            )
            integration = Integration.objects.get(organizations=organization, id=integration_id)
        except (Repository.DoesNotExist, Integration.DoesNotExist):
            # 404 hides whether the repo or the integration was missing
            raise Http404

        # Claim the repository for this integration
        repo.provider = self.id
        repo.integration_id = integration.id
        repo.save()
        return Response(serialize(repo, request.user), status=201)
| [
"noreply@github.com"
] | nagyist.noreply@github.com |
7115f457ba5937098412a7e1070fbe84366470d4 | 87ba55b289f5bf0451e03384ceb0531ddc7016eb | /setup.py | d2206a7e07f97300c5228c15c96ece87a2991229 | [
"Apache-2.0"
] | permissive | ck196/TensorFlowASR | 5e8e57c6f62947e97d968bb9153784394bd5846b | 16c81282f08fc31b08156bb179d59eea3daaf120 | refs/heads/main | 2023-03-28T07:34:45.623636 | 2021-05-22T13:18:08 | 2021-05-22T13:18:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | # Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
# Long description (rendered on PyPI) comes straight from the README.
# BUG FIX: both reads now pass encoding="utf-8"; relying on the platform
# default encoding breaks on non-UTF-8 locales (e.g. Windows cp1252).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Runtime dependencies are pinned in requirements.txt.
with open("requirements.txt", "r", encoding="utf-8") as fr:
    requirements = fr.read().splitlines()

setuptools.setup(
    name="TensorFlowASR",
    version="1.0.1",
    author="Huy Le Nguyen",
    author_email="nlhuy.cs.16@gmail.com",
    description="Almost State-of-the-art Automatic Speech Recognition using Tensorflow 2",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/TensorSpeech/TensorFlowASR",
    packages=setuptools.find_packages(include=["tensorflow_asr*"]),
    install_requires=requirements,
    extras_require={
        "tf2.3": ["tensorflow==2.3.2", "tensorflow-text==2.3.0", "tensorflow-io==0.16.0"],
        "tf2.3-gpu": ["tensorflow-gpu==2.3.2", "tensorflow-text==2.3.0", "tensorflow-io==0.16.0"],
        "tf2.4": ["tensorflow>=2.4", "tensorflow-text==2.4.3", "tensorflow-io==0.17.0"],
        "tf2.4-gpu": ["tensorflow-gpu>=2.4", "tensorflow-text==2.4.3", "tensorflow-io==0.17.0"]
    },
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Intended Audience :: Science/Research",
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
    python_requires='>=3.6',
)
| [
"nlhuy.cs.16@gmail.com"
] | nlhuy.cs.16@gmail.com |
ceb964674032bd1ac4980a9a892671fa4f5d22d1 | 57a054c03419607bd0dad1c50b0692430a9ace40 | /home/migrations/0002_load_initial_data.py | f58c19e4fde560d2607585826423571fef1e099b | [] | no_license | crowdbotics-apps/project-1-25765 | a4c1d1cfff2378a7633cb1429303008a9301a8fa | 148acaf70e78f03c9428a73a5c4e1c60deb13da4 | refs/heads/master | 2023-04-10T13:40:54.952083 | 2021-04-18T19:49:15 | 2021-04-18T19:49:15 | 359,238,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    # Seed the CustomText table with the application's display title
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="Project_1")
def create_homepage(apps, schema_editor):
    # Seed the HomePage table with the default landing-page markup
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
<h1 class="display-4 text-center">Project_1</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    # Point the default Site (pk=1) at the deployed domain, creating it if needed
    site_model = apps.get_model("sites", "Site")
    custom_domain = "project-1-25765.botics.co"
    site_params = {
        "name": "Project_1",
    }
    # Template-style guard: the domain is only set when one was provided
    if custom_domain:
        site_params["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    # Runs after the initial `home` models and the unique-domain Sites
    # migration, then loads the seed rows created by the functions above.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
fe372976873228e0ed042f92dc498e7f69260681 | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script869.py | 6c72307996165323f1b6da8a8eb5e4ccbe7c4420 | [] | no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,142 | py |
# coding: utf-8
# # Intro
# **This is Lesson 7 in the [Deep Learning](https://www.kaggle.com/learn/machine-learning) track**
#
# The models you've built so far have relied on pre-trained models. But they aren't the ideal solution for many use cases. In this lesson, you will learn how to build totally new models.
#
# # Lesson
#
# In[1]:
from IPython.display import YouTubeVideo
YouTubeVideo('YbNE3zhtsoo', width=800, height=450)
# # Sample Code
# In[ ]:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.python import keras
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, Dropout
a=pd.read_csv("../input/digit-recognizer/train.csv")
a.drop('label', axis=1)  # NOTE(review): drop() returns a new frame which is discarded, and `a` is never used again — this read is dead code
img_rows, img_cols = 28, 28
num_classes = 10
def data_prep(raw):
    """Split a raw digit-recognizer dataframe into (x, y) arrays for Keras.

    `raw` holds the Kaggle digit-recognizer CSV: a `label` column plus one
    column per pixel.
    """
    # one-hot encode the digit labels into num_classes columns
    out_y = keras.utils.to_categorical(raw.label, num_classes)
    num_images = raw.shape[0]
    x_as_array = raw.values[:,1:]
    # (n, 784) -> (n, 28, 28, 1): single-channel image tensors
    x_shaped_array = x_as_array.reshape(num_images, img_rows, img_cols, 1)
    # scale pixel values from 0-255 down to 0-1
    out_x = x_shaped_array / 255
    return out_x, out_y
train_size = 30000  # NOTE(review): unused — fit() below trains on all rows via validation_split
train_file = "../input/digit-recognizer/train.csv"
raw_data = pd.read_csv(train_file)

# Prepare inputs/targets and build a small two-conv-layer CNN classifier
x, y = data_prep(raw_data)

model = Sequential()
model.add(Conv2D(20, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=(img_rows, img_cols, 1)))
model.add(Conv2D(20, kernel_size=(3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])
# 20% of the data is held out for validation during the 2 training epochs
model.fit(x, y,
          batch_size=128,
          epochs=2,
          validation_split = 0.2)
# # Your Turn
# You are ready to [build your own model](https://www.kaggle.com/dansbecker/exercise-modeling-from-scratch).
#
# # Keep Going
# [Move on](https://www.kaggle.com/dansbecker/dropout-and-strides-for-larger-models) to learn two techniques that will make your models run faster and more robust to overfitting.
| [
"adithyagirish@berkeley.edu"
] | adithyagirish@berkeley.edu |
b91f4fe7546cb810a246edc25d61c27d766888e2 | c1a5a5779fa3cebee65d23d0216549f09fdffda5 | /508saver.py | f3f0e4d8410b7357f37db8805df69b203d2f6961 | [] | no_license | greenmac/python-morvan-tensorflow | b0821825f8857d969d4d60437334f8fbb2ca18aa | 0abd63e74b3f5a54f82337fb8deaf4edecef294f | refs/heads/master | 2020-04-11T06:20:28.315996 | 2019-01-08T13:57:27 | 2019-01-08T13:57:27 | 161,578,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | # https://morvanzhou.github.io/tutorials/machine-learning/tensorflow/5-06-save/
# https://github.com/MorvanZhou/tutorials/blob/master/tensorflowTUT/tf19_saver.py
import tensorflow as tf
import numpy as np
# //////////
# ## Save to file
# ## remember to define the same dtype and shape when restore
# W = tf.Variable([[1, 2, 3], [3, 4, 5]], dtype=tf.float32, name='weights')
# b = tf.Variable([[1, 2, 3]], dtype=tf.float32, name='biases')
# init = tf.initialize_all_variables()
# saver = tf.train.Saver()
# with tf.Session() as sess:
# sess.run(init)
# save_path = saver.save(sess, "my_net/save_net.ckpt")
# print("Savee to path:", save_path)
# //////////
## Restore variables from the checkpoint written by the (commented-out) save
## step above. Variables must be redefined with the same shape and dtype as
## when they were saved.
W = tf.Variable(np.arange(6).reshape((2, 3)), dtype=tf.float32, name='weights')
b = tf.Variable(np.arange(3).reshape((1, 3)), dtype=tf.float32, name='biases')
## No init step needed: restore() overwrites the variable values anyway.
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "my_net/save_net.ckpt")
    print("weights:", sess.run(W))
    print("biases:", sess.run(b))
"alwaysmac@msn.com"
] | alwaysmac@msn.com |
930a3906ee90eb771f1a686e1d0ba722cc78cf13 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_135/ch88_2020_05_06_11_49_20_934805.py | e6bba1cfd6fb8a4323e72c322207bfb9927fdc3b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | class Retangulo:
def __init__(self, inferior_esquerdo, superior_direito):
self.inferior_esquerdo = inferior_esquerdo
self.superior_direito = superior_direito
def calcula_perimetro(self):
lado_x = superior_direito.x - inferior_esquerdo.x
lado_y = superior_direito.y - inferior_esquerdo.y
perimetro = lado_x * 2 + lado_y * 2
return perimetro
    def calcula_area(self):
        # NOTE(review): ``superior_direito`` / ``inferior_esquerdo`` are bare
        # names here, not ``self.`` attribute reads, so calling this method
        # raises NameError.  They should be ``self.superior_direito`` and
        # ``self.inferior_esquerdo``.
        lado_x = superior_direito.x - inferior_esquerdo.x
        lado_y = superior_direito.y - inferior_esquerdo.y
        area = lado_x * lado_y
        return area | [
"you@example.com"
] | you@example.com |
e9a6a86f1f9c97aeebc0645f9143517e6480c3a1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/1367.py | 0cf8ef40104ecb429f0cbaab2ce61a8013ecb146 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | # Google Code jam Problem A Speaking in Tongues
# Apr. 13, 2012
# Python 3.2.3
import sys
import string
def ReadRules(d):
    """Fill *d* with the Googlerese -> plain-text character map and return it.

    The mapping is derived position-by-position from a known ciphertext /
    plaintext pair; entries already present in *d* are never overwritten.
    Space maps to space and newline maps to the empty string so whole input
    lines can be translated character by character.
    """
    encrypted = 'ejp mysljylc kd kxveddknmc re jsicpdrysi rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd de kr kd eoya kw aej tysr re ujdr lkgc jv y e q z'
    original = 'our language is impossible to understand there are twenty six factorial possibilities so it is okay if you want to just give up a o z q'
    for cipher_word, plain_word in zip(encrypted.split(), original.split()):
        for cipher_ch, plain_ch in zip(cipher_word, plain_word):
            d.setdefault(cipher_ch, plain_ch)
    d[' '] = ' '
    d['\n'] = ''
    return d
def main(inFileName):
inFile = open(inFileName, mode='r')
numberOfCases = int(inFile.readline())
d = {}
d = ReadRules(d)
#for (k, v) in sorted(d.items()):
# print(k + "->" + v + "\n")
for caseNumber in range(numberOfCases):
line = inFile.readline()
answer = ''
for i in range(len(line)):
answer += d[line[i]]
print('Case #' + str(caseNumber+1) + ': ' + answer )
# Script entry point: the input file path is taken from the first CLI argument.
if __name__ == '__main__':
    main(sys.argv[1])
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
09df8a06e99dac1e29ef731782777407116b0ea2 | 9463b87c683fdada077cacca542998e19557f1e5 | /其他教学参考/随书源码/CH1.2-P015“百鸡百钱”问题.py | 8b5e3d725cc8267233b4d85feabb7731ed0b0688 | [] | no_license | hongm32/2020design | fa3d9e06b0e91be30784f3ad78bf65cbcfb3550b | 34b6a3c6a2740f049134eada1fbab6cacc154b0d | refs/heads/master | 2023-07-25T10:34:23.708781 | 2021-09-05T01:08:28 | 2021-09-05T01:08:28 | 303,857,648 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 945 | py | # “百鸡百钱”问题
# The "hundred chickens for a hundred coins" problem, a classic puzzle from
# the ancient Chinese text "Zhang Qiujian Suanjing":
#   a rooster costs 5 coins,
#   a hen costs 3 coins,
#   and 3 chicks cost 1 coin.
# Buy exactly 100 birds with exactly 100 coins, with at least one rooster,
# one hen and one chick.  How many of each are bought?
money = 100 # total budget: 100 coins
num = 100 # total number of birds: 100
cock_price = 5 # one rooster costs 5 coins
hen_price = 3 # one hen costs 3 coins
chick_price = 3 # 3 chicks per coin (used as a divisor below)
for cock_num in range(1, money // cock_price + 1): # roosters: 1..20
    for hen_num in range(1, money // hen_price + 1): # hens: 1..33
        chick_num = num - cock_num - hen_num # chicks are the remaining birds
        money1 = cock_num * cock_price + hen_num * hen_price + chick_num / chick_price
        # Equality with the integer budget also filters chick counts that are
        # not divisible by 3: those yield a fractional total cost.
        if money1 == money:
            print("公鸡:{: >2} 母鸡:{: >2} 小鸡:{}".format(cock_num, hen_num, chick_num))
input("运行完毕,请按回车键退出...")
| [
"XHM,1024cj"
] | XHM,1024cj |
54fd5dc3bc286cccf4c6d9ba9a7ddc091dcb1d07 | 4b0c57dddf8bd98c021e0967b5d94563d15372e1 | /run_MatrixElement/test/crabConfigFiles/crab_STopT_T_JESUp_cfg.py | c094b1e645d7a28aef753031438be4d92f8c137b | [] | no_license | aperloff/TAMUWW | fea6ed0066f3f2cef4d44c525ee843c6234460ba | c18e4b7822076bf74ee919509a6bd1f3cf780e11 | refs/heads/master | 2021-01-21T14:12:34.813887 | 2018-07-23T04:59:40 | 2018-07-23T04:59:40 | 10,922,954 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,511 | py | from WMCore.Configuration import Configuration
# CRAB3 task configuration: runs the run_MatrixElement executable over the
# STopT_T_JESUp sample through the JobScript.sh wrapper (PrivateMC plugin).
config = Configuration()
# --- General: request name and transfer of outputs/logs ---
config.section_("General")
config.General.requestName = 'run_MatrixElement_STopT_T_JESUp'
config.General.transferOutputs = True
config.General.transferLogs = True
# --- JobType: wrapper script, its arguments, and the files shipped with it ---
config.section_("JobType")
config.JobType.pluginName = 'PrivateMC'
config.JobType.scriptExe = 'JobScript.sh'
config.JobType.scriptArgs = ["numberOfEvents=100","SampleName=STopT_T_JESUp"]
config.JobType.psetName = 'emptyPSet_STopT_T_JESUp_cfg.py'
config.JobType.allowUndistributedCMSSW = True
config.JobType.inputFiles = ['FrameworkJobReport.xml','../../../../bin/slc6_amd64_gcc472/run_MatrixElement','../data/cteq5l.tbl', '../data/cteq6l.tbl', '../data/TF_TTbarMG_B_00_24.txt', '../data/TF_TTbarMG_G_00_24.txt', '../data/TF_TTbarMG_UDS_00_24.txt']
config.JobType.outputFiles = ['STopT_T_JESUp.root']
# --- Data: input sample, event-based splitting, output destination ---
config.section_("Data")
config.Data.userInputFiles = ['root://cmsxrootd.fnal.gov//store/user/aperloff/MatrixElement/Summer12ME8TeV/MEInput/STopT_T_JESUp.root']
config.Data.primaryDataset = 'STopT_T_JESUp'
config.Data.splitting = 'EventBased'
config.Data.unitsPerJob = 1
NJOBS = 1661 # This is not a configuration parameter, but an auxiliary variable that we use in the next line.
config.Data.totalUnits = config.Data.unitsPerJob * NJOBS
config.Data.publication = False
config.Data.outLFNDirBase = '/store/user/aperloff/MatrixElement/Summer12ME8TeV/MEResults/rootOutput/'
config.Data.ignoreLocality = True
# --- Site: stage output to the FNAL LPC T3 ---
config.section_("Site")
config.Site.storageSite = 'T3_US_FNALLPC'
| [
"aperloff@physics.tamu.edu"
] | aperloff@physics.tamu.edu |
556b24d6265074e5934dfa15c924c83202d587df | ff5d50f40629e50794a1fd4774a9a1a8ce3a2ecd | /controles/migrations/0001_initial.py | 5a23d36747b966a639429842817c16424b62b0fd | [] | no_license | thiagorocha06/mairimed | 0d9de3db03ff073de431c0d40e16b3c1c5b1d3fe | 6705e36b52410823c04b41db58e8f0b6b3f30b85 | refs/heads/master | 2022-12-13T07:37:49.619189 | 2018-12-30T16:03:49 | 2018-12-30T16:03:49 | 109,196,690 | 0 | 0 | null | 2022-12-08T02:23:39 | 2017-11-01T23:56:20 | Python | UTF-8 | Python | false | false | 2,743 | py | # Generated by Django 2.0.7 on 2018-10-18 21:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the 'controles' app: four per-user measurement
    logs (blood glucose, weight, blood pressure, temperature), each stamped
    with a date and time and linked to the authoring user.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Blood glucose reading.
        migrations.CreateModel(
            name='Glicemia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('glicemia', models.IntegerField(blank=True, null=True)),
                ('data', models.DateField(default=django.utils.timezone.now)),
                ('hora', models.TimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Weight measurement.
        migrations.CreateModel(
            name='Peso',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('peso', models.IntegerField(blank=True, null=True)),
                ('data', models.DateField(default=django.utils.timezone.now)),
                ('hora', models.TimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Blood pressure (systolic / diastolic).
        migrations.CreateModel(
            name='Pressao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sistolica', models.IntegerField(blank=True, null=True)),
                ('diastolica', models.IntegerField(blank=True, null=True)),
                ('data', models.DateField(default=django.utils.timezone.now)),
                ('hora', models.TimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Body temperature.  NOTE(review): the field name 'tempetura' looks
        # misspelled (presumably 'temperatura'); it must mirror the model
        # definition, so renaming requires a follow-up migration.
        migrations.CreateModel(
            name='Temperatura',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tempetura', models.IntegerField(blank=True, null=True)),
                ('data', models.DateField(default=django.utils.timezone.now)),
                ('hora', models.TimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"thiagorocha06@gmail.com"
] | thiagorocha06@gmail.com |
16528ea339ae9b698b0ceb7e36bc37dfd763c35a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_293/ch147_2020_04_26_03_40_37_343598.py | ffa76dab8a358a07959763021cc31259573a94f1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | def mais_frequente(lista):
    # Count occurrences of each element while tracking the current winner.
    dic = {}
    mais_freq = 0
    for e in lista:
        if e not in dic:
            dic[e] = 1
        else:
            dic[e] += 1
        # Strict '<' means ties keep the element that reached the top count first.
        if mais_freq < dic[e]:
            mais_freq = dic[e]
            a = e
    # NOTE(review): for an empty 'lista', 'a' is never bound and this raises
    # UnboundLocalError -- consider returning None in that case.
    return a | [
"you@example.com"
] | you@example.com |
22b207801f06c41467931b863d2751b9314ccccd | 6ed01f4503fc9de234a561c945adff7cf4b1c81b | /ncar_lib/lib/frameworks.py | d00ac877a2c94b3b7ffa1bceab14ddcb9f7de3be | [] | no_license | ostwald/python-lib | b851943c913a68424a05ce3c7b42878ff9519f68 | 9acd97ffaa2f57b3e9e632e1b75016549beb29e5 | refs/heads/master | 2021-10-28T06:33:34.156095 | 2021-10-21T23:54:49 | 2021-10-21T23:54:49 | 69,060,616 | 0 | 1 | null | 2018-06-21T16:05:30 | 2016-09-23T21:04:46 | Roff | UTF-8 | Python | false | false | 6,986 | py | """
classes for reading library_dc and webcat XML records
"""
import os, sys, re, codecs
from JloXml import XmlRecord, XmlUtils
import globals
class NCARRec (XmlRecord):
"""
assumes a flat metadata structure (all fields are children of docRoot)
"""
field_list = None
id_field = None
description_field = None
xpath_delimiter = "/"
def __init__ (self, path=None, xml=None):
XmlRecord.__init__ (self, path, xml)
for attr in self.field_list:
setattr (self, attr, None)
for element in self.getElements(self.doc):
setattr (self, element.tagName, self.getText(element))
print 'set %s to %s' % (element.tagName, self.getText(element))
def getFieldValue (self, field):
path = "%s/%s" % (self.rootElementName, field)
value = self.getTextAtPath (path)
if not value is None:
value = value.strip()
return value
def getFieldValues (self, field):
path = "%s/%s" % (self.rootElementName, field)
nodes = self.selectNodes (self.dom, path)
values = []
for node in nodes:
value = self.getText (node)
if value is None:
continue
value = value.strip()
if value:
values.append (value)
return values
def getFieldElements (self, field):
path = "%s/%s" % (self.rootElementName, field)
return self.selectNodes (self.dom, path)
def numFieldValues (self, field):
path = "%s/%s" % (self.rootElementName, field)
nodes = self.selectNodes (self.dom, path)
return len(nodes)
def addFieldValue (self, field, value):
"""
do not add a value if this field already has it
strip value before adding
"""
path = "%s/%s" % (self.rootElementName, field)
element = self.addElement (self.doc, field)
if not value is None:
value = value.strip()
if not value in self.getFieldValues (field):
self.setText (element, value)
def setFieldValue (self, field, value):
"""
if there are existing values, this will change the first only
"""
path = "%s/%s" % (self.rootElementName, field)
if not value is None:
value = value.strip()
element = self.selectSingleNode (self.dom, path)
if not element:
element = self.addElement (self.doc, field)
self.setText (element, value)
def removeField (self, field):
path = "%s/%s" % (self.rootElementName, field)
nodes = self.selectNodes (self.dom, path)
for node in nodes:
self.deleteElement (node)
def setFieldValues (self, field, values):
self.removeField (field)
self.addFieldValues (field, values)
def addFieldValues (self, field, values):
for value in values:
self.addFieldValue (field, value)
def orderFields (self):
""" based on converter.Converter """
elements = self.doc.childNodes
# print "-------------"
mycmp = lambda x, y:cmp (self.field_list.index(x.tagName),
self.field_list.index(y.tagName))
if elements:
elements.sort(mycmp)
def getId (self):
return self.getFieldValue (self.id_field)
def setId (self, value):
self.setFieldValue (self.id_field, value)
def getDescription (self):
return self.getFieldValue (self.description_field)
def setDescription (self, value):
self.setFieldValue (self.description_field, value)
class WebcatRec (NCARRec):
    """NCARRec specialization for 'webcat' library catalog records.

    Field layout comes from globals.webcat_fields; records are identified
    by their 'recordID' field and described by 'description'.
    """
    rootElementName = "record"
    ## issue_delimiter = re.compile ("(?P<issue>NCAR.+?) [:-] (?P<title>[a-zA-Z].+)") # - for all but manuscripts, which use :
    issue_delimiter = re.compile ("(?P<issue>NCAR.+?)[\s]+[:-][\s]+(?P<title>.+)") # - for all but manuscripts, which use :
    field_list = globals.webcat_fields
    id_field = "recordID"
    accessionNum_field = "accessionNum"
    description_field = "description"

    def __init__ (self, path=None, xml=None):
        NCARRec.__init__ (self, path, xml)

    def getAccessionNum (self):
        """Return the record's accession number field value."""
        # Bug fix: 'accessionNum_field' was referenced as a bare name, which
        # raises NameError at call time -- class attributes must be reached
        # through self.
        return self.getFieldValue (self.accessionNum_field)

    def getPublishers (self):
        """Return all 'publisher' field values."""
        return self.getFieldValues ("publisher")

    def getScientificDivisions (self):
        """Return all 'scientificDivision' field values."""
        return self.getFieldValues ("scientificDivision")
class LibraryDCRec_v1_0 (NCARRec):
"""
made obsolete (~2/09) when framework changed to contain a single namespace!
we are always writing a new rec, not reading an existing one ...
xsi:schemaLocation="http://www.dlsciences.org/frameworks/library_dc
http://www.dlsciences.org/frameworks/library_dc/1.0/schemas/library_dc.xsd"
"""
rootElementName = "library_dc:record"
schemaUri = "http://www.dlsciences.org/frameworks/library_dc/1.0/schemas/library_dc.xsd"
nameSpaceUri = "http://www.dlsciences.org/frameworks/library_dc"
dcNameSpaceUri = "http://purl.org/dc/elements/1.1/"
field_list = globals.library_dc_fields
id_field = "library_dc:recordID"
url_field = "library_dc:URL"
description_field = "dc:description"
altTitle_field = "library_dc:altTitle"
instName_field = "library_dc:instName"
instDivision_field = "library_dc:instDivision"
def __init__ (self, path=None):
if path:
XmlRecord.__init__ (self, path=path)
else:
self.makeRecord ()
def makeRecord (self):
xml = "<%s xmlns:library_dc='%s' />" % (self.rootElementName, self.nameSpaceUri)
NCARRec.__init__ (self, xml=xml)
self.doc.setAttribute ("xmlns:library_dc", self.nameSpaceUri)
self.doc.setAttribute ("xmlns:dc", self.dcNameSpaceUri)
self.doc.setAttribute ("xmlns:"+self.schema_instance_namespace, \
self.SCHEMA_INSTANCE_URI)
self.setSchemaLocation (self.schemaUri, self.nameSpaceUri)
def getUrl (self):
return self.getFieldValue (self.url_field)
def setUrl (self, value):
self.setFieldValue (self.url_field, value)
def getAltTitle (self):
return self.getFieldValue (self.altTitle_field)
def setAltTitle (self, value):
self.setFieldValue (self.altTitle_field, value)
def getInstName (self):
return self.getFieldValue (self.instName_field)
def setInstName (self, value):
self.setFieldValue (self.instName_field, value)
def getInstDivisions (self):
return self.getFieldValues (self.instDivision_field)
def setInstDivisions (self, value):
self.setFieldValues (self.instDivision_field, value)
def getTitle (self):
return self.getFieldValue ("dc:title")
def getIssue (self):
return self.getFieldValue ("library_dc:issue")
def setIssue (self, val):
return self.setFieldValue ("library_dc:issue", val)
def getContributors (self):
return self.getFieldValues ("dc:contributor")
def getCreators (self):
return self.getFieldValues ("dc:creators")
def LibraryDCRecTester ():
    # Smoke test for LibraryDCRec_v1_0: exercises field set/get, the URL
    # accessors, multi-value add, counting and removal, printing the record
    # XML after each mutation for visual inspection (Python 2 print syntax).
    rec = LibraryDCRec_v1_0 ()
    rec.setFieldValue ("library_dc:URL", "http://fooberry/index.html")
    print "URL: %s" % rec.getFieldValue ("library_dc:URL")
    rec.setUrl ("imachanged")
    print "URL: %s" % rec.getUrl()
    rec.addFieldValues ("dc:subject", ['sub1', 'sub2'])
    print rec
    rec.addFieldValues ("dc:subject", ['sub3', 'sub4'])
    print rec
    print "number of dc:subject fields: %d" % rec.numFieldValues ("dc:subject")
    print "number of dc:Xsubject fields: %d" % rec.numFieldValues ("dc:Xsubject")
    rec.removeField ("dc:subject")
    print rec
# Manual test entry point.
if __name__ == "__main__":
    LibraryDCRecTester ()
| [
"ostwald@ucar.edu"
] | ostwald@ucar.edu |
7d2b7099645047f346ca3482c84e6f3449782ee8 | 767e864a1b1a2722b4952fb5034a776064b2ef64 | /sentry_youtrack/youtrack.py | 392e297142606b6935d89406888fa5f14f6bb367 | [] | no_license | pombredanne/sentry-youtrack | 18403a9c218e65bc044cfa6244f1fe63fd298638 | 1d1b11aeaf63299c8b1aa83a814d708c23d9cb8a | refs/heads/master | 2021-01-22T09:09:55.134392 | 2013-11-04T22:50:08 | 2013-11-04T22:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | import requests
from BeautifulSoup import BeautifulStoneSoup
class YouTrackError(Exception):
    """Raised when a YouTrack REST response carries an <error> element."""
    pass
class YouTrackClient(object):
    """Minimal client for the YouTrack REST API.

    Authenticates either with username/password (cookie login) or with a
    pre-existing API key, and wraps a handful of project and issue
    endpoints.  Responses are parsed with BeautifulStoneSoup, so the
    'soap' variables below hold parsed XML trees (not SOAP messages).
    """
    # Endpoint templates; '<...>' placeholders are substituted with
    # str.replace() before each request.
    LOGIN_URL = '/rest/user/login'
    PROJECT_URL = '/rest/admin/project/<project_id>'
    PROJECTS_URL = '/rest/project/all'
    CREATE_URL = '/rest/issue'
    ISSUES_URL = '/rest/issue/byproject/<project_id>'
    COMMAND_URL = '/rest/issue/<issue>/execute'
    CUSTOM_FIELD_VALUES = '/rest/admin/customfield/<param_name>/<param_value>'
    USER_URL = '/rest/admin/user/<user>'
    # Cookie in which YouTrack carries the authenticated principal.
    API_KEY_COOKIE_NAME = 'jetbrains.charisma.main.security.PRINCIPAL'
    def __init__(self, url, username=None, password=None, api_key=None):
        # Base URL is stored without a trailing slash; all endpoint
        # templates above start with '/'.
        self.url = url.rstrip('/') if url else ''
        if api_key is None:
            # No key supplied: perform a cookie login with the credentials.
            self._login(username, password)
        else:
            # Reuse an existing key by presenting it as the session cookie.
            self.cookies = {self.API_KEY_COOKIE_NAME: api_key}
            self.api_key = api_key
    def _login(self, username, password):
        """POST the credentials and capture the session cookie / API key."""
        credentials = {
            'login': username,
            'password': password
        }
        url = self.url + self.LOGIN_URL
        self._request(url, data=credentials, method='post')
        self.cookies = self.response.cookies
        self.api_key = self.cookies.get(self.API_KEY_COOKIE_NAME)
    def _request(self, url, data=None, params=None, method='get'):
        """Issue a GET/POST, keep the response on self.response, raise on
        HTTP errors, and return the response body parsed as XML.
        """
        if method not in ['get', 'post']:
            raise AttributeError("Invalid method %s" % method)
        kwargs = {
            'url': url,
            'data': data,
            'params': params
        }
        # Session cookies exist after login / api_key setup; attach them.
        if hasattr(self, 'cookies'):
            kwargs['cookies'] = self.cookies
        if method == 'get':
            self.response = requests.get(**kwargs)
        elif method == 'post':
            self.response = requests.post(**kwargs)
        self.response.raise_for_status()
        return BeautifulStoneSoup(self.response.text)
    def _get_enumeration(self, soap):
        """Return the text of each <enumeration> item, or raise
        YouTrackError if the payload reports an <error>.
        """
        if soap.find('error'):
            raise YouTrackError(soap.find('error').string)
        return [item.text for item in soap.enumeration]
    def get_project_name(self, project_id):
        """Return the display name of the given project."""
        url = self.url + self.PROJECT_URL.replace('<project_id>', project_id)
        soap = self._request(url, method='get')
        return soap.project['name']
    def get_user(self, user):
        """Return the parsed <user> element for the given login."""
        url = self.url + self.USER_URL.replace('<user>', user)
        soap = self._request(url, method='get')
        return soap.user
    def get_projects(self):
        """Return the parsed <projects> element listing all projects."""
        url = self.url + self.PROJECTS_URL
        soap = self._request(url, method='get')
        return soap.projects
    def get_priorities(self):
        """Return the priority names from the 'Priorities' bundle."""
        values = self.get_custom_field_values('bundle', 'Priorities')
        return self._get_enumeration(values)
    def get_issue_types(self):
        """Return the issue type names from the 'Types' bundle."""
        values = self.get_custom_field_values('bundle', 'Types')
        return self._get_enumeration(values)
    def get_custom_field_values(self, name, value):
        """Fetch a custom-field resource and return it parsed as XML.

        NOTE: this calls requests.get directly, so unlike _request it does
        not raise for HTTP error statuses.
        """
        url = self.url + (self.CUSTOM_FIELD_VALUES
                          .replace("<param_name>", name)
                          .replace('<param_value>', value))
        response = requests.get(url, cookies=self.cookies)
        return BeautifulStoneSoup(response.text)
    def get_project_issues(self, project_id, query=None, offset=0, limit=15):
        """Return the parsed <issues> element for a project, paginated via
        'after'/'max' and optionally filtered by *query*.
        """
        url = self.url + self.ISSUES_URL.replace('<project_id>', project_id)
        params = {'max': limit, 'after': offset, 'filter': query}
        soap = self._request(url, method='get', params=params)
        return soap.issues
    def create_issue(self, data):
        """POST a new issue and return the parsed <issue> element."""
        url = self.url + self.CREATE_URL
        soap = self._request(url, data=data, method='post')
        return soap.issue
    def execute_command(self, issue, command):
        """Run a YouTrack command string against the given issue id."""
        url = self.url + self.COMMAND_URL.replace('<issue>', issue)
        data = {'command': command}
        self._request(url, data=data, method='post')
    def add_tags(self, issue, tags):
        """Tag the issue by issuing one 'add tag' command per tag."""
        for tag in tags:
            cmd = u'add tag %s' % tag
            self.execute_command(issue, cmd)
| [
"adam@bogdal.pl"
] | adam@bogdal.pl |
ad0979e6aca66c863e3842d3e0935bfb6eda761d | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/dice-roll-simulation/395113484.py | 4bebf6dda3ade473f97b66109b045114c3d4405d | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | # title: dice-roll-simulation
# detail: https://leetcode.com/submissions/detail/395113484/
# datetime: Sun Sep 13 22:57:31 2020
# runtime: 120 ms
# memory: 14 MB
class Solution:
def dieSimulator(self, n: int, rollMax: List[int]) -> int:
MOD = 10 ** 9 + 7
dp =collections.deque([[1] * 6 for i in range(max(rollMax) + 1)])
for i in range(2, n + 1):
dp2 = [0] * 6
for j in range(6):
if i - rollMax[j] <= 0:
dp2[j] = sum(dp[-1]) % MOD
elif i - rollMax[j] == 1:
dp2[j] = (sum(dp[-1]) - 1) % MOD
else:
p = dp[-rollMax[j] - 1]
dp2[j] = (sum(dp[-1]) - sum(p) + p[j]) % MOD
dp.popleft()
dp.append(dp2)
return sum(dp[-1]) % MOD
| [
"ljm51689@gmail.com"
] | ljm51689@gmail.com |
714411dc03e2aaedad3968d900b044a60fada680 | 189e14e07571b4d5720f01db73faaaef26ee9712 | /dj/dj/settings.py | e0d57bfd76c00098690ab693bede6126bd0d0bff | [] | no_license | italomaia/mylittlework | d7e936fa3f24f40ea6a95e7ab01a10208036f439 | f6d0aacec46f8c2adb429626fff0532e8939b8b8 | refs/heads/master | 2021-01-19T19:13:48.605376 | 2017-04-10T02:01:10 | 2017-04-10T02:01:10 | 86,644,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,208 | py | """
Django settings for dj project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qug)assem8iz7&z=ayzkh4w((riz*l!s!1%09gz32#&0=0z=bo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static_files"),
]
| [
"italo.maia@gmail.com"
] | italo.maia@gmail.com |
1f1772421914f015926ea0d471a316d3cdc7ee23 | 766da3ffcbd26e7f58d711f5b0e7312cb365e9fb | /framework/utils/time_meter.py | d1d9c26db12e5b47cb70fb745317ca636bd3c553 | [
"MIT"
] | permissive | RobertCsordas/ndr | 1277b353eb61267e023b671072730bdc7e779ca5 | da20530dfb4336deddfbe5e79d62e72d1dc2580e | refs/heads/master | 2023-09-02T22:38:57.601098 | 2021-11-19T09:52:23 | 2021-11-19T09:52:23 | 414,588,414 | 20 | 4 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | import time
class ElapsedTimeMeter:
    """Accumulating wall-clock stopwatch.

    Time can be accumulated over many start()/stop() intervals; get()
    reports the accumulated total plus any interval currently running.
    Also usable as a context manager:

        meter = ElapsedTimeMeter()
        with meter:
            do_work()
        total = meter.get()
    """

    def __init__(self):
        self.reset()

    def start(self):
        """Begin timing a new interval."""
        self.start_time = time.time()

    def _curr_timer(self) -> float:
        # Elapsed seconds of the interval in progress; 0 when not running.
        if self.start_time is None:
            return 0
        return time.time() - self.start_time

    def stop(self):
        """Close the running interval and fold it into the accumulated sum."""
        self.sum += self._curr_timer()
        self.start_time = None

    def get(self, reset=False) -> float:
        """Return total elapsed seconds (accumulated + running interval).

        With reset=True the meter is zeroed after the value is read.
        """
        res = self.sum + self._curr_timer()
        if reset:
            self.reset()
        return res

    def reset(self):
        """Zero the accumulator and forget any running interval."""
        self.start_time = None
        self.sum = 0

    def __enter__(self):
        assert self.start_time is None
        self.start()
        # Bug fix: return the meter so `with meter as m:` binds m to the
        # meter instead of None.
        return self

    def __exit__(self, *args):
        self.stop()
| [
"xdever@gmail.com"
] | xdever@gmail.com |
e8eab1a20e20aaa2466c43899cd2dbc957cfe1d7 | e3afb1720da16d9437b58ee3839e6184f521d9ee | /05_POST/main.py | 0a316d92d7b61e1ce6515e37c63d58b583fe4ed9 | [] | no_license | jaisenbe58r/APIs_Python | 1517d0bafa7a97bd460292514784105b9b005529 | afae603b2a2df86e65b06b9967c4960835767ae5 | refs/heads/master | 2021-04-11T12:43:16.883269 | 2020-03-21T17:05:26 | 2020-03-21T17:05:26 | 249,022,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | import requests
import os
import json
# Demo: POST a JSON-encoded payload to httpbin's echo endpoint; the echoed
# response body is printed when the server answers 200 OK.
if __name__ == '__main__':
    url = "https://httpbin.org/post"
    payload = {'nombre':'Jaime', 'curso':'python', 'nivel':'intermedio'}
    # The dict is serialised by hand; requests' json=payload argument would
    # also set the Content-Type: application/json header automatically.
    response = requests.post(url, data = json.dumps(payload))
    if response.status_code == 200:
print(response.content) | [
"jsendra@autis.es"
] | jsendra@autis.es |
1b68bf740ed1005e4796f4811ed3a0fb17945fc6 | f8bbdfb112618136fc4adccb03ce25fbfc48bff5 | /panel/module/management_ranking/CustomProcesses/ScoreCompilingProcess.py | 7f3e9d1df3d356a08670fc04958730bb997eec4e | [] | no_license | lazypanda10117/CICSA-Ranking-Platform | 160973987b533ede6e0b94af29b5bc85646b2bc0 | d5f6ac64a1f85c3333c71a7d81749b49145b9a16 | refs/heads/master | 2022-12-09T23:21:28.649252 | 2020-04-28T22:53:07 | 2020-04-28T22:53:07 | 133,093,367 | 3 | 2 | null | 2021-09-22T17:51:39 | 2018-05-11T22:14:01 | Python | UTF-8 | Python | false | false | 1,607 | py | from django.shortcuts import redirect
from django.shortcuts import reverse
from misc.CustomFunctions import RequestFunctions
from api.model_api import EventAPI
from api.model_api import SummaryAPI
from panel.module.base.block.CustomProcesses import AbstractBaseProcess
class ScoreCompilingProcess(AbstractBaseProcess):
    """Persists the per-school scores posted for an event and marks the
    event as completed.
    """
    def process(self):
        """Update each school's summary row from the posted form, flag the
        event 'done', then redirect to the event's activity page.
        """
        post_dict = dict(self.request.POST)
        event_id = int(self.param["id"])
        related_summaries = SummaryAPI(self.request).filterSelf(summary_event_parent=event_id)
        # The form posts four fields per school row (school_id_i, score_i,
        # ranking_i, override_ranking_i), so len(post_dict)/4 gives the row
        # count.  NOTE(review): assumes no unrelated keys (e.g. a CSRF
        # token) end up in post_dict -- confirm against the template.
        for i in range(1, int(len(post_dict)/4)+1):
            school_id = int(RequestFunctions.getSingleRequestObj(post_dict, 'school_id_' + str(i)))
            score = int(RequestFunctions.getSingleRequestObj(post_dict, 'score_' + str(i)))
            ranking = int(RequestFunctions.getSingleRequestObj(post_dict, 'ranking_' + str(i)))
            override_ranking = int(RequestFunctions.getSingleRequestObj(post_dict, 'override_ranking_' + str(i)))
            # One summary row per school under this event; write the scores.
            summary_id = related_summaries.get(summary_event_school=school_id).id
            result = dict(ranking=ranking, override_ranking=override_ranking, race_score=score)
            SummaryAPI(self.request).updateSummaryResult(summary_id, result)
        EventAPI(self.request).updateEventStatus(event_id, 'done')
        return redirect(
            reverse(
                'panel.module.management_ranking.view_dispatch_param',
                args=['activity', event_id]
            )
        )
    def parseParams(self, param):
        """Validate that the URL parameter is numeric and wrap it as the
        event id consumed by process().
        """
        super().parseMatch('\d+')
        param = dict(id=param)
        return param
| [
"jeffreykam0415@gmail.com"
] | jeffreykam0415@gmail.com |
1a58f3399d6440b08b067dfb5c463b434e8e21a5 | 6a423fba995b1106086998477eb2bbd1953d3e70 | /js_events/cms_menus.py | 8bf76b09548418fb76b73807dc14013811acb02c | [] | no_license | compoundpartners/js-events | c94addf5c3d4440ed3d170b1232d753120b92262 | 3d2798c6e197cce96d246305642fed1002ce67f7 | refs/heads/master | 2023-08-09T05:52:03.545468 | 2023-07-24T15:06:36 | 2023-07-24T15:06:36 | 170,514,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,773 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from django.core.urlresolvers import NoReverseMatch
except ImportError:
# Django 2.0
from django.urls import NoReverseMatch
from django.utils.translation import (
get_language_from_request,
ugettext_lazy as _,
)
from cms.menu_bases import CMSAttachMenu
from cms.apphook_pool import apphook_pool
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from .models import Event
class EventsMenu(CMSAttachMenu):
    """django CMS attach-menu that builds navigation nodes for Event objects."""
    name = _('Events Menu')
    def get_queryset(self, request):
        """Returns base queryset with support for preview-mode."""
        # Outside the toolbar's edit mode only published events are listed;
        # editors in edit mode also see unpublished ones.
        queryset = Event.objects
        if not (request.toolbar and request.toolbar.edit_mode_active):
            queryset = queryset.published()
        return queryset
    def get_nodes(self, request):
        """Return one NavigationNode per event translated into the request
        language; events whose URL cannot be reversed are skipped.
        """
        nodes = []
        language = get_language_from_request(request, check_path=True)
        events = self.get_queryset(request).active_translations(language)
        # When the menu is attached to an apphooked page, narrow the events
        # to that page's app_config (its application namespace).
        if hasattr(self, 'instance') and self.instance:
            app = apphook_pool.get_apphook(self.instance.application_urls)
            if app:
                config = app.get_config(self.instance.application_namespace)
                if config:
                    events = events.filter(app_config=config)
        for event in events:
            try:
                url = event.get_absolute_url(language=language)
            except NoReverseMatch:
                url = None
            if url:
                node = NavigationNode(event.safe_translation_getter(
                    'title', language_code=language), url, event.pk)
                nodes.append(node)
        return nodes
# Register the menu with django CMS so it can be attached to pages.
menu_pool.register_menu(EventsMenu)
| [
"evgeny.dmi3ev@gmail.com"
] | evgeny.dmi3ev@gmail.com |
c5af36d24bce66eace96ce089931f566d69ce2bc | 7860d9fba242d9bdcb7c06c32ee4064e4a7fa2f1 | /litex_boards/platforms/trenz_max1000.py | 9772cf7130da1dc27fbf2845ca2f848cf350ffce | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | litex-hub/litex-boards | ef1f200fd6d34c96621f4efa094ede874f4c34ab | b92c96b3a445fde31037f593a40fe621f85cb58c | refs/heads/master | 2023-09-03T15:09:11.198560 | 2023-08-30T15:22:11 | 2023-08-30T15:22:11 | 191,191,221 | 291 | 283 | BSD-2-Clause | 2023-09-03T20:32:58 | 2019-06-10T15:09:10 | Python | UTF-8 | Python | false | false | 4,043 | py | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019-2021 Antti Lukats <antti.lukats@gmail.com>
# SPDX-License-Identifier: BSD-2-Clause
#
# http://trenz.org/max1000-info
from litex.build.generic_platform import *
from litex.build.altera import AlteraPlatform
from litex.build.altera.programmer import USBBlaster
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk12", 0, Pins("H6"), IOStandard("3.3-V LVTTL")),
# Leds
("user_led", 0, Pins("A8"), IOStandard("3.3-V LVTTL")),
("user_led", 1, Pins("A9"), IOStandard("3.3-V LVTTL")),
("user_led", 2, Pins("A11"), IOStandard("3.3-V LVTTL")),
("user_led", 3, Pins("A10"), IOStandard("3.3-V LVTTL")),
("user_led", 4, Pins("B10"), IOStandard("3.3-V LVTTL")),
("user_led", 5, Pins("C9"), IOStandard("3.3-V LVTTL")),
("user_led", 6, Pins("C10"), IOStandard("3.3-V LVTTL")),
("user_led", 7, Pins("D8"), IOStandard("3.3-V LVTTL")),
# Buttons
("user_btn", 0, Pins("E6"), IOStandard("3.3-V LVTTL")),
("user_btn", 1, Pins("E7"), IOStandard("3.3-V LVTTL")), # nConfig.
# Serial
("serial", 0,
Subsignal("tx", Pins("B4"), IOStandard("3.3-V LVTTL")),
Subsignal("rx", Pins("A4"), IOStandard("3.3-V LVTTL"))
),
# SPI Flash
("spiflash4x", 0,
Subsignal("cs_n", Pins("B3")),
Subsignal("clk", Pins("A3")),
Subsignal("dq", Pins("A2", "B2", "B9", "C4")),
IOStandard("3.3-V LVTTL")
),
("spiflash", 0,
Subsignal("cs_n", Pins("B3")),
Subsignal("clk", Pins("A3")),
Subsignal("mosi", Pins("A2")),
Subsignal("miso", Pins("B2")),
Subsignal("wp", Pins("B9")),
Subsignal("hold", Pins("C4")),
IOStandard("3.3-V LVTTL"),
),
# SDRAM
("sdram_clock", 0, Pins("M9"), IOStandard("3.3-V LVTTL")),
("sdram", 0,
Subsignal("a", Pins(
"K6 M5 N5 J8 N10 M11 N9 L10",
"M13 N8 N4 M10")),
Subsignal("ba", Pins("N6 K8")),
Subsignal("cs_n", Pins("M4")),
Subsignal("cke", Pins("M8")),
Subsignal("ras_n", Pins("M7")),
Subsignal("cas_n", Pins("N7")),
Subsignal("we_n", Pins("K7")),
Subsignal("dq", Pins(
"D11 G10 F10 F9 E10 D9 G9 F8",
"F13 E12 E13 D12 C12 B12 B13 A12")),
Subsignal("dm", Pins("E9 F12")),
IOStandard("3.3-V LVTTL")
),
# all IO not connected to peripherals mapped to MFIO
# <- LEDS -> <- PMOD -> <- D0..D14, D11R, D12R -> <- AIN0..AIN7 -> JE [C O I S i1 i2]sw
("bbio", 0, Pins("A8 A9 A11 A10 B10 C9 C10 D8 M3 L3 M2 M1 N3 N2 K2 K1 H8 K10 H5 H4 J1 J2 L12 J12 J13 K11 K12 J10 H10 H13 G12 B11 G13 E1 C2 C1 D1 E3 F1 E4 B1 E5 J6 J7 K5 L5 J5 L4 E6"),
IOStandard("3.3-V LVTTL")),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(AlteraPlatform):
    """LiteX platform for the Trenz MAX1000 board (Intel MAX 10 FPGA)."""

    # 12 MHz on-board oscillator is the default clock.
    default_clk_name = "clk12"
    default_clk_period = 1e9/12e6

    def __init__(self, toolchain="quartus"):
        """Instantiate the 10M08SAU169C8G device and apply MAX 10 specific
        Quartus global assignments."""
        AlteraPlatform.__init__(self, "10M08SAU169C8G", _io, toolchain=toolchain)
        self.add_platform_command("set_global_assignment -name FAMILY \"MAX 10\"")
        # Free the dual-purpose configuration pins for user I/O.
        self.add_platform_command("set_global_assignment -name ENABLE_CONFIGURATION_PINS OFF")
        self.add_platform_command("set_global_assignment -name INTERNAL_FLASH_UPDATE_MODE \"SINGLE IMAGE WITH ERAM\"")

    def create_programmer(self):
        """Program the board through its USB-Blaster interface."""
        return USBBlaster()

    def do_finalize(self, fragment):
        """Add timing constraints after the design is finalized."""
        AlteraPlatform.do_finalize(self, fragment)
        self.add_period_constraint(self.lookup_request("clk12", loose=True), 1e9/12e6)
        # Generate PLL clock in STA
        self.toolchain.additional_sdc_commands.append("derive_pll_clocks")
        # Calculates clock uncertainties
        self.toolchain.additional_sdc_commands.append("derive_clock_uncertainty")
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
b8d06a8e1b6fd0756d5f515307ecef361a32c7f9 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/logic/utilities/dimacs.py | bc3b091d40e5b4892e961122c7f6f09c982cc07c | [] | no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | #import pythonista
"""For reading in DIMACS file format
www.cs.ubc.ca/~hoos/SATLIB/Benchmarks/SAT/satformat.ps
"""
from __future__ import print_function, division
from sympy.core import Symbol
from sympy.logic.boolalg import And, Or
import re
def load(s):
    """Loads a boolean expression from a string.

    Examples
    ========

    >>> from sympy.logic.utilities.dimacs import load
    >>> load('1')
    cnf_1
    >>> load('1 2')
    Or(cnf_1, cnf_2)
    >>> load('1 \\n 2')
    And(cnf_1, cnf_2)
    >>> load('1 2 \\n 3')
    And(Or(cnf_1, cnf_2), cnf_3)
    """
    clauses = []

    # Raw strings avoid invalid escape sequences ('\s', '\d') that are
    # deprecated in plain string regex patterns.
    pComment = re.compile(r'c.*')
    pStats = re.compile(r'p\s*cnf\s*(\d*)\s*(\d*)')

    # Iterate directly instead of repeatedly popping the head of a list
    # (pop(0) is O(n) per call, quadratic overall).
    for line in s.split('\n'):
        # Skip comment lines ("c ...") and the statistics header
        # ("p cnf <nvars> <nclauses>").
        if pComment.match(line) or pStats.match(line):
            continue
        literals = []
        for lit in line.rstrip('\n').split(' '):
            if lit == '':
                continue
            value = int(lit)
            if value == 0:
                # In DIMACS format a clause is terminated by "0".
                continue
            sym = Symbol("cnf_%s" % abs(value))
            # Negative literals become negated symbols.
            literals.append(sym if value > 0 else ~sym)
        if literals:
            clauses.append(Or(*literals))

    return And(*clauses)
def load_file(location):
    """Loads a boolean expression from a file."""
    with open(location) as dimacs_file:
        contents = dimacs_file.read()
    return load(contents)
| [
"tberk@gmx.at"
] | tberk@gmx.at |
57ed5ba90da3aa7395be14a2b86bb1c3a1d84f41 | 10c9ef03f6916f9596d18ecc28c0c73f548017ad | /manage.py | e59608d55b3e953ea1e3a79cf395e6127c56395b | [] | no_license | WinningAddicted/website-Django | 2127756b1b9ca6389e79822f9df59207ea9a1d46 | 79bef48f408df9123d28840fba1179710a9f7b38 | refs/heads/master | 2021-01-11T01:10:12.765425 | 2016-10-25T13:07:59 | 2016-10-25T13:07:59 | 71,049,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the environment already
    # provides a settings module, then hand control to Django's CLI.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yatharth.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"="
] | = |
296fcbafb9e14304b02ae171edcc38f915888c75 | 59f0fde411ca668b874fa6fa6001069b9146f596 | /src/blog/migrations/0001_initial.py | 84fd012107ea912917ff84da08179642506c4756 | [] | no_license | nabilatajrin/django-blog-application | 4c256755fc31b41f609b44a5329fb128d46c5fa1 | 7971f8f7d8b3b442fbd4530bc0f32dff7865adcc | refs/heads/master | 2020-12-06T16:09:00.310415 | 2020-11-03T05:37:34 | 2020-11-03T05:37:34 | 232,503,248 | 0 | 0 | null | 2020-01-08T07:24:39 | 2020-01-08T07:19:38 | Python | UTF-8 | Python | false | false | 474 | py | # Generated by Django 2.2 on 2020-01-09 01:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration of the blog app: creates the Blog1Post table with an
    # auto primary key and a free-form text title.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Blog1Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField()),
            ],
        ),
    ]
| [
"nabilatajrin@gmail.com"
] | nabilatajrin@gmail.com |
339639d4a52ad804b5f0d74045f5f8bc6b486ab6 | c94f888541c0c430331110818ed7f3d6b27b788a | /ak_05b080ffa82d4d06b1e7a357a34277ba/python/setup.py | 3c6ee05721510fdc60c55feb4c7cd7b6f1706b3b | [
"Apache-2.0",
"MIT"
] | permissive | alipay/antchain-openapi-prod-sdk | 48534eb78878bd708a0c05f2fe280ba9c41d09ad | 5269b1f55f1fc19cf0584dc3ceea821d3f8f8632 | refs/heads/master | 2023-09-03T07:12:04.166131 | 2023-09-01T08:56:15 | 2023-09-01T08:56:15 | 275,521,177 | 9 | 10 | MIT | 2021-03-25T02:35:20 | 2020-06-28T06:22:14 | PHP | UTF-8 | Python | false | false | 2,650 | py | # -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for antchain_ak_05b080ffa82d4d06b1e7a357a34277ba.
Created on 19/08/2022
@author: Ant Chain SDK
"""
PACKAGE = "antchain_sdk_ak_05b080ffa82d4d06b1e7a357a34277ba"
NAME = "antchain_ak_05b080ffa82d4d06b1e7a357a34277ba" or "alibabacloud-package"
DESCRIPTION = "Ant Chain Ak_05b080ffa82d4d06b1e7a357a34277ba SDK Library for Python"
AUTHOR = "Ant Chain SDK"
AUTHOR_EMAIL = "sdk-team@alibabacloud.com"
URL = "https://github.com/alipay/antchain-openapi-prod-sdk"
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
"antchain_alipay_util>=1.0.1, <2.0.0",
"alibabacloud_tea_util>=0.3.6, <1.0.0",
"alibabacloud_rpc_util>=0.0.4, <1.0.0"
]
LONG_DESCRIPTION = ''
if os.path.exists('./README.md'):
with open("README.md", encoding='utf-8') as fp:
LONG_DESCRIPTION = fp.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache License 2.0",
url=URL,
keywords=["antchain","ak","05b080ffa82d4d06b1e7a357a34277ba"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=REQUIRES,
python_requires=">=3.6",
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Topic :: Software Development"
)
)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
bce1e3aee1f648f85291bf76517e5522f6502ef0 | 3af6960c805e9903eb27c09d8bc7ebc77f5928fe | /problems/0056_Merge_Intervals/__init__.py | 5304b1da2e9a95667938b3f5520a802216339283 | [] | no_license | romain-li/leetcode | b3c8d9d4473eebd039af16ad2d4d99abc2768bdd | 5e82b69bd041c2c168d75cb9179a8cbd7bf0173e | refs/heads/master | 2020-06-04T20:05:03.592558 | 2015-06-08T18:05:03 | 2015-06-08T18:05:03 | 27,431,664 | 2 | 1 | null | 2015-06-08T18:05:04 | 2014-12-02T12:31:58 | Python | UTF-8 | Python | false | false | 298 | py | ID = '56'
TITLE = 'Merge Intervals'
DIFFICULTY = 'Hard'
URL = 'https://oj.leetcode.com/problems/merge-intervals/'
BOOK = False
PROBLEM = r"""Given a collection of intervals, merge all overlapping intervals.
For example,
Given `[1,3],[2,6],[8,10],[15,18]`,
return `[1,6],[8,10],[15,18]`.
"""
| [
"romain_li@163.com"
] | romain_li@163.com |
cf139f390e7f58784c0fcf319c590eff3ad17f0e | 556403cb93b2fdd464c3aef4cba4f1c3dc42e9d7 | /AutomationProject/demo/getTestname.py | 1e04817bef4bb45df162c33780e2b5c0aac891af | [] | no_license | msivakumarm/PycharmProjects | 4d90a0105f334f2393d30fe46dc650808002b4fd | 7d84194a576f9ec8356ff272642d07dbddc48d42 | refs/heads/master | 2020-09-06T14:42:12.945424 | 2019-11-08T11:42:14 | 2019-11-08T11:42:14 | 219,989,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | import inspect
# functions
def whoami():
    """Return the name of the function that called this helper."""
    caller_record = inspect.stack()[1]
    return caller_record[3]


def myfunc():
    """Demonstrate whoami() by printing this function's own name."""
    print(whoami())
myfunc() | [
"sivakumarm.mamillapalli@gmail.com"
] | sivakumarm.mamillapalli@gmail.com |
949d8a85b10490cceaf3bf00e17d2898bd3f6164 | 48d820d4bd6a433c2b0fdb0dcb7657b62db050bf | /Training_Work/ODOO_C_MODULE_BACKUPS/new_app/controllers/controllers.py | 3d7cfd1ee67329ccccc1c9ed2fdf8fe04ca77a97 | [] | no_license | dhruv-aktiv/training_task_data | 1a30580a512aa4831fb547b250faffff11f7e008 | 3d8b25ca812e876a484d387fc57272257322c85f | refs/heads/master | 2023-06-07T07:06:04.193576 | 2021-07-01T04:37:13 | 2021-07-01T04:37:13 | 381,908,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | # -*- coding: utf-8 -*-
# from odoo import http
# class NewApp(http.Controller):
# @http.route('/new_app/new_app/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/new_app/new_app/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('new_app.listing', {
# 'root': '/new_app/new_app',
# 'objects': http.request.env['new_app.new_app'].search([]),
# })
# @http.route('/new_app/new_app/objects/<model("new_app.new_app"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('new_app.object', {
# 'object': obj
# })
| [
"dhruv.s@icreativetechnolabs.com"
] | dhruv.s@icreativetechnolabs.com |
3b6a96c0a23de192aef4979a78648815eb239d92 | 992eeae8bb54ac7eb258994049e74474f090eb36 | /13. Exam Prep Questions/08. SoftUni Past Exams/03. Mobile Operator.py | dc6f231cff698f876d138de9b09d21bcfbc97dc0 | [] | no_license | elenaborisova/Python-Basics | 4ae856ee50a543479902467167f3b8f8169284b8 | b7476e3c25c2448f4f12a29e16f20caa10b72038 | refs/heads/main | 2023-01-04T17:17:17.091347 | 2020-10-26T22:56:11 | 2020-10-26T22:56:11 | 307,164,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | contract_duration = input() # one or two years
contract_type = input()      # plan size: "Small", "Middle", "Large", "ExtraLarge"
mobile_data_added = input()  # "yes" / "no"
number_of_months = int(input())

# Monthly base price per contract duration ("one"/"two" years) and plan size.
monthly_prices = {
    "one": {"Small": 9.98, "Middle": 18.99, "Large": 25.98, "ExtraLarge": 35.99},
    "two": {"Small": 8.58, "Middle": 17.09, "Large": 23.59, "ExtraLarge": 31.79},
}
# Unknown duration/plan leaves the price at 0, matching the original branches.
contract_price = monthly_prices.get(contract_duration, {}).get(contract_type, 0)

# Mobile data surcharge is tiered by the base monthly price.
if mobile_data_added == "yes":
    if contract_price <= 10:
        contract_price += 5.50
    elif contract_price <= 30:
        contract_price += 4.35
    else:
        contract_price += 3.85

# Two-year contracts receive a 3.75% discount on the monthly price.
if contract_duration == "two":
    contract_price -= contract_price * 0.0375
print(f"{contract_price * number_of_months:.2f} lv.") | [
"elenaborrisova@gmail.com"
] | elenaborrisova@gmail.com |
d1b6fae5395dae27e6940600476a13e19aa1ece8 | 38a9eda76b10e0cceafc4a255f79c22df8847001 | /logrec/dataprep/split/samecase/random_word_selector_for_splitting.py | a48b3453b655ee7984a4fe21c6ecacb18a091c73 | [
"MIT"
] | permissive | hlibbabii/log-recommender | 4272bb1c91c5c213ee4944930584af713da01c6d | 03c975da4029676acb2c29f5915e30b2b29fce6c | refs/heads/master | 2022-12-21T06:28:04.702117 | 2020-03-27T11:39:30 | 2020-03-27T11:39:30 | 123,821,082 | 2 | 1 | MIT | 2022-11-22T03:57:00 | 2018-03-04T19:25:35 | Python | UTF-8 | Python | false | false | 3,689 | py | import logging
import math
import os
import random
import re
from collections import defaultdict
from logrec.dataprep import base_project_dir
logger = logging.getLogger(__name__)
base_dataset_dir = f'{base_project_dir}/nn-data/devanbu_no_replaced_identifier_split_no_tabs_new_splits3_under_5000_15_percent/'
path_to_labeled_data = os.path.join(base_dataset_dir, 'sample.txt')
path_to_labeled_data2 = os.path.join(base_dataset_dir, 'sample2.txt')
vocab_file = os.path.join(base_dataset_dir, 'vocab.txt')
def get_already_selected_words():
selected_words = defaultdict(list)
key_to_labeled_data = {}
with open(path_to_labeled_data, 'r') as f:
for line in f:
split_line = line[:-1].split('|')
original_word = split_line[0]
key_to_labeled_data[original_word] = line[:-1]
selected_words[len(original_word)].append(original_word)
selected_words.default_factory = None
return selected_words, key_to_labeled_data
def print_dict_diffs(dict1, dict2, max_possible_word_length=100):
for i in range(max_possible_word_length):
if i + 1 in dict1 or i + 1 in dict2:
print(f'{i+1}: {dict1[i+1] if i+1 in dict1 else 0} --> {dict2[i+1] if i+1 in dict2 else 0}')
def log_proportional(dict, degree, total_sum):
dict_log = {}
dict_log_rounded = {}
for key, val in dict.items():
dict_log[key] = math.log(val) ** degree
all_log = sum(dict_log.values())
for key, val in dict.items():
dict_log[key] = dict_log[key] / all_log * total_sum
dict_log_rounded[key] = math.ceil(dict_log[key])
all_log_prop_rounded = sum(dict_log_rounded.values())
n_to_substract = all_log_prop_rounded - total_sum
keys_of_largest = sorted(dict_log_rounded.items(), key=lambda x: x[1], reverse=True)[:n_to_substract]
for key, _ in keys_of_largest:
dict_log_rounded[key] -= 1
return dict_log_rounded
def get_dict(vocab_file):
dict = defaultdict(list)
with open(vocab_file, 'r') as f:
for l in f:
line = l.split(" ")
dict[len(line[0])].append(line[0])
dict.default_factory = None
return dict
def randomly_select_words_from_dict(dict, already_selected_words=defaultdict(list)):
already_selected_words.default_factory = list
dict_stats = {k: len(v) for k, v in dict.items()}
dict_stats_log_proportional = log_proportional(dict_stats, 5, 1000)
items = dict_stats_log_proportional.items()
for k, v in items:
while v > len(already_selected_words[k]):
picked_word = random.choice(dict[k])
if picked_word not in already_selected_words[k] and re.fullmatch("[a-z]+", picked_word):
already_selected_words[k].append(picked_word)
return already_selected_words
def write_sample_to_files(length_to_words_dict, original_to_labeled_data, file):
words = [w for k, w_list in length_to_words_dict.items() for w in w_list]
with open(file, 'w') as f:
for w in words:
if w in original_to_labeled_data:
w = original_to_labeled_data[w]
f.write(f'{w}\n')
if __name__ == '__main__':
dict = get_dict(vocab_file)
dict_stats = {k: len(v) for k, v in dict.items()}
dict_stats_log_proportional = log_proportional(dict_stats, 5, 1000)
already_selected_words, key_to_labeled_data = get_already_selected_words()
selected_words = randomly_select_words_from_dict(dict, already_selected_words)
write_sample_to_files(selected_words, key_to_labeled_data, path_to_labeled_data2)
print_dict_diffs({k: len(v) for k, v in already_selected_words.items()}, dict_stats_log_proportional)
| [
"hlibbabii@gmail.com"
] | hlibbabii@gmail.com |
1d06373897f48a555acb6a7d135865bc7e91c296 | a7f16c95f973905e880ad4dc277fbba890486654 | /wildlifecompliance/migrations/0156_merge_20190404_1353.py | d96e85d324a57c0b6ffd2aafb82f55f0ed27fe98 | [
"Apache-2.0"
] | permissive | dbca-wa/wildlifecompliance | 9e98e9c093aeb25dbb7ff8d107be47e29bcd05e1 | cb12ad9ea1171f10b5297cdb7e1eb6ea484e633d | refs/heads/master | 2023-08-08T14:37:05.824428 | 2023-07-31T02:57:23 | 2023-07-31T02:57:23 | 232,276,030 | 1 | 17 | NOASSERTION | 2023-07-31T02:57:24 | 2020-01-07T08:12:53 | Python | UTF-8 | Python | false | false | 361 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-04-04 05:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0155_merge_20190329_1202'),
('wildlifecompliance', '0153_auto_20190401_1535'),
]
operations = [
]
| [
"brendan.blackford@dbca.wa.gov.au"
] | brendan.blackford@dbca.wa.gov.au |
74b80d103cc37eb12487e7e844178fe41b54c302 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_mjsull_question3.py | b771467002762d05249323aa63a62a9f07232319 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 3,080 | py | import sys
out_file = open(sys.argv[2], 'w')
from math import sqrt
def mrange(start, stop, step):
    """Yield start, start+step, ... for as long as the value stays below stop."""
    current = start
    while current < stop:
        yield current
        current += step


def is_prime(num):
    """Trial-division primality test."""
    if num == 2:
        return True
    if num < 2 or num % 2 == 0:
        return False
    # Only odd divisors up to floor(sqrt(num)) need to be checked.
    for divisor in mrange(3, int(sqrt(num)) + 1, 2):
        if num % divisor == 0:
            return False
    return True
def rwh_primes2(n):
    # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
    """ Input n>=6, Returns a list of primes, 2 <= p < n """
    # NOTE(review): Python 2 code (xrange, integer "/" division); it will not
    # run unchanged on Python 3.
    correction = (n%6>1)
    # Normalize n so that the 6k +/- 1 wheel used below covers the range.
    n = {0:n,1:n-1,2:n+4,3:n+3,4:n+2,5:n+1}[n%6]
    # sieve[i] represents candidate 3*i+1|1 (i.e. numbers of form 6k+/-1).
    sieve = [True] * (n/3)
    sieve[0] = False
    for i in xrange(int(n**0.5)/3+1):
        if sieve[i]:
            k=3*i+1|1
            # Strike out multiples of k; the two strided slice assignments
            # cover both residues of the 6k+/-1 wheel.
            sieve[ ((k*k)/3) ::2*k]=[False]*((n/6-(k*k)/6-1)/k+1)
            sieve[(k*k+4*k-2*k*(i&1))/3::2*k]=[False]*((n/6-(k*k+4*k-2*k*(i&1))/6-1)/k+1)
    return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]
# Google Code Jam 2016 qualification, problem C ("Coin Jam"):
# for each case, emit num_coins binary-looking "jamcoins" of length
# coin_length together with one nontrivial divisor of their value in
# every base 2..10. NOTE(review): Python 2 era code; indentation below is
# reconstructed conventionally.
with open(sys.argv[1]) as in_file:
    num_cases = int(in_file.readline().rstrip())
    for i in range(1, num_cases + 1):
        coin_length, num_coins = in_file.readline().split()
        num_coins = int(num_coins)
        coin_length = int(coin_length)
        out_file.write('Case #' + str(i) + ':\n')
        coins = []
        denoms = []
        # Smallest candidate: binary string 10...01 (must start and end in 1).
        coin_int = int(str(10 ** (coin_length - 1) + 1), 2)
        last_coin_int = int('1' * coin_length)
        prime_list = rwh_primes2(1000)
        # First pass: cheap check -- accept candidates whose value in every
        # base 2..10 has a small prime factor (< 1000).
        while len(coins) < num_coins:
            coin = "{0:b}".format(coin_int)
            if coin == str(last_coin_int):
                break
            if coin[-1] == '0':
                # Even values cannot be jamcoins; skip.
                coin_int += 1
                continue
            the_denomins = []
            for j in range(2, 11):
                the_int = int(coin, j)
                for k in prime_list:
                    if the_int % k == 0:
                        the_denomins.append(k)
                        break
            if len(the_denomins) == 9:
                # A divisor was found for all nine bases: valid jamcoin.
                coins.append(coin)
                denoms.append(the_denomins)
            coin_int += 1
        # Second (slow) pass from the start, in case the first pass did not
        # produce enough coins: full primality test plus smallest-factor search.
        coin_int = int(str(10 ** (coin_length - 1) + 1), 2)
        while len(coins) < num_coins:
            coin = "{0:b}".format(coin_int)
            if coin[-1] == '0' or coin in coins:
                coin_int += 1
                continue
            the_denomins = []
            for j in range(2, 11):
                the_int = int(coin, j)
                if is_prime(the_int):
                    # Prime in some base: not a jamcoin, stop checking bases.
                    break
                else:
                    # Find the smallest nontrivial divisor by trial division.
                    lowest_denom = None
                    x = 2
                    while lowest_denom is None:
                        if the_int % x == 0:
                            lowest_denom = x
                        x += 1
                    the_denomins.append(lowest_denom)
            if len(the_denomins) == 9:
                coins.append(coin)
                denoms.append(the_denomins)
            coin_int += 1
        for j, k in zip(coins, denoms):
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
5917a97739223f8f73a354d07010fcee9adf413b | 9848a719ddfdd21b5fe1fa2f55da290c0f6952dc | /find-positive-integer-solution-for-a-given-equation.py | b82f8bf6dc218a2aad30c5ddee48ba089412517b | [] | no_license | maomao905/algo | 725f7fe27bb13e08049693765e4814b98fb0065a | 84b35ec9a4e4319b29eb5f0f226543c9f3f47630 | refs/heads/master | 2023-03-13T12:38:55.401373 | 2021-03-25T01:55:48 | 2021-03-25T01:55:48 | 351,278,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py | """
binary search
O(XlogY)
"""
from typing import List
"""
This is the custom function interface.
You should not implement it, or speculate about its implementation
"""
class CustomFunction:
# Returns f(x, y) for any given positive integers x and y.
# Note that f(x, y) is increasing with respect to both x and y.
# i.e. f(x, y) < f(x + 1, y), f(x, y) < f(x, y + 1)
def f(self, x, y):
return x * y
class Solution:
    """For every x in [1, 1000], binary search for the y with f(x, y) == z.

    Valid because f is strictly increasing in y. O(X log Y) calls to f.
    """

    def findSolution(self, customfunction: 'CustomFunction', z: int) -> List[List[int]]:
        pairs = []
        for x in range(1, 1001):
            lo, hi = 1, 1000
            while lo < hi:
                mid = (lo + hi) // 2
                value = customfunction.f(x, mid)
                if value == z:
                    lo = mid
                    break
                if value < z:
                    lo = mid + 1
                else:
                    hi = mid - 1
            # Verify the candidate; the search may end without an exact hit.
            if customfunction.f(x, lo) == z:
                pairs.append([x, lo])
        return pairs
"""
matrix search
- reduct search space row/column one by one
if mat[i][j] > z: mat[i+1][j] (go down)
if mat[i][j] < z: mat[i][j-1] (go left)
if mat[i][j] == z: mat[i-1][j-1] (go left and down)
1 2 3 4 5
2 3 4 5 6
3 4 6 7 8
4 5 6 7 10
5 7 9 11 12
O(X+Y)
"""
class Solution:
    """Staircase walk over the implicit sorted matrix f(x, y).

    Start at the corner (x=1, y=1000); f increasing in both arguments lets us
    discard one row or column per step -- O(X + Y) calls to f.
    """

    def findSolution(self, customfunction: 'CustomFunction', z: int) -> List[List[int]]:
        matches = []
        x = 1
        y = 1000
        while 1 <= x <= 1000 and 1 <= y <= 1000:
            value = customfunction.f(x, y)
            if value == z:
                matches.append([x, y])
                # Both coordinates move: no other solution shares x or y.
                x, y = x + 1, y - 1
            elif value < z:
                x += 1
            else:
                y -= 1
        return matches
"""
optimized binary search
possible y
x= 1, 1 2 3 4 5 6 -> if binary search returns 3 and f(x,y)==z
x=2, 1 2 -> possible y is only 1 or 2, y < 3 since f(x,y)<f(x+1,y)
loop X time and search time is logY + log(Y-1) + log(Y-2) .... 1
O(XlogY)
"""
class Solution:
    """Binary search with a shrinking upper bound.

    Once a y is found for some x, every larger x needs a y no larger than it,
    so each successive search runs over [1, previous y].
    """

    def findSolution(self, customfunction: 'CustomFunction', z: int) -> List[List[int]]:
        def search_y(x, upper):
            # Binary search y in [1, upper]; the result is a candidate that
            # still has to be verified by the caller.
            lo, hi = 1, upper
            while lo < hi:
                mid = (lo + hi) // 2
                value = customfunction.f(x, mid)
                if value == z:
                    return mid
                if value < z:
                    lo = mid + 1
                else:
                    hi = mid - 1
            return lo

        results = []
        upper = 1000
        for x in range(1, 1001):
            y = search_y(x, upper)
            if customfunction.f(x, y) == z:
                results.append([x, y])
            upper = y
        return results
# Quick smoke test: with f(x, y) = x * y, the pairs with product 5 are
# (1, 5) and (5, 1).
customfunction = CustomFunction()
s = Solution()
print(s.findSolution(customfunction, 5))
| [
"maoya.sato@gmail.com"
] | maoya.sato@gmail.com |
935e4d71d8e2cc2d2787b422938bbc1c20cbd6cd | a451c9f6b4e0fad4efdc7440e6a180fbed580a15 | /DB_lab1/randomize_db.py | 5e1d4b4a9d2b5511379e642aa8d0c96c90898ab2 | [] | no_license | serhiisad/DB-labs-2018 | 0124c6b997ad641fb952e62f9caee6e63ff0459f | 8316d2ce6a7cfa2b45a567d4e981260435893d44 | refs/heads/master | 2020-03-29T19:51:52.931565 | 2018-12-27T20:54:52 | 2018-12-27T20:54:52 | 150,284,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | import rand_data.parser as parser
from database import Database as db
from datetime import *
import names
import random
from entities.entities import *
from database import Database
def get_random_name():
    # `names` is a third-party random-name generator package.
    return names.get_full_name()
def get_rand_boolean():
    """Return True or False chosen uniformly at random."""
    options = [True, False]
    return random.choice(options)
def get_rand_date(min_year=1970, max_year=datetime.now().year):
    """Return a random date between Jan 1 of min_year and (roughly) the end
    of max_year; years are counted as 365 days, so leap days shorten the
    tail of the range slightly."""
    start = date(min_year, 1, 1)
    span_years = max_year - min_year + 1
    end = start + timedelta(days=365 * span_years)
    offset = (end - start) * random.random()
    return start + offset
def random_fill_db(projects_cnt=4, teamleads_cnt=3, teams_cnt=3, devs_cnt=13):
    """Wipe the database and repopulate it with random developers, teams,
    projects and teamleads.

    NOTE(review): each loop iterates ``cnt - 1`` times, creating one fewer
    record than the parameter name suggests -- confirm this is intended.
    """
    db = Database() #better to pass it to the random_fill_db
    db.clear_all()
    for k in range(devs_cnt-1):
        new_dev = Developer(get_random_name(), str(get_rand_date()), str(get_rand_boolean()))
        db.create_developer(new_dev)
    # Teams and projects are picked from lists parsed out of static data.
    teams_list = parser.tolist_teams()
    for k in range(teams_cnt-1):
        new_team = random.choice(teams_list)
        db.create_team(new_team)
    projects = parser.tolist_projects()
    for k in range(projects_cnt-1):
        new_proj = random.choice(projects)
        db.create_project(new_proj)
    for k in range(teamleads_cnt-1):
        new_tl = Teamlead(names.get_full_name())
        db.create_teamlead(new_tl)
| [
"serhiisad.kpi@gmail.com"
] | serhiisad.kpi@gmail.com |
7d01fbe9aa522fa6202068b4dedf8344b95f56ea | a46fc5187245f7ac79758ae475d4d865e24f482b | /edit_distance/edit_distance.py | 5dd8b9754dc67ee9bc34183673021cd15ce19585 | [] | no_license | narnat/leetcode | ae31f9321ac9a087244dddd64706780ea57ded91 | 20a48021be5e5348d681e910c843e734df98b596 | refs/heads/master | 2022-12-08T00:58:12.547227 | 2020-08-26T21:04:53 | 2020-08-26T21:04:53 | 257,167,879 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | #!/usr/bin/env python3
class Solution:
    """Levenshtein distance via a full DP table.

    dp[i][j] is the minimum number of single-character insertions, deletions
    and substitutions turning word1[:i] into word2[:j].
    Time O(m*n), space O(m*n).
    """

    def minDistance(self, word1: str, word2: str) -> int:
        m, n = len(word1), len(word2)
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        # Base cases: reaching/leaving the empty prefix costs its length.
        for i in range(1, m + 1):
            dp[i][0] = i
        for j in range(1, n + 1):
            dp[0][j] = j
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                # Substitution is free when the characters already match.
                cost = 0 if word2[j - 1] == word1[i - 1] else 1
                dp[i][j] = min(dp[i - 1][j] + 1,         # delete from word1
                               dp[i][j - 1] + 1,         # insert into word1
                               dp[i - 1][j - 1] + cost)  # substitute / match
        # Fixed: removed stray debug `print(dp, ...)` left over from development.
        return dp[m][n]
class Solution_2:
    """Levenshtein distance with a single rolling row: O(n) extra space."""

    def minDistance(self, word1: str, word2: str) -> int:
        m, n = len(word1), len(word2)
        # row[j] holds the distance between the current word1 prefix and
        # word2[:j]; initialized for the empty word1 prefix.
        row = list(range(n + 1))
        for i in range(1, m + 1):
            diagonal = row[0]          # dp[i-1][j-1] before it is overwritten
            row[0] = i
            for j in range(1, n + 1):
                above = row[j]         # dp[i-1][j]
                cost = 0 if word2[j - 1] == word1[i - 1] else 1
                row[j] = min(row[j - 1] + 1,      # insertion
                             above + 1,           # deletion
                             diagonal + cost)     # substitution / match
                diagonal = above
        return row[n]
if __name__ == '__main__':
    # Ad-hoc manual check of the table-based implementation.
    minDistance = Solution().minDistance
    minDistance("horse", "ros")
    # minDistance("intention", "execution")
| [
"farruh1996@gmail.com"
] | farruh1996@gmail.com |
f7ab2d6e05b9a9ae4214f93604ca92b85456a336 | e14372adf86d3c4f9e73c9f7111db3215c696c3d | /3.算法小抄/排序/10.排序算法总结.py | dc74972ad4d1b42d5b3b41139e6b38b0038e6a8a | [] | no_license | hewei-bit/PYTHON_learning | 71ddd7560a52575528547187f4fb40f39a3cbbdb | 18de8e5bdca165df5a5a4b5e0887846593656f4e | refs/heads/master | 2022-12-02T13:38:05.907135 | 2020-08-13T04:57:41 | 2020-08-13T04:57:41 | 261,647,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,620 | py | """
这次收集整理并用Python实现了八大经典排序算法,
包括冒泡排序,插入排序,选择排序,希尔排序,归并排序,快速排序,堆排序以及基数排序。
希望能帮助到有需要的同学。之所以用 Python 实现,
主要是因为它更接近伪代码,能用更少的代码实现算法,更利于理解。
本篇博客所有排序实现均默认从小到大。
"""
from time import *
class sort:
    # NOTE(review): lowercase class name is unconventional (PEP 8 suggests
    # PascalCase). All methods sort ascending; most mutate `ary`/`arr` in place.

    # 1. Bubble sort -- O(n^2), early exit when a pass makes no swap.
    """
    冒泡排序的原理非常简单，它重复地走访过要排序的数列，
    一次比较两个元素，如果他们的顺序错误就把他们交换过来。
    """

    def bubble_sort(self, ary):
        """Sort `ary` in place by repeatedly swapping adjacent out-of-order pairs."""
        n = len(ary)
        for i in range(n):
            flag = True
            for j in range(1, n - i):
                if ary[j - 1] > ary[j]:
                    temp = ary[j - 1]
                    ary[j - 1] = ary[j]
                    ary[j] = temp
                    flag = False
            # No swap in a full pass: the list is already sorted.
            if flag:
                break
        return ary

    # 2. Selection sort -- O(n^2), at most n-1 swaps.
    """
    选择排序是另一个很容易理解和实现的简单排序算法。
    学习它之前首先要知道它的两个很鲜明的特点。
    1. 运行时间和输入无关
    2. 数据移动是最少的
    """

    def select_sort(self, ary):
        """Sort `ary` in place; returns None (unlike the other methods)."""
        n = len(ary)
        for i in range(0, n):
            min = i  # index of the current minimum (NOTE: shadows builtin min)
            for j in range(i + 1, n):
                if ary[j] < ary[min]:
                    min = j  # found a smaller element
            ary[min], ary[i] = ary[i], ary[min]  # swap the minimum into place

    # 3. Insertion sort -- O(n^2), good on nearly-sorted input.
    """
    插入排序的工作原理是，对于每个未排序数据，
    在已排序序列中从后向前扫描，找到相应位置并插入。
    """

    def insert_sort(self, ary):
        """Sort `ary` in place by inserting each element into the sorted prefix."""
        n = len(ary)
        for i in range(1, n):
            key = i - 1
            mark = ary[i]
            # Shift larger elements of the sorted prefix one slot right.
            while key >= 0 and ary[key] > mark:
                ary[key + 1] = ary[key]
                key -= 1
            ary[key + 1] = mark
        return ary

    # 4. Shell sort -- gapped insertion sort ("diminishing increment sort").
    """
    希尔排序的实质就是分组插入排序，该方法又称缩小增量排序
    """

    def shell_sort(self, ary):
        """Sort `ary` in place with gap sequence count/2, count/4, ..."""
        count = len(ary)
        gap = round(count / 2)
        # "//" would floor-divide; plain "/" in Python 3 always yields a
        # float, so round() is used here to get an integer gap.
        while gap >= 1:
            for i in range(gap, count):
                temp = ary[i]
                j = i
                while j - gap >= 0 and ary[j - gap] > temp:
                    ary[j] = ary[j - gap]
                    j -= gap
                ary[j] = temp
            gap = round(gap / 2)
        return ary

    # 5. Merge sort -- O(n log n), returns a NEW sorted list (not in place).
    def Merge_sort(self, ary: list) -> list:
        """Recursively split `ary` in half and merge the sorted halves."""
        if len(ary) <= 1:
            return ary
        median = int(len(ary) / 2)  # split point
        left = self.Merge_sort(ary[:median])
        right = self.Merge_sort(ary[median:])
        return self.merge(left, right)  # combine the sorted halves

    def merge(self, left: list, right: list) -> list:
        """Merge two sorted lists `left` and `right` into one sorted list."""
        res = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                res.append(left[i])
                i += 1
            else:
                res.append(right[j])
                j += 1
        # Append whatever remains of the non-exhausted side.
        res = res + left[i:] + right[j:]
        return res

    # 6. Quick sort -- hole-filling partition, first element as pivot.
    """
    快速排序通常明显比同为Ο(n log n)的其他算法更快，
    因此常被采用，而且快排采用了分治法的思想，
    所以在很多笔试面试中能经常看到快排的影子。可见掌握快排的重要性。
    步骤：
    1.从数列中挑出一个元素作为基准数。
    2.分区过程，将比基准数大的放到右边，小于或等于它的数都放到左边。
    3.再对左右区间递归执行第二步，直至各区间只有一个数。
    """

    def quick_sort(self, ary):
        """Entry point: quicksort the whole list in place."""
        return self.qsort(ary, 0, len(ary) - 1)

    def qsort(self, ary, start, end):
        """Quicksort ary[start:end+1] in place using ary[start] as pivot."""
        if start < end:
            left = start
            right = end
            key = ary[start]
        else:
            return ary
        while left < right:
            while left < right and ary[right] >= key:
                right -= 1
            if left < right:  # loop broke because ary[right] < key
                ary[left] = ary[right]
                left += 1
            while left < right and ary[left] < key:
                left += 1
            if left < right:  # loop broke because ary[left] >= key
                ary[right] = ary[left]
                right -= 1
        ary[left] = key  # here left == right: drop the pivot into the hole
        self.qsort(ary, start, left - 1)
        self.qsort(ary, left + 1, end)
        return ary

    # arr[]  --> list to sort
    # low    --> start index
    # high   --> end index

    # Quick sort, variant 2 (Lomuto partition, last element as pivot).
    def quickSort_2(self, arr, low, high):
        """Quicksort arr[low:high+1] in place."""
        if low < high:
            pi = self.partition(arr, low, high)
            self.quickSort_2(arr, low, pi - 1)
            self.quickSort_2(arr, pi + 1, high)

    def partition(self, arr, low, high):
        """Lomuto partition: place the pivot at its final index and return it."""
        i = (low - 1)  # index of the last element known to be <= pivot
        pivot = arr[high]
        for j in range(low, high):
            # current element is less than or equal to the pivot
            if arr[j] <= pivot:
                i = i + 1
                arr[i], arr[j] = arr[j], arr[i]
        arr[i + 1], arr[high] = arr[high], arr[i + 1]
        return i + 1

    # 7. Heap sort -- build a max-heap, then repeatedly extract the maximum.
    def heap_sort(self, ary: list):
        """Sort `ary` in place via max-heap."""
        n = len(ary)
        first = int(n / 2 - 1)  # last non-leaf node
        for start in range(first, -1, -1):  # build the initial max-heap
            self.max_heapify(ary, start, n - 1)
        for end in range(n - 1, 0, -1):  # pop maxima into the sorted tail
            # Swap the root (current maximum) with the last unsorted leaf,
            # then restore the heap property over the remaining elements.
            ary[end], ary[0] = ary[0], ary[end]
            # `end` already came from n-1, so heapify over [0, end-1] which
            # excludes the element just placed at ary[end].
            self.max_heapify(ary, 0, end - 1)
        return ary

    # Max-heap sift-down: push ary[start] down until every child is smaller.
    # `start` is the position whose subtree needs fixing; `end` bounds the heap.
    def max_heapify(self, ary: list, start: int, end: int):
        root = start
        while True:
            child = root * 2 + 1  # left child index
            if child > end:  # left child falls outside the heap: done
                break
            if child + 1 <= end and ary[child] < ary[child + 1]:
                child = child + 1  # right child is larger: use it
            if ary[root] < ary[child]:  # promote the larger child
                ary[root], ary[child] = ary[child], ary[root]
                root = child
            else:
                break

    # 8. Counting sort -- O(n + maxValue), non-negative integers only.
    def counting_sort(self, arr: list, maxValue: int) -> list:
        """Sort `arr` in place; values must lie in [0, maxValue]."""
        bucketlen = maxValue + 1
        bucket = [0] * bucketlen
        sortIndex = 0
        arrlen = len(arr)
        for i in range(arrlen):
            # NOTE(review): this reset is redundant -- bucket entries are
            # already initialized to 0 above.
            if not bucket[arr[i]]:
                bucket[arr[i]] = 0
            bucket[arr[i]] += 1
        for j in range(bucketlen):
            while bucket[j] > 0:
                arr[sortIndex] = j
                sortIndex += 1
                bucket[j] -= 1
        return arr

    # 9. Radix sort (LSD, base 10).
    def radix_sort(self, arr: list):
        """Radix sort `arr` in place, least-significant digit first.

        Assumes non-negative integers -- negative values would be bucketed
        incorrectly (review).
        """
        i = 0  # digit currently being sorted on; 0 is the least significant
        max_num = max(arr)  # largest value
        j = len(str(max_num))  # number of digits of the largest value
        while i < j:
            bucket_list = [[] for _ in range(10)]  # one bucket per digit 0-9
            for x in arr:
                bucket_list[int(x / (10 ** i)) % 10].append(x)  # drop into bucket
            arr.clear()
            for x in bucket_list:  # concatenate buckets back into arr
                for y in x:
                    arr.append(y)
            i += 1
        return arr

    # 10. Bucket (counting-style) sort -- returns a new sorted list.
    def bucket_sort(self, arr: list):
        """Sort non-negative integers; NOTE(review): returns 0 (not []) for
        empty input, which is inconsistent with the other methods."""
        if not arr:
            return 0
        maxValue = max(arr)
        bucket = [0] * (maxValue + 1)
        sort_list = []
        for i in arr:
            bucket[i] += 1
        for j in range(len(bucket)):
            if bucket[j] != 0:
                for k in range(bucket[j]):
                    sort_list.append(j)
        return sort_list
# Demo entry point: sorts a small sample list with quickSort_2 and prints it.
# The commented calls below exercise the other algorithms in this class.
if __name__ == '__main__':
    l = [123, 42, 543, 345, 12, 321, 12]
    # arr = [10, 7, 8, 9, 1, 5]
    # sort().bubble_sort(l)  # bubble sort
    # sort().select_sort(l)  # selection sort
    # sort().insert_sort(l)  # insertion sort
    # sort().shell_sort(l)  # Shell sort
    # ll = sort().Merge_sort(l)  # merge sort
    # ll = sort().quick_sort(l)  # quicksort
    # ll = sort().heap_sort(l)  # heapsort
    # ll = sort().counting_sort(l,max(l))  # counting sort
    # ll = sort().radix_sort(l)  # radix sort
    # ll = sort().bucket_sort(l)
    # Second quicksort variant (Lomuto partition); sorts l in place.
    n = len(l)
    sort().quickSort_2(l, 0, n - 1)
    print("排序后的数组:")  # runtime string kept as-is; means "sorted array:"
    for i in range(n):
        print("%d" % l[i])
    # begin_time = time()
    # end_time = time()
    # runtime = end_time - begin_time
    # print(runtime)
    # print(ll)
| [
"1003826976@qq.com"
] | 1003826976@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.