blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
135a875898921530dc0d9ed13b5bd02d13a96cbc
|
ee2af8c0fdc65f44ed9a4295806d75fb09257b58
|
/saif/google_api_integreation/__manifest__.py
|
db20488330aedd7f71c7ecfb68f2ce9990548508
|
[] |
no_license
|
sc4you/odoo-project
|
02b81ff4920a69d3e79c5dcc605a794779c5a77c
|
2ef439ef54f1165c3569a1047cd5cb6a0b50572e
|
refs/heads/master
| 2020-03-21T13:34:52.555402
| 2018-03-19T12:26:39
| 2018-03-19T12:26:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# -*- coding: utf-8 -*-
# Odoo module manifest: this file is evaluated by Odoo as a plain dict literal.
{
    'name': 'Google Docs Integration',
    'category': 'Extra Tools',
    'summary': 'Spreadsheet, Document, Presentation',
    # Implicit string concatenation continued over two lines.
    'description': 'Google Docs Integration: This Module lets you to develop,'\
    'read and modify Spreadsheet, Document, Presentation',
    'author': 'Muhammad Awais',
    # Modules that must be installed before this one can be loaded.
    'depends':['base','project','sale'],
    'application': True,
    # Data files loaded on install: views, security groups and ACLs.
    'data': ['views/template.xml','security/security.xml','security/ir.model.access.csv'],
}
|
[
"ta.awaisajaz@gmail.com"
] |
ta.awaisajaz@gmail.com
|
1b8bf4767e7e81816c259accadb336a80b752300
|
1a13cf55de87bf9fd7cd5911ab7bd9d9c1f88241
|
/tests/programscache.py
|
2382802032fded66be7aa14c186b0adc155ddd14
|
[
"Apache-2.0"
] |
permissive
|
jeperez/winreg-kb
|
c59ead2d593b4ec375b77d7a9c49fbec35b9f156
|
a50fcfc89e3fac282f276b12fb67807ddb56ef10
|
refs/heads/master
| 2021-01-19T11:34:33.293074
| 2017-04-03T06:28:00
| 2017-04-03T06:28:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,086
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Programs Cache information collector."""
import unittest
from dfwinreg import registry as dfwinreg_registry
from winregrc import collector
from winregrc import output_writer
from winregrc import programscache
from tests import test_lib as shared_test_lib
class TestOutputWriter(output_writer.StdoutOutputWriter):
  """Output writer that records written text instead of printing it.

  Attributes:
    text (list[str]): every string passed to WriteText, in call order.
  """

  def __init__(self):
    """Initializes the test output writer."""
    super(TestOutputWriter, self).__init__()
    self.text = []

  def WriteText(self, text):
    """Captures text for later inspection by the tests.

    Args:
      text: the text to write.
    """
    self.text.append(text)
class ProgramsCacheDataParserTest(shared_test_lib.BaseTestCase):
  """Tests for the Programs Cache data parser."""

  # TODO: add tests.
class ProgramsCacheCollectorTest(shared_test_lib.BaseTestCase):
  """Tests for the Programs Cache information collector."""

  @shared_test_lib.skipUnlessHasTestFile([u'NTUSER.DAT'])
  def testCollect(self):
    """Tests the Collect function."""
    registry_collector = collector.WindowsRegistryCollector()
    registry_collector.ScanForWindowsVolume(
        self._GetTestFilePath([u'NTUSER.DAT']))
    self.assertIsNotNone(registry_collector.registry)

    collector_object = programscache.ProgramsCacheCollector()
    writer = TestOutputWriter()
    collector_object.Collect(registry_collector.registry, writer)
    writer.Close()

    # TODO: fix test.
    self.assertEqual(writer.text, [])

  def testCollectEmpty(self):
    """Tests the Collect function on an empty Registry."""
    empty_registry = dfwinreg_registry.WinRegistry()

    collector_object = programscache.ProgramsCacheCollector()
    writer = TestOutputWriter()
    collector_object.Collect(empty_registry, writer)
    writer.Close()

    self.assertEqual(len(writer.text), 0)
if __name__ == '__main__':
  # Run all test cases in this module.
  unittest.main()
|
[
"joachim.metz@gmail.com"
] |
joachim.metz@gmail.com
|
fa05b68b68103da6eba41cb3eace31abf9f4ba74
|
4a5a39858bab54d9fe06364ecfe8edc2747b87f6
|
/Code Jam 2018/Round 1C/ant-stack.py
|
ddc1e8535674f7ccbaa880e66e31d6b637f53b28
|
[] |
no_license
|
gsakkas/code-jam
|
d85a63c11d13ba405b9df4be1e6739ef5c5394ae
|
8e81a4d9b2ea11d9bbb9b3e206951a2261798458
|
refs/heads/master
| 2021-01-22T19:54:24.732574
| 2018-05-17T12:14:06
| 2018-05-17T12:14:06
| 85,257,349
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
def read_int():
    # Python 2: raw_input() reads one line from stdin; parse it as an int.
    return int(raw_input())
def read_many_ints():
    # Python 2: parse one stdin line of whitespace-separated integers
    # (map returns a list under Python 2).
    return map(int, raw_input().split())
def solve(n):
    """Return the maximum number of ants that can form a single stack.

    Reads the n ant weights for this test case from stdin (Python 2).
    An ant can carry weight on top of it; a stack is feasible when each
    ant's load is at most 6 times its own weight, so for a fixed stack
    size we only ever need the lightest feasible stack.
    """
    ws = read_many_ints()
    # Sentinel meaning "no stack of this size is feasible".
    large = 10 ** 100
    # Stack heights are capped at 139 entries per row -- presumably the
    # proven maximum height under the 6x weight rule; TODO confirm bound.
    width = min(139, n)
    # sums[i][j]: minimum total weight of a feasible stack of j+1 ants
    # chosen from the first i+1 ants.
    sums = [[large] * width for _ in xrange(n)]
    for i in xrange(n):
        # A single ant is always a feasible "stack" of height one.
        sums[i][0] = ws[i]
    for i in xrange(1, n):
        for j in xrange(1, min(139, i + 1)):
            # Ant i can go at the bottom only if the lightest stack of j
            # ants above it weighs no more than 6x its own weight.
            if sums[i - 1][j - 1] <= 6 * ws[i]:
                sums[i][j] = min(sums[i - 1][j - 1] + ws[i], sums[i - 1][j])
            else:
                sums[i][j] = sums[i - 1][j]
    # BUG FIX: the original started scanning at j = n - 1, but each row
    # only has min(139, n) entries, so any n > 139 raised IndexError.
    j = width - 1
    while j >= 0 and sums[n - 1][j] == large:
        j -= 1
    return j + 1
if __name__ == "__main__":
    # Google Code Jam driver (Python 2): one "Case #k: answer" line per case.
    t = read_int()
    for test in xrange(1, t + 1):
        # Each case: first line is the ant count, second line the weights
        # (read inside solve()).
        n = read_int()
        print "Case #{}: {}".format(test, solve(n))
    exit(0)
|
[
"george.p.sakkas@gmail.com"
] |
george.p.sakkas@gmail.com
|
af7343241d25adfa0239fc48d6b1c29e0fd2cfcf
|
360ae1188ad79e71ccc72da0b9ae709bda678f91
|
/ryu/services/protocols/__init__.py
|
340a42305b81a40727ffe472e0a96ccaa638aed4
|
[
"Apache-2.0"
] |
permissive
|
faucetsdn/ryu
|
47b3523e7ccb381f3bdf2877a3f9f01cb1876054
|
d6cda4f427ff8de82b94c58aa826824a106014c2
|
refs/heads/master
| 2023-09-05T06:37:21.991029
| 2022-06-09T23:09:40
| 2022-06-09T23:09:40
| 2,945,007
| 385
| 215
|
Apache-2.0
| 2022-11-13T10:50:25
| 2011-12-09T03:43:50
|
Python
|
UTF-8
|
Python
| false
| false
| 682
|
py
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
[
"fujita.tomonori@lab.ntt.co.jp"
] |
fujita.tomonori@lab.ntt.co.jp
|
b96de974ca34505ea68a7002e1eaca1fdf7e1661
|
076e0ebd618ed406808e9009a70d886e8bdb1bbf
|
/grafeas/grafeas_v1/__init__.py
|
98e1ad1c8c28a4c25705f3c56a2ad03ad7d539b0
|
[
"Apache-2.0"
] |
permissive
|
isabella232/python-grafeas
|
8edb1c3b79e51292f1612489775b51a96033049c
|
a806330d0f344eb0b97e351d7e5ba34b8ae9b740
|
refs/heads/master
| 2022-12-15T09:53:51.979968
| 2020-09-22T22:15:19
| 2020-09-22T22:15:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,916
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.grafeas import GrafeasClient
from .types.attestation import AttestationNote
from .types.attestation import AttestationOccurrence
from .types.build import BuildNote
from .types.build import BuildOccurrence
from .types.common import NoteKind
from .types.common import RelatedUrl
from .types.common import Signature
from .types.cvss import CVSSv3
from .types.deployment import DeploymentNote
from .types.deployment import DeploymentOccurrence
from .types.discovery import DiscoveryNote
from .types.discovery import DiscoveryOccurrence
from .types.grafeas import BatchCreateNotesRequest
from .types.grafeas import BatchCreateNotesResponse
from .types.grafeas import BatchCreateOccurrencesRequest
from .types.grafeas import BatchCreateOccurrencesResponse
from .types.grafeas import CreateNoteRequest
from .types.grafeas import CreateOccurrenceRequest
from .types.grafeas import DeleteNoteRequest
from .types.grafeas import DeleteOccurrenceRequest
from .types.grafeas import GetNoteRequest
from .types.grafeas import GetOccurrenceNoteRequest
from .types.grafeas import GetOccurrenceRequest
from .types.grafeas import ListNoteOccurrencesRequest
from .types.grafeas import ListNoteOccurrencesResponse
from .types.grafeas import ListNotesRequest
from .types.grafeas import ListNotesResponse
from .types.grafeas import ListOccurrencesRequest
from .types.grafeas import ListOccurrencesResponse
from .types.grafeas import Note
from .types.grafeas import Occurrence
from .types.grafeas import UpdateNoteRequest
from .types.grafeas import UpdateOccurrenceRequest
from .types.image import Fingerprint
from .types.image import ImageNote
from .types.image import ImageOccurrence
from .types.image import Layer
from .types.package import Architecture
from .types.package import Distribution
from .types.package import Location
from .types.package import PackageNote
from .types.package import PackageOccurrence
from .types.package import Version
from .types.provenance import AliasContext
from .types.provenance import Artifact
from .types.provenance import BuildProvenance
from .types.provenance import CloudRepoSourceContext
from .types.provenance import Command
from .types.provenance import FileHashes
from .types.provenance import GerritSourceContext
from .types.provenance import GitSourceContext
from .types.provenance import Hash
from .types.provenance import ProjectRepoId
from .types.provenance import RepoId
from .types.provenance import Source
from .types.provenance import SourceContext
from .types.upgrade import UpgradeDistribution
from .types.upgrade import UpgradeNote
from .types.upgrade import UpgradeOccurrence
from .types.upgrade import WindowsUpdate
from .types.vulnerability import Severity
from .types.vulnerability import VulnerabilityNote
from .types.vulnerability import VulnerabilityOccurrence
# Public API of the grafeas_v1 package (auto-generated file).
# Names are alphabetical; GrafeasClient is appended last by the generator.
__all__ = (
    "AliasContext",
    "Architecture",
    "Artifact",
    "AttestationNote",
    "AttestationOccurrence",
    "BatchCreateNotesRequest",
    "BatchCreateNotesResponse",
    "BatchCreateOccurrencesRequest",
    "BatchCreateOccurrencesResponse",
    "BuildNote",
    "BuildOccurrence",
    "BuildProvenance",
    "CVSSv3",
    "CloudRepoSourceContext",
    "Command",
    "CreateNoteRequest",
    "CreateOccurrenceRequest",
    "DeleteNoteRequest",
    "DeleteOccurrenceRequest",
    "DeploymentNote",
    "DeploymentOccurrence",
    "DiscoveryNote",
    "DiscoveryOccurrence",
    "Distribution",
    "FileHashes",
    "Fingerprint",
    "GerritSourceContext",
    "GetNoteRequest",
    "GetOccurrenceNoteRequest",
    "GetOccurrenceRequest",
    "GitSourceContext",
    "Hash",
    "ImageNote",
    "ImageOccurrence",
    "Layer",
    "ListNoteOccurrencesRequest",
    "ListNoteOccurrencesResponse",
    "ListNotesRequest",
    "ListNotesResponse",
    "ListOccurrencesRequest",
    "ListOccurrencesResponse",
    "Location",
    "Note",
    "NoteKind",
    "Occurrence",
    "PackageNote",
    "PackageOccurrence",
    "ProjectRepoId",
    "RelatedUrl",
    "RepoId",
    "Severity",
    "Signature",
    "Source",
    "SourceContext",
    "UpdateNoteRequest",
    "UpdateOccurrenceRequest",
    "UpgradeDistribution",
    "UpgradeNote",
    "UpgradeOccurrence",
    "Version",
    "VulnerabilityNote",
    "VulnerabilityOccurrence",
    "WindowsUpdate",
    "GrafeasClient",
)
|
[
"noreply@github.com"
] |
isabella232.noreply@github.com
|
6592fd4e7614f3aa792b9305227977f7af952754
|
6994917b9d22e9e15e578a0e5c75dcf4ce3cb022
|
/perfil/migrations/0025_auto_20200724_2157.py
|
e5dd79e636927224e5bd7b39a7907b3d99b39094
|
[] |
no_license
|
linikerunk/rh-ticket
|
59ad6411a3d08c90c2704b37ba9bba67ea7f7754
|
bd8edd3eb1ea6cfe04fee03a4f41049a84c1e14a
|
refs/heads/master
| 2023-01-06T21:25:06.851369
| 2020-10-29T20:32:53
| 2020-10-29T20:32:53
| 250,346,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
# Generated by Django 2.2.9 on 2020-07-25 00:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: redefine funcionario.centro_de_custo_link
    as a nullable FK to perfil.CentroDeCusto with on_delete=PROTECT."""

    # Must run after migration 0024 of the 'perfil' app.
    dependencies = [
        ('perfil', '0024_auto_20200724_2049'),
    ]

    operations = [
        migrations.AlterField(
            model_name='funcionario',
            name='centro_de_custo_link',
            # PROTECT: deleting a CentroDeCusto with linked funcionarios raises.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='perfil.CentroDeCusto', verbose_name='Centro de Custo link'),
        ),
    ]
|
[
"linikerenem@gmail.com"
] |
linikerenem@gmail.com
|
b2e93044857996da4f7864f49e1ad69a6546cb0b
|
7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d
|
/packages/autorest.python/test/vanilla/version-tolerant/Expected/AcceptanceTests/HttpVersionTolerant/httpinfrastructureversiontolerant/aio/__init__.py
|
154a90083e55b0b0e6ba806098e8782f6d24a683
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/autorest.python
|
cc4bfbf91ae11535731cad37cedd6b733edf1ebd
|
a00d7aaa3753ef05cb5a0d38c664a90869478d44
|
refs/heads/main
| 2023-09-03T06:58:44.246200
| 2023-08-31T20:11:51
| 2023-08-31T20:11:51
| 100,315,955
| 47
| 40
|
MIT
| 2023-09-14T21:00:21
| 2017-08-14T22:58:33
|
Python
|
UTF-8
|
Python
| false
| false
| 865
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._client import AutoRestHttpInfrastructureTestService

try:
    # Optional hand-written customizations shipped next to generated code.
    from ._patch import __all__ as _patch_all
    from ._patch import *  # pylint: disable=unused-wildcard-import
except ImportError:
    # No customizations present; nothing extra to re-export.
    _patch_all = []
from ._patch import patch_sdk as _patch_sdk

__all__ = [
    "AutoRestHttpInfrastructureTestService",
]
# Re-export patch names without duplicating anything already listed.
__all__.extend([p for p in _patch_all if p not in __all__])
# Let the patch module rewrite/extend the generated client in place.
_patch_sdk()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
457b3c5c3ef7b6c87f2a2cb3e30e182a396b8713
|
7eadfc1711278719d4f02cb506f1d1df88cc66af
|
/model/supervised/cnn.py
|
4110058df864f680190dd1ee7dbbb410a1114a2a
|
[] |
no_license
|
fagan2888/Trading_by_Imitation_Learning
|
33a88060e45e38d83b9b0972072cc1bcddcf0bdc
|
0f6820609c64dd0a1e697ec2ac4566b60478025d
|
refs/heads/master
| 2020-11-30T23:06:21.280800
| 2019-05-16T06:27:01
| 2019-05-16T06:27:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,477
|
py
|
"""
Train a supervised CNN model using optimal stock as label
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D
from keras.models import load_model
from keras.optimizers import Adam
from ..base_model import BaseModel
from utils.data import normalize
import numpy as np
import tensorflow as tf
class StockCNN(BaseModel):
    """Supervised CNN that predicts the optimal stock from a price window.

    The network consumes a (nb_classes, window_length, 1) input and emits
    a softmax distribution over nb_classes actions.
    """

    def __init__(self, nb_classes, window_length, weights_file='weights/cnn.h5'):
        """Initialize the model wrapper.

        Args:
            nb_classes (int): number of output classes.
            window_length (int): number of time steps per input window.
            weights_file (str): path used to load/save model weights.
        """
        self.model = None
        self.weights_file = weights_file
        self.nb_classes = nb_classes
        self.window_length = window_length

    @staticmethod
    def _affirmative(reply):
        # input() always returns a string; only these replies count as "yes".
        return reply.strip().lower() in ('true', 'y', 'yes', '1')

    def build_model(self, load_weights=True):
        """Build the CNN, from saved weights or from scratch.

        Args:
            load_weights (bool): True to resume from self.weights_file,
                False to build and compile a fresh network.
        """
        if load_weights:
            self.model = load_model(self.weights_file)
            print('Successfully loaded model')
        else:
            self.model = Sequential()
            self.model.add(
                Conv2D(filters=32, kernel_size=(1, 3), input_shape=(self.nb_classes, self.window_length, 1),
                       activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Conv2D(filters=32, kernel_size=(1, self.window_length - 2), activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Flatten())
            self.model.add(Dense(64, activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(64, activation='relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(self.nb_classes, activation='softmax'))
            self.model.compile(loss='categorical_crossentropy',
                               optimizer=Adam(lr=1e-3),
                               metrics=['accuracy'])
            print('Built model from scratch')
        # Capture the TF graph so predictions work from other threads.
        self.model._make_predict_function()
        self.graph = tf.get_default_graph()

    def train(self, X_train, Y_train, X_val, Y_val, verbose=True):
        """Interactively fit the model in 10-epoch rounds.

        Prompts on stdin after each round whether to save weights and
        whether to keep training.
        """
        continue_train = True
        while continue_train:
            self.model.fit(X_train, Y_train, batch_size=128, epochs=10, validation_data=(X_val, Y_val),
                           shuffle=True, verbose=verbose)
            # BUG FIX: any non-empty reply (even "False") used to be truthy
            # and saved the weights; parse the answer explicitly instead.
            if self._affirmative(input('Type True to save weights\n')):
                self.model.save(self.weights_file)
            # Same fix for the continue prompt: empty or "no"-like stops.
            continue_train = self._affirmative(
                input("True to continue train, otherwise stop training...\n"))
        print('Finish.')

    def evaluate(self, X_test, Y_test, verbose=False):
        """Return keras evaluate() metrics on the test set."""
        return self.model.evaluate(X_test, Y_test, verbose=verbose)

    def predict(self, X_test, verbose=False):
        """Return softmax predictions for a batch of windows."""
        return self.model.predict(X_test, verbose=verbose)

    def predict_single(self, observation):
        """ Predict the action of a single observation

        Args:
            observation: (num_stocks + 1, window_length)

        Returns: a single action array with shape (num_stocks + 1,)
        """
        # Ratio of feature 3 to feature 0 over the most recent window —
        # presumably close/open prices; TODO confirm column layout.
        obsX = observation[:, -self.window_length:, 3:4] / observation[:, -self.window_length:, 0:1]
        obsX = normalize(obsX)
        obsX = np.expand_dims(obsX, axis=0)
        with self.graph.as_default():
            return np.squeeze(self.model.predict(obsX), axis=0)
|
[
"noreply@github.com"
] |
fagan2888.noreply@github.com
|
8e962c66a9d91dae37bddea35a9bff26c992c521
|
7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0
|
/1101-1200/1110-Delete Nodes And Return Forest/1110-Delete Nodes And Return Forest.py
|
3219a3ad56f0c557fe3ebbf9025c4afa1c801801
|
[
"MIT"
] |
permissive
|
jiadaizhao/LeetCode
|
be31bd0db50cc6835d9c9eff8e0175747098afc6
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
refs/heads/master
| 2021-11-05T04:38:47.252590
| 2021-10-31T09:54:53
| 2021-10-31T09:54:53
| 99,655,604
| 52
| 28
|
MIT
| 2020-10-02T12:47:47
| 2017-08-08T05:57:26
|
C++
|
UTF-8
|
Python
| false
| false
| 803
|
py
|
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        # Children start detached; callers wire them up explicitly.
        self.left = self.right = None
class Solution:
    def delNodes(self, root: TreeNode, to_delete: List[int]) -> List[TreeNode]:
        """Delete every node whose value is in to_delete and return the
        roots of the remaining forest."""
        doomed = set(to_delete)
        forest = []

        def walk(node, parent_deleted):
            # Returns the node if it survives, else None (so the parent
            # can unlink it).
            if node is None:
                return None
            if node.val in doomed:
                # This node vanishes; its children may become new roots.
                walk(node.left, True)
                walk(node.right, True)
                return None
            if parent_deleted:
                forest.append(node)
            node.left = walk(node.left, False)
            node.right = walk(node.right, False)
            return node

        # The (virtual) parent of the root counts as deleted.
        walk(root, True)
        return forest
|
[
"jiadaizhao@gmail.com"
] |
jiadaizhao@gmail.com
|
4db824ca06fcb183b3cdd4afb8c1407541610ba5
|
d8c1f119d1349dd8ad2e48619a8c258967cd9a31
|
/Baekjun/Dijkstra/10282. 해킹.py
|
57994f6df0e2f7d52580d8b02723c1259ebd1a75
|
[] |
no_license
|
Seonghyeony/DataStructure-Algorithm
|
c7c006ee705b68fc4d2d04dc6baaf0aeb80fc83e
|
4121289cafd0050bda408934fcb14d88052c956f
|
refs/heads/master
| 2023-04-12T16:30:17.039109
| 2021-05-08T10:31:05
| 2021-05-08T10:31:05
| 286,371,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
import heapq
def dijkstra(graph, n, start):
    """Single-source shortest paths over a 1-indexed adjacency list.

    Prints two numbers: how many of nodes 1..n are reachable from start,
    and the largest finite distance among them.
    """
    INF = float('inf')
    dist = [INF] * (n + 1)
    dist[start] = 0
    heap = [[0, start]]
    while heap:
        d, node = heapq.heappop(heap)
        # Skip stale heap entries superseded by a shorter path.
        if dist[node] < d:
            continue
        for neighbor, weight in graph[node]:
            candidate = d + weight
            if candidate < dist[neighbor]:
                dist[neighbor] = candidate
                heapq.heappush(heap, [candidate, neighbor])
    reachable = 0
    farthest = float('-inf')
    for i in range(1, n + 1):
        if dist[i] != INF:
            reachable += 1
            farthest = max(farthest, dist[i])
    print(reachable, farthest)
# Number of independent test cases.
test_case = int(input())
for _ in range(test_case):
    # n: computers, d: dependency count, c: initially hacked computer.
    n, d, c = map(int, input().split())
    adj = [[] for _ in range(n + 1)]
    for _ in range(d):
        # "a depends on b": infection spreads b -> a after s seconds,
        # hence the reversed edge b -> a.
        a, b, s = map(int, input().split())
        adj[b].append([a, s])
    dijkstra(adj, n, c)
|
[
"sunghyun7949@naver.com"
] |
sunghyun7949@naver.com
|
a467cc78728963d989a66e2ae338212b606e652f
|
037d5d18b9b81205305e158d7d9fdad131d318cb
|
/tests/test_custom_version_base_class.py
|
cc5e981dbda96d06ba8914b794c8350a37b1e0a2
|
[] |
permissive
|
kvesteri/sqlalchemy-continuum
|
ee7acf2c961b27eab3dd8f61598d9159d801ee21
|
a7a6bd7952185b1f82af985c0271834d886a617c
|
refs/heads/master
| 2023-08-24T09:14:33.515416
| 2022-11-17T05:41:09
| 2023-07-24T23:37:12
| 10,312,759
| 479
| 134
|
BSD-3-Clause
| 2023-09-12T20:07:04
| 2013-05-27T10:30:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
import sqlalchemy as sa
from sqlalchemy_continuum import version_class
from tests import TestCase
class TestCommonBaseClass(TestCase):
    """Tests that classes in __versioned__['base_classes'] become bases of
    the generated version class."""

    def create_models(self):
        """Declare the models for this test case.

        NOTE(review): the class bodies below have side effects at class
        creation time (SQLAlchemy declarative + Continuum's __versioned__
        hook), so their order and content matter.
        """
        class TextItem(self.Model):
            __tablename__ = 'text_item'
            # Versioned with default options (no custom base classes).
            __versioned__ = {}
            id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)

        class ArticleVersionBase(self.Model):
            # Abstract: contributes no table, only a base for ArticleVersion.
            __abstract__ = True

        class Article(self.Model):
            __tablename__ = 'article'
            # Ask Continuum to mix ArticleVersionBase into ArticleVersion.
            __versioned__ = {
                'base_classes': (ArticleVersionBase, )
            }
            id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)

        # Expose the models to the test methods.
        self.TextItem = TextItem
        self.Article = Article
        self.ArticleVersionBase = ArticleVersionBase

    def test_each_class_has_distinct_translation_class(self):
        """Each model gets its own version class; Article's also inherits
        the configured base class."""
        class_ = version_class(self.TextItem)
        assert class_.__name__ == 'TextItemVersion'
        class_ = version_class(self.Article)
        assert class_.__name__ == 'ArticleVersion'
        assert issubclass(class_, self.ArticleVersionBase)
|
[
"konsta.vesterinen@gmail.com"
] |
konsta.vesterinen@gmail.com
|
5e14ac1175f45b85ceb256c7a8522a73237bc1f4
|
2aa84f9553a0593593afff720f7dfd8c6df3adde
|
/tests/test_query_parser.py
|
3c2d26f25e088261c212386f85d9a8ce25602370
|
[] |
no_license
|
Pavel-Guseynov/sqlalchemy-searchable
|
c492f37e10e0e9054914af1f20cf799a58b9e8aa
|
6baa13193f2f2a39ba96b231ee7f88843bdd6fd0
|
refs/heads/master
| 2021-07-25T14:02:07.876195
| 2017-11-06T16:17:05
| 2017-11-06T16:17:05
| 109,388,493
| 0
| 0
| null | 2017-11-03T11:35:01
| 2017-11-03T11:35:01
| null |
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
# -*- coding: utf-8 -*-
from pyparsing import ParseException
from pytest import raises
from sqlalchemy_searchable.parser import SearchQueryParser
class TestSearchQueryParser(object):
    """Tests for SearchQueryParser's query-to-tsquery translation."""

    def setup_method(self, method):
        # Fresh parser per test.
        self.parser = SearchQueryParser()

    def test_unicode(self):
        assert u'안녕가は:*' == self.parser.parse(u'안녕가は')

    def test_empty_string(self):
        # An empty query is a parse error, not an empty tsquery.
        with raises(ParseException):
            self.parser.parse('')

    def test_or(self):
        assert 'star:* | wars:*' == self.parser.parse('star or wars')

    def test_multiple_ors(self):
        # Redundant "or" tokens collapse into a single alternation.
        assert 'star:* | wars:*' == self.parser.parse('star or or or wars')

    def test_space_as_and(self):
        assert 'star:* & wars:*' == self.parser.parse('star wars')

    def test_multiple_spaces_as_and(self):
        expected = 'star:* & wars:* & luke:*'
        assert self.parser.parse('star wars luke') == expected

    def test_parenthesis(self):
        expected = '(star:* & wars:*) | luke:*'
        assert self.parser.parse('(star wars) or luke') == expected

    def test_or_and(self):
        expected = 'star:* | wars:* & luke:* | solo:*'
        assert self.parser.parse('star or wars luke or solo') == expected

    def test_empty_parenthesis(self):
        with raises(ParseException):
            assert self.parser.parse('()')

    def test_nested_parenthesis(self):
        # Redundant nesting is flattened to a single level.
        expected = '(star:* & wars:*) | luke:*'
        assert self.parser.parse('((star wars)) or luke') == expected

    def test_not(self):
        assert '! star:*' == self.parser.parse('-star')

    def test_not_with_parenthesis(self):
        assert '! (star:* & wars:*)' == self.parser.parse('-(star wars)')
|
[
"konsta.vesterinen@gmail.com"
] |
konsta.vesterinen@gmail.com
|
65cc4b40f81149e478236ca1e329f99ffc8fcb82
|
e63c1e59b2d1bfb5c03d7bf9178cf3b8302ce551
|
/uri/uri_python/ad_hoc/p2456.py
|
fd6b70e0c37682c826aa6690b0340d8b59f32ede
|
[] |
no_license
|
GabrielEstevam/icpc_contest_training
|
b8d97184ace8a0e13e1c0bf442baa36c853a6837
|
012796c2ceb901cf7aa25d44a93614696a7d9c58
|
refs/heads/master
| 2020-04-24T06:15:16.826669
| 2019-10-08T23:13:15
| 2019-10-08T23:13:15
| 171,758,893
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
# Read the five space-separated integers from one stdin line; extra
# tokens (if any) are ignored, matching the original indexing behavior.
entry = input().split(" ")
a, b, c, d, e = (int(token) for token in entry[:5])
# Python chained comparisons replace the 'a < b and b < c ...' chains:
# strictly increasing -> 'C' (crescent), strictly decreasing -> 'D'
# (decreasing), anything else -> 'N' (neither).
if a < b < c < d < e:
    print('C')
elif a > b > c > d > e:
    print('D')
else:
    print('N')
|
[
"gabrielestevam@hotmail.com"
] |
gabrielestevam@hotmail.com
|
2555d67c9356f76316ca075fb7052e0bc3678ccc
|
0c43ae8365998144ebc23156c12768711114e6f9
|
/web_flask/4-number_route.py
|
1aa7ec504db5b6beaf7e14f834086abdbf92165a
|
[] |
no_license
|
Nesgomez9/AirBnB_clone_v2
|
74e343ade1c418b49c8ebaee79f6319f8e971ff6
|
055c4e92c819fd0e9dec369e687c1601f243f02c
|
refs/heads/master
| 2021-05-19T12:50:35.656686
| 2020-04-23T03:33:41
| 2020-04-23T03:33:41
| 251,707,487
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
#!/usr/bin/python3
from flask import Flask

app = Flask(__name__)


@app.route("/", strict_slashes=False)
def hello_route():
    """Greeting shown at the site root."""
    return "Hello HBNB!"


@app.route("/hbnb", strict_slashes=False)
def hbnb_route():
    """Project name page."""
    return "HBNB"


@app.route("/c/<text>", strict_slashes=False)
def c_route(text):
    """Show "C" followed by the URL text, underscores become spaces."""
    return "C {}".format(text.replace("_", " "))


@app.route("/python/<text>", strict_slashes=False)
@app.route("/python", strict_slashes=False)
def python_route(text="is cool"):
    """Show "Python" followed by the URL text, defaulting to "is cool"."""
    return "Python {}".format(text.replace("_", " "))


@app.route("/number/<int:n>", strict_slashes=False)
def number_route(n):
    """Show "<n> is a number"; the int converter rejects non-integers."""
    return "{:d} is a number".format(n)


if __name__ == "__main__":
    # Listen on all interfaces on the standard Flask port.
    app.run(host="0.0.0.0", port="5000")
|
[
"nicolico99@hotmail.com"
] |
nicolico99@hotmail.com
|
e1fee3842a2ba41bf122a82af2236ea8f8fad717
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/v_sniper/sniper-master/cron.py
|
39485faf71647c6676d5605dd32b483c4befdc2b
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,777
|
py
|
#!/usr/bin/env python
""" This represents the cronjob that runs to check for course openings"""
from flaskext.mail import Message
import urllib
from models import db, Snipe
from soc import Soc
from app import mail, app
import datetime
from collections import namedtuple
# Shared Schedule-of-Classes client used by every poll.
soc = Soc()

EMAIL_SENDER = "Course Sniper <sniper@rutgers.io>"

# Lightweight record for an open section: (number, registration index).
Section = namedtuple('Section', ['number', 'index'])
def poll(subject, result=False):
    """ Poll a subject for open courses.

    Args:
        subject: SOC subject code to query.
        result: when True, return the {course_number: [Section]} mapping
            instead of sending notifications.

    NOTE(review): Python 2 code (dict.iteritems below).
    """
    app.logger.warning("Polling for %s" % (subject))
    # get all the course data from SOC
    courses = soc.get_courses(subject)
    # build information about which courses/sections are currently open.
    open_data = {}
    if courses is not None:
        for course in courses:
            course_number = course['courseNumber']
            # remove leading zeroes
            if course_number.isdigit():
                course_number = str(int(course_number))
            open_data[course_number] = []
            for section in course['sections']:
                section_number = section['number']
                # normalize section numbers the same way as course numbers
                if section_number.isdigit():
                    section_number = str(int(section_number))
                # section is open
                if section['openStatus']:
                    open_data[course_number].append(Section(section_number, section['index']))
        # all of these course numbers are open
        open_courses = [course for course, open_sections in open_data.iteritems() if open_sections]
        if result:
            return open_data
        if open_courses:
            # Notify people that were looking for these courses
            snipes = Snipe.query.filter(Snipe.course_number.in_(open_courses), Snipe.subject==str(subject))
            for snipe in snipes:
                # Only notify when the sniper's specific section opened.
                for section in open_data[snipe.course_number]:
                    if section.number == snipe.section:
                        notify(snipe, section.index)
        else:
            app.logger.warning('Subject "%s" has no open courses' % (subject))
    else:
        app.logger.warning('Subject "%s" is not valid' % (subject))
def notify(snipe, index):
    """Email a sniper that their course section is open, then delete the
    snipe so they are not notified again.

    Args:
        snipe: Snipe row (subject, course_number, section, user).
        index: SOC registration index of the open section.
    """
    course = '%s:%s:%s' % (snipe.subject, snipe.course_number, snipe.section)
    if snipe.user.email:
        attributes = {
            'email': snipe.user.email,
            'subject': snipe.subject,
            'course_number': snipe.course_number,
            'section': snipe.section,
        }
        # build the url for prepopulated form
        url = 'http://sniper.rutgers.io/?%s' % (urllib.urlencode(attributes))
        register_url = 'https://sims.rutgers.edu/webreg/editSchedule.htm?login=cas&semesterSelection=12017&indexList=%s' % (index)
        email_text = 'A course (%s) that you were watching looks open. Its index number is %s. Click the link below to register for it!\n\n %s \n\n If you don\'t get in, visit this URL: \n\n %s \n\n to continue watching it.\n\n Send any feedback to sniper@rutgers.io' % (course, index, register_url, url)
        # send out the email
        message = Message('[Course Sniper](%s) is open' % (course), sender=EMAIL_SENDER)
        message.body = email_text
        # BUG FIX: the recipient was added twice, so every sniper received
        # duplicate notification emails. Add them exactly once.
        message.add_recipient(snipe.user.email)
        mail.send(message)
    # The snipe is fulfilled either way; remove it from the database.
    db.session.delete(snipe)
    db.session.commit()
    app.logger.warning('Notified user: %s about snipe %s' % (snipe.user, snipe))
if __name__ == '__main__':
    # get all the courses that should be queried.
    app.logger.warning("----------- Running the Cron %s " % (str(datetime.datetime.now())))
    subjects = db.session.query(Snipe.subject).distinct().all()
    for subject in subjects:
        # Each row is a one-element tuple; pass the bare subject code.
        poll(subject[0])
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
3119ea6af239d63712279b5d9972ab77083b0507
|
d906b38849fcb8eb26dc584dfb03d9ed5a133abb
|
/pendulum.py
|
cdc8f0dcaa2b2591c1eb4e512b073da368453a54
|
[] |
no_license
|
basicmachines/sim-dynamics
|
dd5213f10b7a8bbc325e492b41714ceee45e0c1c
|
3430651a5b684ecca4a0ceb282213070f379c2fd
|
refs/heads/master
| 2020-03-08T14:11:40.689788
| 2018-04-10T05:53:21
| 2018-04-10T05:53:21
| 128,178,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
#!/usr/bin/env python
"""Dynamic simulation of a pendulum.
"""
from pygame.locals import K_z, K_x, K_c, K_b, K_n, K_m
from simulator import Simulator
# Import the model you want to simulate
from models import Pendulum
# Import the controller(s) you want to simulate
from controllers import PIDController, KeyboardInput
# Frame rate of the simulation loop; also fixes the PID time step below.
TARGET_FPS = 30

# Initialize model
model = Pendulum(position=(16, 12))

# ----------- Setup Keyboard Controller ---------------
# Map the keys to the model inputs
key_actions = {
    K_m: 'TP3',  # Positive torque values (counter-clockwise)
    K_n: 'TP2',
    K_b: 'TP1',
    K_z: 'TN3',  # Negative torque values (clockwise)
    K_x: 'TN2',
    K_c: 'TN1'
}

kbd_controller = KeyboardInput(model.inputs, key_actions=key_actions)

# On-screen help text shown by the simulator.
key_instructions = [
    'z, x, c - apply anti-clockwise torque',
    'b, n, m - apply clockwise torque'
]

# ----------- Setup PID Controller ---------------
# Regulates the pendulum angle output 'a' toward set_point using the
# model's torque inputs, clamped to [-7, 7].
pid_controller = PIDController(
    cv=model.outputs['a'],
    mv=model.inputs,
    kp=75.0,
    ki=8.0,
    kd=300.0,
    set_point=0.0,
    mv_max=7,
    mv_min=-7,
    bool_outputs=model.torque_settings,
    time_step=1.0 / TARGET_FPS
)

# ------------- Run Simulation -----------------
# Both controllers are active; keyboard input perturbs, PID stabilizes.
simulator = Simulator(
    model=model,
    controllers=[kbd_controller, pid_controller],
    key_instructions=key_instructions
)
simulator.run()
|
[
"bill.tubbs@me.com"
] |
bill.tubbs@me.com
|
16eb4f0e51d45f39b17d70bcf2a407765c928ad8
|
5bd4893a793ed739127f15becd9558cacf461540
|
/scripts/hit_endpoint.py
|
b86fbea7d7419186ef9c482e8f1b00b6d7f17c8a
|
[] |
no_license
|
hauensteina/ahn-repo
|
d3aa665eeef846e426b866d587e8649c8283e74c
|
93bd7c54548a083f39510fc562c9e7540c4f672a
|
refs/heads/master
| 2023-07-24T05:34:51.289699
| 2023-07-13T16:10:25
| 2023-07-13T16:10:25
| 99,860,476
| 0
| 1
| null | 2023-07-15T01:33:35
| 2017-08-09T23:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 739
|
py
|
#!/usr/bin/env python
# Python 3 script hitting a REST endpoint
# AHN, Jun 2019
import sys
from pdb import set_trace as BP

import requests
# Endpoint and canned request payload for a single smoke-test call.
URL = 'https://ahaux.com/leela_server/select-move/leela_gtp_bot?tt=1234'
# Empty move list on a 19x19 board; request_id is an arbitrary client token.
ARGS = {'board_size':19,'moves':[],'config':{'randomness':0.5,'request_id':'0.6834311059880898'}}
#-------------
def main():
    """Query the Leela endpoint once and print the decoded JSON response."""
    print(hit_endpoint(URL, ARGS))
# Hit an endpoint with a POST request
#----------------------------------------
def hit_endpoint( url, args):
try:
resp = requests.post( url, json=args)
res = resp.json()
return res
except Exception as e:
print( 'ERROR: hit_endpoint() failed: %s' % str(e))
sys.exit(1)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
[
"hauensteina@gmail.com"
] |
hauensteina@gmail.com
|
552ac5116d0dbc29272076004d4a9b916cb2a96e
|
a9fc496e0724866093dbb9cba70a8fdce12b67a9
|
/scripts/field/q59000_tuto.py
|
5925ba6e70b43edd7737755c40baf93619dad4ae
|
[
"MIT"
] |
permissive
|
ryantpayton/Swordie
|
b2cd6b605f7f08f725f5e35d23ba3c22ef2ae7c0
|
ca6f42dd43f63b1d2e6bb5cdc8fc051c277f326e
|
refs/heads/master
| 2022-12-01T09:46:47.138072
| 2020-03-24T10:32:20
| 2020-03-24T10:32:20
| 253,997,319
| 2
| 0
|
MIT
| 2022-11-24T08:17:54
| 2020-04-08T05:50:22
|
Java
|
UTF-8
|
Python
| false
| false
| 1,222
|
py
|
# Arboren : Stump Town
# Quest-intro NPC script: on first contact, offers to skip or play the
# tutorial cutscene for quest 59000 ("The Town Prankster").
# `sm` is the server-provided script manager (dialogue, quest state, cleanup).
if not sm.hasQuest(59000): # The Town Prankster
    if not sm.hasQuestCompleted(59000): # The Town Prankster
        sm.removeEscapeButton()
        if sm.sendAskYesNo("Would you like to skip the tutorial cutscenes?"):
            #todo add after skipping tutorial
            sm.dispose()
        else:
            # Play the intro dialogue with the player as the speaker.
            sm.setPlayerAsSpeaker()
            sm.sendNext("Dun, dun, dun. Hero theme song! I'm #h #, I'm from a town hidden deeep within Arboren Forest!")
            sm.sendNext("I've got the coolest ears and tail, dun dun dun. They're super heroic, dun dun dun.")
            sm.sendNext("And I'm gonna be a hero somedaaaaay. A hero somedaaaaay! Drumroll!")
            sm.sendNext("For reals. Granny Rosanna tells me bedtime stories every night...")
            sm.sendNext("Stories about the #bfive brave heroes#k, who sealed away the terrifying #bBlack Mage#k! \r\n Pew, pew, kaboom! I'm gonna be a hero just like 'em someday soon!")
            # Switch the speaker to NPC 9390305 for the interruption line.
            sm.setSpeakerID(9390305)
            #todo effects
            sm.sendNext("Who'd dig a hole here!?")
            sm.setPlayerAsSpeaker()
            sm.sendNext("Uh oh, what's this? I smell... the need for a Hero!")
            # Release the script context.
            sm.dispose()
|
[
"thijsenellen@outlook.com"
] |
thijsenellen@outlook.com
|
ec7fa1f86c2a000110ed3e35ad2f81201ff443b7
|
cb062c48280311134fe22573a41f9c4d6631b795
|
/src/xm/core/txs/multisig/MultiSigVote.py
|
a05b6202d10dcb74e58f0c5ec2605ebb1c0396e9
|
[
"MIT"
] |
permissive
|
xm-blockchain/xm-core
|
da1e6bb4ceb8ab642e5d507796e2cc630ed23e0f
|
2282b435a02f061424d656155756d8f50238bcfd
|
refs/heads/main
| 2023-01-15T19:08:31.399219
| 2020-11-19T03:54:19
| 2020-11-19T03:54:19
| 314,127,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,528
|
py
|
from pyxmlib.pyxmlib import bin2hstr
from xm.core.State import State
from xm.core.StateContainer import StateContainer
from xm.core.misc import logger
from xm.core.txs.Transaction import Transaction
from xm.crypto.misc import sha256
class MultiSigVote(Transaction):
    """
    MultiSigVote records a signatory's vote (or withdrawal of a prior vote)
    for a pending MultiSigSpend transaction identified by `shared_key`.
    (The original docstring described MultiSigSpend — copy-paste artifact.)
    """
    def __init__(self, protobuf_transaction=None):
        super(MultiSigVote, self).__init__(protobuf_transaction)
    @property
    def shared_key(self):
        # Tx hash of the MultiSigSpend transaction this vote targets.
        return self._data.multi_sig_vote.shared_key
    @property
    def unvote(self):
        # True when this transaction withdraws a previously cast vote.
        return self._data.multi_sig_vote.unvote
    @property
    def prev_tx_hash(self):
        # Hash of this signatory's previous vote tx (set during apply(),
        # used by revert() to restore the prior vote state).
        return self._data.multi_sig_vote.prev_tx_hash
    def set_prev_tx_hash(self, prev_tx_hash: bytes):
        self._data.multi_sig_vote.prev_tx_hash = prev_tx_hash
    def get_data_hash(self):
        # Hash over the signed fields only; prev_tx_hash is excluded because
        # it is filled in at apply time, after signing.
        tmp_tx_hash = (self.master_addr +
                       self.fee.to_bytes(8, byteorder='big', signed=False) +
                       self.shared_key +
                       self.unvote.to_bytes(1, byteorder='big', signed=False))
        return sha256(tmp_tx_hash)
    @staticmethod
    def create(shared_key: bytes,
               unvote: bool,
               fee: int,
               xmss_pk,
               master_addr: bytes = None):
        """Build an unsigned MultiSigVote and validate its static fields."""
        multi_sig_vote = MultiSigVote()
        if master_addr:
            multi_sig_vote._data.master_addr = master_addr
        multi_sig_vote._data.public_key = bytes(xmss_pk)
        multi_sig_vote._data.multi_sig_vote.shared_key = shared_key
        multi_sig_vote._data.multi_sig_vote.unvote = unvote
        multi_sig_vote._data.fee = int(fee)
        multi_sig_vote.validate_or_raise(verify_signature=False)
        return multi_sig_vote
    def _validate_custom(self):
        # Stateless sanity check: fee may not be negative.
        if self.fee < 0:
            logger.warning('MultiSigVote [%s] Invalid Fee = %d', bin2hstr(self.txhash), self.fee)
            return False
        return True
    def _validate_extended(self, state_container: StateContainer):
        """Stateful validation against the current chain state."""
        # Feature is gated behind the first hard fork height.
        if state_container.block_number < state_container.current_dev_config.hard_fork_heights[0]:
            logger.warning("[MultiSigVote] Hard Fork Feature not yet activated")
            return False
        addr_from_state = state_container.addresses_state[self.addr_from]
        vote_stats = state_container.votes_stats[self.shared_key]
        if vote_stats is None:
            logger.warning("[MultiSigVote] Invalid Shared key %s", bin2hstr(self.shared_key))
            return False
        multi_sig_spend_tx = state_container.multi_sig_spend_txs[self.shared_key]
        block_number = state_container.block_number
        # A spend that already executed can no longer be voted on.
        if vote_stats.executed:
            logger.warning("[MultiSigVote] Invalid Tx as MultiSigSpend has already been executed")
            return False
        if multi_sig_spend_tx is None:
            logger.warning("MultiSigSpend not found, Shared Key %s", bin2hstr(self.shared_key))
            return False
        # Votes after the spend's expiry block are rejected.
        if block_number > multi_sig_spend_tx.expiry_block_number:
            logger.warning("[MultiSigVote] Voted for expired Multi Sig Spend Txn")
            logger.warning("Expiry Block Number: %s, Current Block Number: %s",
                           multi_sig_spend_tx.expiry_block_number,
                           block_number)
            return False
        # Only listed signatories of the multi-sig wallet may vote.
        if self.addr_from not in vote_stats.signatories:
            logger.warning("Address not found in signatory list")
            logger.warning("Address %s, Shared Key %s, Multi Sig Address %s",
                           bin2hstr(self.addr_from),
                           bin2hstr(self.shared_key),
                           bin2hstr(vote_stats.multi_sig_address))
            return False
        # Re-casting the same vote state (vote->vote / unvote->unvote) is invalid.
        index = vote_stats.get_address_index(self.addr_from)
        if vote_stats.unvotes[index] == self.unvote:
            logger.warning("[MultiSigVote] Invalid as Vote type already executed")
            logger.warning("Vote type %s", self.unvote)
            return False
        # The voter must be able to pay the fee.
        tx_balance = addr_from_state.balance
        if tx_balance < self.fee:
            logger.warning('[MultiSigVote] State validation failed for %s because: Insufficient funds',
                           bin2hstr(self.txhash))
            logger.warning('balance: %s, fee: %s', tx_balance, self.fee)
            return False
        return True
    def set_affected_address(self, addresses_set: set):
        # Only the sender/master address (handled by the base class) is affected.
        super().set_affected_address(addresses_set)
    def apply(self,
              state: State,
              state_container: StateContainer) -> bool:
        """Deduct the fee, record the vote weight, and remember the previous
        vote tx hash so the change can be reverted."""
        address_state = state_container.addresses_state[self.addr_from]
        address_state.update_balance(state_container, self.fee, subtract=True)
        state_container.paginated_tx_hash.insert(address_state, self.txhash)
        vote_stats = state_container.votes_stats[self.shared_key]
        multi_sig_address = vote_stats.multi_sig_address
        weight, found = state_container.addresses_state[multi_sig_address].get_weight_by_signatory(self.addr_from)
        if not found:
            logger.info("[MultiSigVote] Address is not the signatory for the multi sig address")
            return False
        self.set_prev_tx_hash(vote_stats.get_vote_tx_hash_by_signatory_address(self.addr_from))
        if not vote_stats.apply_vote_stats(self, weight, state_container):
            logger.info("[MultiSigVote] Failed to apply vote_stats")
            return False
        return self._apply_state_changes_for_PK(state_container)
    def revert(self,
               state: State,
               state_container: StateContainer) -> bool:
        """Undo apply(): roll back the vote weight, refund the fee, and
        remove the tx from the sender's history (reverse order of apply)."""
        vote_stats = state_container.votes_stats[self.shared_key]
        multi_sig_address = vote_stats.multi_sig_address
        weight, found = state_container.addresses_state[multi_sig_address].get_weight_by_signatory(self.addr_from)
        if not found:
            logger.info("[MultiSigVote] Address is not the signatory for the multi sig address")
            return False
        if not vote_stats.revert_vote_stats(self, weight, state_container):
            logger.info("[MultiSigVote] Failed to revert vote_stats")
            return False
        address_state = state_container.addresses_state[self.addr_from]
        address_state.update_balance(state_container, self.fee)
        state_container.paginated_tx_hash.remove(address_state, self.txhash)
        return self._revert_state_changes_for_PK(state_container)
|
[
"74695206+xm-blockchain@users.noreply.github.com"
] |
74695206+xm-blockchain@users.noreply.github.com
|
75528b21f1eac2877ca966946d1370e81593004b
|
05c6b9f1f769ff359b757a913e0d43aeb1dfb9c6
|
/hcf.py
|
f5cd7c9c45512a220634fefb6a6049cfbdbaad6c
|
[] |
no_license
|
samarthchadda/pypi
|
bc92c0f7086ead65cb7242f7ea827470817a3b55
|
0c3308be995c8952c8db6b56aae46e58722d4d82
|
refs/heads/master
| 2020-04-01T16:48:13.337978
| 2018-11-04T10:17:59
| 2018-11-04T10:17:59
| 153,399,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
def computeHCF(x, y):
    """Return the highest common factor (GCD) of x and y.

    Uses Euclid's algorithm: O(log min(x, y)) instead of the original
    trial-division scan, and fixes the crash the original had when
    min(x, y) < 1 (the loop never ran, leaving `hcf` unbound).
    """
    while y:
        x, y = y, x % y
    # abs() keeps the result non-negative even for negative inputs.
    return abs(x)
# Read two integers from the user and report their highest common factor.
num1 = int(input("Enter first number:"))
num2 = int(input("Enter second number:"))  # fixed typo: "nunber" -> "number"
print("H.C.F of", num1, "and", num2, "is", computeHCF(num1, num2))
|
[
"samarthchadda@gmail.com"
] |
samarthchadda@gmail.com
|
74642e8877743f8591bc0e8ec061ab3c92d67f5a
|
6a803f0be359651a68107ccc2452be58e178d54b
|
/test/test_tojson.py
|
ba97f228529b58dca993c52a07690caadab47f87
|
[
"MIT"
] |
permissive
|
pombredanne/javaproperties-cli
|
14c8a067ec8a4af6bb8ac25e64117fafb7e0238e
|
192f96a9ffa504ed3c0fd9636f7a321b65f8cad4
|
refs/heads/master
| 2020-12-25T18:42:39.656917
| 2017-05-17T13:10:48
| 2017-05-17T13:10:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,347
|
py
|
from click.testing import CliRunner
from javaproperties_cli.tojson import properties2json
def test_properties2json_empty():
    # An empty properties stream must serialize to an empty JSON object.
    result = CliRunner().invoke(properties2json, input=b'')
    assert result.exit_code == 0
    assert result.output_bytes == b'{}\n'
def test_properties2json_comment_only():
    # Comment lines contribute no keys, so the output is an empty object.
    result = CliRunner().invoke(properties2json, input=b'#This is a comment.\n')
    assert result.exit_code == 0
    assert result.output_bytes == b'{}\n'
def test_properties2json_simple():
    # All three key/value separator styles ('=', ':', bare whitespace) parse
    # to the same JSON string mapping, with keys emitted in sorted order.
    r = CliRunner().invoke(properties2json, input=b'''
#Mon Nov 07 15:29:40 EST 2016
key = value
foo: bar
zebra apple
''')
    assert r.exit_code == 0
    assert r.output_bytes == b'''{
"foo": "bar",
"key": "value",
"zebra": "apple"
}
'''
def test_properties2json_scalarlike():
    # Values that look like JSON scalars (ints, floats, null, booleans) must
    # stay strings — properties files are untyped.
    r = CliRunner().invoke(properties2json, input=b'''
#Mon Nov 07 15:29:40 EST 2016
key = 42
foo: 3.14
zebra null
true=false
''')
    assert r.exit_code == 0
    assert r.output_bytes == b'''{
"foo": "3.14",
"key": "42",
"true": "false",
"zebra": "null"
}
'''
def test_properties2json_empty_value():
    # A key with '=' and nothing after, and a bare key with no separator,
    # both map to the empty string.
    r = CliRunner().invoke(properties2json, input=b'''
#Mon Nov 07 15:29:40 EST 2016
empty=
missing
''')
    assert r.exit_code == 0
    assert r.output_bytes == b'''{
"empty": "",
"missing": ""
}
'''
def test_properties2json_escaped_nonascii_input():
    # \uXXXX escapes in the properties input (including surrogate pairs for
    # astral characters) round-trip into JSON \uXXXX escapes, lowercased.
    r = CliRunner().invoke(properties2json, input=b'''
#Mon Nov 07 15:29:40 EST 2016
edh: \\u00F0
snowman: \\u2603
goat: \\uD83D\\uDC10
\\u00F0: edh
\\uD83D\\uDC10: goat
\\u2603: snowman
''')
    assert r.exit_code == 0
    assert r.output_bytes == b'''{
"edh": "\\u00f0",
"goat": "\\ud83d\\udc10",
"snowman": "\\u2603",
"\\u00f0": "edh",
"\\u2603": "snowman",
"\\ud83d\\udc10": "goat"
}
'''
def test_properties2json_utf8_input_no_encoding():
    # Without --encoding, raw UTF-8 bytes are read with the properties default
    # (Latin-1), so each byte of a multibyte sequence becomes its own escaped
    # character (mojibake is the expected behavior here).
    r = CliRunner().invoke(properties2json, input=b'''
#Mon Nov 07 15:29:40 EST 2016
edh: \xC3\xB0
snowman: \xE2\x98\x83
goat: \xF0\x9F\x90\x90
\xC3\xB0: edh
\xF0\x9F\x90\x90: goat
\xE2\x98\x83: snowman
''')
    assert r.exit_code == 0
    assert r.output_bytes == b'''{
"edh": "\\u00c3\\u00b0",
"goat": "\\u00f0\\u009f\\u0090\\u0090",
"snowman": "\\u00e2\\u0098\\u0083",
"\\u00c3\\u00b0": "edh",
"\\u00e2\\u0098\\u0083": "snowman",
"\\u00f0\\u009f\\u0090\\u0090": "goat"
}
'''
def test_properties2json_utf8_input():
    # With --encoding utf-8, multibyte input decodes to real characters,
    # which serialize as proper \uXXXX escapes (surrogate pair for the goat).
    r = CliRunner().invoke(properties2json, ['--encoding', 'utf-8'], input=b'''
#Mon Nov 07 15:29:40 EST 2016
edh: \xC3\xB0
snowman: \xE2\x98\x83
goat: \xF0\x9F\x90\x90
\xC3\xB0: edh
\xF0\x9F\x90\x90: goat
\xE2\x98\x83: snowman
''')
    assert r.exit_code == 0
    assert r.output_bytes == b'''{
"edh": "\\u00f0",
"goat": "\\ud83d\\udc10",
"snowman": "\\u2603",
"\\u00f0": "edh",
"\\u2603": "snowman",
"\\ud83d\\udc10": "goat"
}
'''
def test_properties2json_utf16_input():
    # UTF-16BE input decodes to the same characters as the utf-8 case above,
    # so the JSON output must be identical.
    r = CliRunner().invoke(properties2json, ['--encoding', 'utf-16BE'], input=u'''
#Mon Nov 07 15:29:40 EST 2016
edh: \u00F0
snowman: \u2603
goat: \U0001F410
\u00F0: edh
\U0001F410: goat
\u2603: snowman
'''.encode('UTF-16BE'))
    assert r.exit_code == 0
    assert r.output_bytes == b'''{
"edh": "\\u00f0",
"goat": "\\ud83d\\udc10",
"snowman": "\\u2603",
"\\u00f0": "edh",
"\\u2603": "snowman",
"\\ud83d\\udc10": "goat"
}
'''
# repeated keys?
# invalid \u escape
|
[
"git@varonathe.org"
] |
git@varonathe.org
|
5f7c0cdac07becdf70d55f1915794e2a91b1e177
|
8c51aff248eb6f463d62e934213660437c3a107b
|
/django_project/users/views.py
|
fe5316a90de09acd0372252d67941f52e802b19c
|
[] |
no_license
|
wonjun0901/WJ_Develop_Individually
|
5f839932c189adf2b2b34f7dadbdeaa8744f8d0e
|
e0402f5dbdda8ae8292cace124d381e29f707183
|
refs/heads/master
| 2021-01-02T00:13:38.851832
| 2020-02-18T01:10:15
| 2020-02-18T01:10:15
| 239,406,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
from django.shortcuts import render, redirect
#from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from .forms import UserRegisterForm
def register(request):
    """Sign-up view: render the form on GET, create the account on valid POST.

    An invalid POST falls through and re-renders the bound form with errors.
    """
    is_post = request.method == 'POST'
    form = UserRegisterForm(request.POST) if is_post else UserRegisterForm()
    if is_post and form.is_valid():
        form.save()
        username = form.cleaned_data.get('username')
        messages.success(request, f'Account created for {username}!')
        return redirect('blog-home')
    return render(request, 'users/register.html', {'form': form})
|
[
"wonjun0901@gmail.com"
] |
wonjun0901@gmail.com
|
e82585fce52c800d045ff51b94242a83f0126930
|
653a3d9d66f3d359083cb588fc7c9ece8bb48417
|
/test/runtime/frontend_test/onnx_test/defs_test/math_test/max_test.py
|
8aaf5db7ac2b8cfd0529aa58330acb3221cbd3dc
|
[
"Zlib",
"MIT"
] |
permissive
|
leonskim/webdnn
|
fec510254b15f3dec00f5bed8f498737b372e470
|
f97c798c9a659fe953f9dc8c8537b8917e4be7a2
|
refs/heads/master
| 2020-04-15T18:42:43.632244
| 2019-01-10T10:07:18
| 2019-01-10T10:07:18
| 164,921,764
| 0
| 0
|
NOASSERTION
| 2019-01-09T19:07:35
| 2019-01-09T19:07:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
import numpy as np
from test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model
from test.util import wrap_template, generate_kernel_test_case
from webdnn.frontend.onnx import ONNXConverter
@wrap_template
def template(n_x, x_shape, description: str = ""):
    """Check ONNX variadic Max over n_x random inputs converts and evaluates
    to the elementwise maximum."""
    vxs = [np.random.rand(*x_shape) for _ in range(n_x)]
    # Compute the expected output by folding pairwise maxima queue-style:
    # pop two operands, push their elementwise max, until one remains.
    vys = list(vxs)
    while len(vys) > 1:
        vx1, vx2 = vys.pop(0), vys.pop(0)
        vy = np.maximum(vx1, vx2)
        vys.append(vy)
    vy = vys[0]
    # Build the ONNX model: one Max node over all inputs.
    xs = [make_tensor_value_info(f"x{i}", vx.shape) for i, vx in enumerate(vxs)]
    y = make_tensor_value_info("y", vy.shape)
    operator = make_node("Max", [x.name for x in xs], ["y"])
    model = make_model([operator], xs, [y])
    graph = ONNXConverter().convert(model)
    assert tuple(vy.shape) == tuple(graph.outputs[0].shape), f"vy: {vy.shape}, graph.outputs[0]: {graph.outputs[0].shape}"
    generate_kernel_test_case(
        description=f"[ONNX] Max {description}",
        graph=graph,
        inputs={graph.inputs[i]: vx for i, vx in enumerate(vxs)},
        expected={graph.outputs[0]: vy},
    )
def test_2():
    # Max over two 4-D inputs.
    template(n_x=2, x_shape=[2, 3, 4, 5])
def test_3():
    # Max over three 4-D inputs.
    template(n_x=3, x_shape=[2, 3, 4, 5])
def test_4():
    # Max over four 4-D inputs.
    template(n_x=4, x_shape=[2, 3, 4, 5])
|
[
"y.kikura@gmail.com"
] |
y.kikura@gmail.com
|
ce84cfc7e6a9774842cef1a393e8ef625284ae06
|
7f189b1d917785da079276674276f68baa30df7f
|
/kmmall/pipelines.py
|
1d30292b0e08e1835ac775ee2d640e6b34c9e8bb
|
[] |
no_license
|
eninem123/kangmeimallspider
|
b79ed43781328d67e893652433e59ed094ec941a
|
b2bcca0efe0b634ca97f331242351e9cfd52c2f7
|
refs/heads/master
| 2022-12-24T04:31:31.169097
| 2018-09-20T09:58:41
| 2018-09-20T09:58:41
| 149,369,473
| 1
| 0
| null | 2022-12-08T00:45:48
| 2018-09-19T00:38:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,637
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# import json
# from scrapy.exporters import CsvItemExporter
import csv
# class KmmallPipeline(object):
# def process_item(self, item, spider):
# return item
class KmCsvPipeline(object):
    """Scrapy item pipeline that flattens each scraped item into one row of
    km2.csv. (Comments translated from the original Chinese.)"""
    def open_spider(self, spider):
        # Create the output file object.
        self.f = open("km2.csv", "w+")
        # Original note: the CsvItemExporter approach was never figured out,
        # so the exporter-based variant is left commented out.
        # self.csv_exporter = CsvItemExporter(self.f)
        # Begin csv export
        # self.csv_exporter.start_exporting()
        # Create a csv writer used to append item rows to the file.
        self.csv_writer = csv.writer(self.f, delimiter=',')
    def process_item(self, item, spider):
        # Write the item's fields to the csv file via the writer.
        # Convert the item into a plain dict first.
        item = dict(item)
        # If JSON output were wanted, the item would be dumped here instead:
        # item = json.dumps(item, ensure_ascii=False)
        # self.csv_exporter.export_item(item.encode("utf8"))
        print('*******************************************************item:', item)
        print('*******************************************************item:', type(item))
        # print('*******************************************************item:', item['goods_url'])
        # The extracted object is a dict shaped like {"key1": "val1", ...}.
        one=item['one']
        two=item['two']
        two_url=item['two_url']
        three=item['three']
        three_url=item['three_url']
        title=item['title']
        title_two=item['title_two']
        price=item['price']
        goods_url=item['goods_url']
        market_price = item['market_price']
        spec=item['spec']
        count_comment=item['count_comment']
        goods_name=item['goods_name']
        goods_no=item['goods_no']
        goods_pz=item['goods_pz']
        goods_logo=item['goods_logo']
        goods_spec=item['goods_spec']
        goods_jx=item['goods_jx']
        goods_cj=item['goods_cj']
        self.csv_writer.writerow([one,two,two_url,three,three_url,title,title_two,price,market_price,spec, goods_url,count_comment,goods_name,goods_no,goods_pz,goods_logo,goods_spec,goods_jx,goods_cj])
        return item
    def close_spider(self, spider):
        # Finish csv export (exporter variant):
        # self.csv_exporter.finish_exporting()
        # Close the file so buffered data is flushed to disk.
        self.f.close()
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
70a5a2a8d97d47e4470a414ce3590f34bca83b74
|
22e076588057d200c7119f87d330678e7ed7d168
|
/posts/forms.py
|
8094a686d31545ae191391fd805ca09373a1291f
|
[] |
no_license
|
DylanMsK/Insta_clone_project
|
16088926bda8f66fe412016f1764076dd46a7629
|
7921bef90aad1128021bd4e2bb60f96fd0efab01
|
refs/heads/master
| 2020-05-05T13:03:29.896332
| 2019-04-18T09:01:33
| 2019-04-18T09:01:33
| 180,057,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
from django import forms
from .models import Post, Comment
class PostModelForm(forms.ModelForm):
    """ModelForm for creating a Post; renders content as a sized textarea
    with a Korean placeholder ("What are you doing right now?")."""
    content = forms.CharField(
        label='content',
        widget=forms.Textarea(
            attrs={
                'class': '',
                'rows': 5,
                'cols': 50,
                'placeholder': '지금 뭘 하고 계신가요?'
            })
    )
    class Meta:
        model = Post
        # List of model columns exposed to user input.
        fields = ['content', 'image',]
class CommentForm(forms.ModelForm):
    """ModelForm exposing only the comment text field of Comment."""
    class Meta:
        model = Comment
        fields = ['comment',]
|
[
"kms920612@gmail.com"
] |
kms920612@gmail.com
|
18e350c9f21878bc1409a1ec2b3304e103c6c660
|
528c811306faa4a34bf51fca7955b7a24ac2e30c
|
/Python/Triangle.py
|
da2d8206529278895eea530d8c2d8f3d4bc40ef4
|
[] |
no_license
|
ganjingcatherine/LeetCode-1
|
1addbd7e4d9254a146601f9d5e28b8becb8235a6
|
488782d3f1e759da2d32b4e82dbf55b96c431244
|
refs/heads/master
| 2021-05-11T03:15:16.810035
| 2016-02-06T06:19:18
| 2016-02-06T06:19:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
"""
Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
Note:
Bonus point if you are able to do this using only O(n) extra space, where n is the total number of rows in the triangle.
"""
class Solution:
    # @param triangle, a list of lists of integers
    # @return an integer
    def minimumTotal(self, triangle):
        """Return the minimum top-to-bottom path sum (top-down DP).

        Fixes vs. the original:
        - an empty triangle raised IndexError; now returns 0
        - the final answer uses min() (O(n)) instead of sorting the last row
        """
        if not triangle:
            return 0
        d = [[0 for _ in range(len(triangle[j]))] for j in range(len(triangle))]
        for i in range(len(triangle)):
            for j in range(len(triangle[i])):
                if i == 0 and j == 0:
                    d[0][0] = triangle[0][0]
                elif j == 0:
                    # Left edge: only reachable from the cell directly above.
                    d[i][0] = triangle[i][0] + d[i-1][0]
                elif j == len(triangle[i]) - 1:
                    # Right edge: only reachable from the upper-left diagonal.
                    d[i][j] = triangle[i][j] + d[i-1][j-1]
                else:
                    d[i][j] = min(d[i-1][j-1], d[i-1][j]) + triangle[i][j]
        return min(d[-1])
class Solution:
    # @param triangle, a list of lists of integers
    # @return an integer
    def minimumTotal(self, triangle):
        """Return the minimum path sum bottom-up using O(n) extra space.

        Fixes vs. the original:
        - `d = triangle[len(triangle)-1]` aliased the last row and mutated
          the caller's input; we copy it instead
        - an empty triangle raised IndexError; now returns 0
        """
        if not triangle:
            return 0
        N = len(triangle)
        d = list(triangle[N-1])  # copy, don't alias the caller's row
        for i in reversed(range(N-1)):
            for j in range(i+1):
                d[j] = min(d[j], d[j+1]) + triangle[i][j]
        return d[0]
|
[
"anthonyjin0619@gmail.com"
] |
anthonyjin0619@gmail.com
|
c0d01549392c14b63f25cf3ca994a4bb47d47047
|
770537437474c63f6878c26a10a5853a9687c649
|
/Service/app/subscriber.py
|
fc7c5667b54bc22b29bbde8c6796ec4cd403f98a
|
[] |
no_license
|
Makalolu/BRKACI-2945-CLUS
|
89013da0a2c828abe43b2ab39f8bb85587c625ff
|
197702202ca146e6c82cb39ad48fad8569d1393d
|
refs/heads/master
| 2022-02-22T19:00:47.438095
| 2018-06-17T17:27:52
| 2018-06-17T17:27:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,783
|
py
|
import logging, sys
from .utils import (setup_logger, get_app, pretty_print, db_is_alive, init_db,
get_apic_session, get_class, subscribe,
)
# module level logging
logger = logging.getLogger(__name__)
def dns_subscriptions(db):
    """ build subscription to APIC dns objects and keep consistent values in
        database. On startup, simply wipe the db since we'll be pulling new
        objects (and any cached entries can be considered invalid on startup)
        dnsDomain
            - multiple domains supported, only one is 'default'
            - track 'name' and 'isDefault' (yes/no)
            - only support dnsp-default
        dnsProv
            - multiple providers supported, only one is preferred
            - track 'addr' which should be unique and 'preferred' (yes/no)
            - only support dnsp-default
    """
    # initialize db to clear out all existing objects
    init_db()
    # read initial state and insert into database
    (domains, providers) = ([], [])
    session = get_apic_session()
    if session is None:
        logger.error("unable to connect to APIC")
        return
    dnsDomain = get_class(session, "dnsDomain")
    dnsProv = get_class(session, "dnsProv")
    if dnsDomain is None or dnsProv is None:
        logger.error("failed to perform dns init")
        return
    for obj in dnsDomain:
        # Python 2 idiom: dict.keys() is a list; [0] grabs the single
        # classname wrapping this object's attributes.
        attr = obj[obj.keys()[0]]["attributes"]
        if "name" in attr and "dn" in attr and "isDefault" in attr:
            # only track objects under the default DNS profile
            if "/dnsp-default/" in attr["dn"]:
                domains.append({
                    "dn": attr["dn"],
                    "name":attr["name"],
                    "isDefault": True if attr["isDefault"]=="yes" else False
                })
    for obj in dnsProv:
        attr = obj[obj.keys()[0]]["attributes"]
        if "addr" in attr and "dn" in attr and "preferred" in attr:
            if "/dnsp-default/" in attr["dn"]:
                providers.append({
                    "dn": attr["dn"],
                    "addr":attr["addr"],
                    "preferred": True if attr["preferred"]=="yes" else False
                })
    # insert domains and providers into database
    logger.debug("inserting domains: %s, and providers: %s"%(domains,providers))
    db.dnsDomain.insert_many(domains)
    db.dnsProv.insert_many(providers)
    # setup subscriptions to interesting objects
    interests = {
        "dnsDomain": {"callback": handle_dns_event},
        "dnsProv": {"callback": handle_dns_event},
    }
    # subscribe() blocks while the subscription is healthy; reaching the
    # next line means it terminated unexpectedly.
    subscribe(interests)
    logger.error("subscription unexpectedly ended")
def handle_dns_event(event):
    """ handle created, deleted, modified events for dnsProv and dnsDomain by
        updating corresponding object in db.
        On successful create/delete clear dnsCache
    """
    # `db` is expected in scope at call time (bound in the __main__ block).
    if "imdata" in event and type(event["imdata"]) is list:
        for obj in event["imdata"]:
            # Python 2 idiom: keys() is a list; the single key is the classname.
            cname = obj.keys()[0]
            attr = obj[cname]["attributes"]
            if "status" not in attr or "dn" not in attr or \
                    attr["status"] not in ["created","modified", "deleted"]:
                logger.warn("skipping invalid event for %s: %s" % (attr,cname))
                continue
            if cname not in ["dnsProv", "dnsDomain"]:
                logger.debug("skipping event for classname %s" % cname)
                continue
            # attributes mirrored into the db for each class
            db_attr = ["dn"]
            if cname == "dnsDomain": db_attr+=["name", "isDefault"]
            else: db_attr+=["addr", "preferred"]
            # create object that will be added/deleted/updated in db
            obj = {}
            for a in db_attr:
                if a in attr: obj[a] = attr[a]
            # normalize APIC yes/no flags to booleans
            if "isDefault" in obj:
                obj["isDefault"] = True if obj["isDefault"]=="yes" else False
            if "preferred" in obj:
                obj["preferred"] = True if obj["preferred"]=="yes" else False
            logger.debug("%s %s obj:%s" % (cname, attr["status"], obj))
            if attr["status"] == "created" or attr["status"] == "modified":
                # upsert so create and modify events converge to the same doc
                ret = db[cname].update_one(
                    {"dn":attr["dn"]}, {"$set":obj}, upsert=True
                )
                logger.debug("update_one match/modify/upsert: [%s,%s,%s]" % (
                    ret.matched_count, ret.modified_count, ret.upserted_id))
            if attr["status"] == "deleted":
                ret = db[cname].delete_one({"dn":attr["dn"]})
                logger.debug("delete_one deleted: %s" % ret.deleted_count)
            if attr["status"] == "created" or attr["status"] == "deleted":
                # membership changed, so cached DNS lookups may now be stale
                logger.debug("clearing dnsCache")
                db["dnsCache"].drop()
if __name__ == "__main__":
# main can be used to run subscription or just to test db access
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--check_db", action="store_true", dest="check_db",
help="check for successful db connection")
args = parser.parse_args()
try:
# setup local logger along with 'app' logger
logger = setup_logger(logger, "subscriber.log", quiet=True)
setup_logger(logging.getLogger("app"), "subscriber.log", quiet=True)
# check db is alive before executing background subscriber
if not db_is_alive():
logger.error("unable to connect to db")
sys.exit(1)
if args.check_db:
# successfully checked db already
sys.exit(0)
# run subscriptions which only stop on error
app = get_app()
with app.app_context():
db = app.mongo.db
dns_subscriptions(db)
except KeyboardInterrupt as e:
print "\ngoodbye!\n"
sys.exit(1)
|
[
"agossett@cisco.com"
] |
agossett@cisco.com
|
b2a671dfca5e7fc447b993c10a529875dc54603f
|
c7061fb106b801c12fb40ff331d927a5bb24da80
|
/BasicExerciseAndKnowledge/w3cschool/n16_format_datetime.py
|
b0f4e62a46fdd8c480a544be789ecdafb00a1d3a
|
[
"MIT"
] |
permissive
|
Jonathan1214/learn-python
|
34e6b5612beeb1a46b5964b0a4e306656355fe84
|
19d0299b30e953069f19402bff5c464c4d5580be
|
refs/heads/master
| 2020-03-27T09:03:16.785034
| 2018-08-31T02:48:34
| 2018-08-31T02:48:34
| 146,310,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
#coding:utf-8
# Problem: print dates/times in various formats.
# (Python 2 script — uses print statements. Comments translated from Chinese.)
import time
import datetime
# The goal is to get familiar with these two modules.
print time.ctime() # localtime
print time.asctime(time.localtime())
print time.asctime(time.gmtime()) # gmt
print datetime.datetime(2018, 8, 12)
# print datetime.tzinfo
print datetime.date.today()
print datetime.date.fromtimestamp.__doc__
|
[
"jonathan1214@foxmail.com"
] |
jonathan1214@foxmail.com
|
49f0bec871aede1626dd9b0823050f24018b7413
|
c703b8ac3b5545857f6c95efa2d61eaf7a664021
|
/iPERCore/tools/human_digitalizer/deformers/__init__.py
|
e0d6c7b177b6946f7ec4806e5c0de347eece34a1
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
iPERDance/iPERCore
|
d29681d229b3098b3517b1abf4f7ea65f579de73
|
fcf9a18ffd66bf3fdd3eea4153a3bc4785131848
|
refs/heads/main
| 2023-07-30T15:04:15.835396
| 2023-04-12T14:21:23
| 2023-04-12T14:21:23
| 313,664,064
| 2,520
| 339
|
Apache-2.0
| 2023-05-12T03:26:52
| 2020-11-17T15:36:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
# Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import torch
from .sil_deformer import SilhouetteDeformer
from .clothlinks_deformer import ClothSmplLinkDeformer
def run_sil2smpl_offsets(obs_sils, init_smpls, image_size, device=torch.device("cuda:0"),
                         visualizer=None, visual_poses=None):
    """Solve for SMPL offsets that fit the observed silhouettes.

    Args:
        obs_sils (np.ndarray): observed silhouette masks.
        init_smpls (np.ndarray): initial SMPL parameters, laid out per row as
            cam (cols 0:3) | pose (cols 3:-10) | shape (last 10 cols).
        image_size (int): working resolution of the silhouette deformer.
        device (torch.device): device the solver runs on.
        visualizer (None or Visualizer): optional progress visualizer.
        visual_poses (None or np.ndarray): poses used for visualization.

    Returns:
        np.ndarray: the solved offsets.
    """
    # Solver that optimizes offsets against the silhouette observations.
    solver = SilhouetteDeformer(image_size=image_size, device=device)
    # Split the flat SMPL parameter vector into its cam/pose/shape slices.
    observations = {
        "sil": obs_sils,
        "cam": init_smpls[:, 0:3],
        "pose": init_smpls[:, 3:-10],
        "shape": init_smpls[:, -10:],
    }
    # Solve, then move the result off-device into a plain numpy array.
    return solver.solve(observations, visualizer, visual_poses).cpu().detach().numpy()
|
[
"liuwen@shanghaitech.edu.cn"
] |
liuwen@shanghaitech.edu.cn
|
46f4c190ec307f397e873c46ac6abca7c00b6cba
|
e616ea35ead674ebb4e67cae54768aaaeb7d89c9
|
/project/alma/disciplines/migrations/0001_initial.py
|
cd2f83a805a6561b60a83706fe7cba9576acbc37
|
[] |
no_license
|
VWApplications/VWAlmaAPI
|
12bb1888533cf987739b0e069737afa6337141e1
|
3a8009b17518384c269dfee3c8fe44cbe2567cc0
|
refs/heads/master
| 2022-04-02T10:26:49.832202
| 2020-02-12T04:46:31
| 2020-02-12T04:46:31
| 161,098,215
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,490
|
py
|
# Generated by Django 2.1.4 on 2019-09-21 20:17
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Discipline model.

    NOTE(review): generated by Django 2.1.4 — prefer a follow-up migration
    over hand-editing these operations.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Discipline',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(help_text='Title of discipline', max_length=100, verbose_name='Title')),
                ('institution', models.CharField(help_text='University or School in which the user is inserted.', max_length=100, verbose_name='Institution')),
                ('course', models.CharField(help_text='Course that is ministered the discipline', max_length=100, verbose_name='Course')),
                ('description', models.TextField(help_text='Description of discipline', verbose_name='Description')),
                ('classroom', models.CharField(default='Class A', help_text='Classroom title of discipline.', max_length=10, validators=[django.core.validators.RegexValidator(re.compile('^Class|^Turma [A-Z]$'), "Enter a valid classroom, the classroom need to be 'Class A-Z'")], verbose_name='Classroom')),
                ('password', models.CharField(blank=True, help_text='Password to get into the class.', max_length=30, verbose_name='Password')),
                ('students_limit', models.PositiveIntegerField(default=0, help_text='Students limit to get in the class.', validators=[django.core.validators.MaxValueValidator(60, 'There can be no more than %(limit_value)s students in the class.'), django.core.validators.MinValueValidator(5, 'Must have at least %(limit_value)s students in class.')], verbose_name='Students limit')),
                ('monitors_limit', models.PositiveIntegerField(default=0, help_text='Monitors limit to insert in the class.', validators=[django.core.validators.MaxValueValidator(5, 'There can be no more than %(limit_value)s monitors in the class.'), django.core.validators.MinValueValidator(0, 'Ensure this value is greater than or equal to %(limit_value)s.')], verbose_name='Monitors limit')),
                ('is_closed', models.BooleanField(default=False, help_text='Close discipline.', verbose_name='Is closed?')),
                ('created_at', models.DateTimeField(auto_now_add=True, help_text='Date that the discipline is created.', verbose_name='Created at')),
                ('updated_at', models.DateTimeField(auto_now=True, help_text='Date that the discipline is updated.', verbose_name='Updated at')),
                ('monitors', models.ManyToManyField(blank=True, related_name='monitor_classes', to=settings.AUTH_USER_MODEL, verbose_name='Monitors')),
                ('students', models.ManyToManyField(blank=True, related_name='student_classes', to=settings.AUTH_USER_MODEL, verbose_name='Students')),
                ('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='disciplines', related_query_name='discipline', to=settings.AUTH_USER_MODEL, verbose_name='Teacher')),
            ],
            options={
                'verbose_name': 'Discipline',
                'verbose_name_plural': 'Disciplines',
                'ordering': ['title', 'created_at'],
            },
        ),
    ]
|
[
"victorhad@gmail.com"
] |
victorhad@gmail.com
|
322b370d6d03d1c9bfafe46a87d7b9c8a55eaae6
|
ce0f8956c4c308c67bd700d31fe8d5a17b16ac08
|
/Python3/src/23 Miscellaneous Topics/PDF Manipulation/02_createWatermark.py
|
b055027b0fc51eb03efc7fb7e50e4af5484af4a7
|
[] |
no_license
|
seddon-software/python3
|
795ae8d22a172eea074b71d6cd49d79e388d8cc6
|
d5e6db1509a25c1a3040d5ae82d757539a2ff730
|
refs/heads/master
| 2021-07-10T15:48:31.893757
| 2020-07-16T20:29:22
| 2020-07-16T20:29:22
| 175,872,757
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
from reportlab.pdfgen import canvas
point = 10  # multiplier: "12 * point" below gives a 120pt font — presumably intentional for a page-wide watermark
inch = 72  # PDF user-space units per inch
TEXT = "watermark"  # string drawn diagonally across the page
def make_pdf_file(output_filename):
    """Write a single-page letter-size PDF with a large grey diagonal watermark.

    The text drawn is the module-level ``TEXT`` constant; the page is
    8.5" x 11" and the string is rotated 45 degrees around the origin.
    """
    page_w = 8.5 * inch
    page_h = 11 * inch
    shade = 0.9  # light grey fill
    pdf = canvas.Canvas(output_filename, pagesize=(page_w, page_h))
    pdf.setStrokeColorRGB(0, 0, 0)
    pdf.setFillColorRGB(shade, shade, shade)
    pdf.setFont("Helvetica", 12 * point)
    # Rotate the coordinate system, then shift so the text crosses the page.
    pdf.rotate(45)
    pdf.translate(page_w / 2, 0)
    pdf.drawString(-page_w / 8, 0, TEXT)
    pdf.showPage()
    pdf.save()
filename = "pdfs/watermark.pdf"
make_pdf_file(filename)
# The Python-2 `print "Wrote", filename` was mechanically converted to
# print(("Wrote", filename)), which printed a tuple repr; print the values.
print("Wrote", filename)
|
[
"seddon-software@keme.co.uk"
] |
seddon-software@keme.co.uk
|
dd1d15c77bbed78ecbb276388312c71711b89b76
|
20bb1ae805cd796a7c377e55966633441d1d9fd5
|
/CodeForces/Problems/887B Cubes for Masha/cubes.py
|
7e511bea378f4a51b6295ec6b24c35eb89ef6910
|
[] |
no_license
|
nathantheinventor/solved-problems
|
1791c9588aefe2ebdc9293eb3d58317346d88e83
|
c738e203fa77ae931b0ec613e5a00f9a8f7ff845
|
refs/heads/master
| 2022-10-27T08:58:23.860159
| 2022-10-13T20:18:43
| 2022-10-13T20:18:43
| 122,110,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
cubes = [input().split() for _ in range(int(input()))]


def canMake(s):
    """Return True if the 1-3 digit string ``s`` can be shown using distinct cubes.

    Each digit of ``s`` must appear on a different cube.  Strings longer
    than three digits fall through (returning None), mirroring the fact
    that only up to three cubes can be used.
    """
    digits = len(s)
    if digits == 1:
        return any(s in cube for cube in cubes)
    if digits == 2:
        for a, cube_a in enumerate(cubes):
            if s[0] not in cube_a:
                continue
            for b, cube_b in enumerate(cubes):
                if b != a and s[1] in cube_b:
                    return True
        return False
    if digits == 3:
        for a, cube_a in enumerate(cubes):
            if s[0] not in cube_a:
                continue
            for b, cube_b in enumerate(cubes):
                if b == a or s[1] not in cube_b:
                    continue
                for c, cube_c in enumerate(cubes):
                    if c != a and c != b and s[2] in cube_c:
                        return True
        return False
# Find the largest x such that every 1..x is makeable; 0 if even "1" is not.
if not canMake("1"):
    print(0)
else:
    for candidate in range(1, 1000):
        if not canMake(str(candidate)):
            print(candidate - 1)
            break
|
[
"nathantheinventor@gmail.com"
] |
nathantheinventor@gmail.com
|
564e7ae6d142c78bcd5de942b9a6a69facdfb9d0
|
8e0cdf235cd82e422c62fee3e6d044e4f4ee7614
|
/feedback/migrations/0004_remove_translation.py
|
f0d087297b5ccce43ab6fa90c2ef41ed6fab4ac5
|
[
"BSD-3-Clause"
] |
permissive
|
stevecassidy/signbank-feedback
|
4ae1c58a95a27428d11ef4a692c52738e9a4fb6f
|
d4cb8a7f445ca42c90a69d565d43875f50251aa8
|
refs/heads/master
| 2022-01-14T05:07:31.474605
| 2022-01-12T04:18:42
| 2022-01-12T04:18:42
| 78,930,468
| 1
| 1
| null | 2017-01-14T09:10:43
| 2017-01-14T09:10:43
| null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-09-17 13:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Drops the ``translation`` field from SignFeedback — presumably after the
    # preceding 0003_map_translation migration moved its data; verify against 0003.
    dependencies = [
        ('feedback', '0003_map_translation'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='signfeedback',
            name='translation',
        ),
    ]
|
[
"steve.cassidy@mq.edu.au"
] |
steve.cassidy@mq.edu.au
|
69cc105ffb1b88b37b4962ce32f29a3d2366625d
|
1af1f89eb9a178b95d1ba023b209b7538fb151f0
|
/Algorithms/498. Diagonal Traverse.py
|
a78694dcbb277726c2c4bc88dabf90747eadcb45
|
[] |
no_license
|
0xtinyuk/LeetCode
|
77d690161cc52738e63a4c4b6595a6012fa5c21e
|
08bc96a0fc2b672282cda348c833c02218c356f1
|
refs/heads/master
| 2023-02-21T16:58:39.881908
| 2021-01-25T08:00:13
| 2021-01-25T08:00:13
| 292,037,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
class Solution:
    def findDiagonalOrder(self, matrix: List[List[int]]) -> List[int]:
        """Return the elements of ``matrix`` in zig-zag diagonal order (LeetCode 498).

        Diagonals are the groups with constant row+col; even diagonals are
        emitted bottom-left to top-right, odd diagonals in the opposite
        direction.
        """
        if not matrix or not matrix[0]:
            return []
        rows, cols = len(matrix), len(matrix[0])
        result = []
        for d in range(rows + cols - 1):
            # Start at the bottom-most cell of diagonal d and walk up-right.
            r = min(d, rows - 1)
            c = d - r
            diagonal = []
            while r >= 0 and c < cols:
                diagonal.append(matrix[r][c])
                r -= 1
                c += 1
            # Odd diagonals run top-right to bottom-left in the final order.
            if d % 2 == 1:
                diagonal.reverse()
            result.extend(diagonal)
        return result
|
[
"xliu301@uottawa.ca"
] |
xliu301@uottawa.ca
|
67142483d36d0db80900abc7955171ba9822c98b
|
68cd659b44f57adf266dd37789bd1da31f61670d
|
/swea/덧셈.py
|
e5b8ab0ac443bc65fe5936e0ac9141aab0492675
|
[] |
no_license
|
01090841589/solved_problem
|
c0c6f5a46e4d48860dccb3b0288aa5b56868fbca
|
bbea2f31e5fe36cad100bc514eacd83545fb25b1
|
refs/heads/master
| 2023-07-02T23:55:51.631478
| 2021-08-04T13:57:00
| 2021-08-04T13:57:00
| 197,157,830
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
def summ(k, scr):
    """Backtracking search: print every subset of ``arr`` whose sum is ``num``.

    ``k`` is the index under consideration, ``scr`` the running sum.
    Subsets are reported as their 1-based positions in ``arr``.
    """
    if scr == num:
        # Target hit: report the positions currently marked in ``visited``.
        print([pos + 1 for pos in range(N) if visited[pos]])
        return
    if scr > num or k >= N:
        # Overshot the target, or no elements left to try.
        return
    # Branch 1: take arr[k] ...
    visited[k] = arr[k]
    summ(k + 1, scr + arr[k])
    # ... then backtrack and Branch 2: skip arr[k].
    visited[k] = 0
    summ(k + 1, scr)


arr = list(range(1, 11))
N = len(arr)
num = 10
visited = [0] * N
summ(0, 0)
|
[
"chanchanhwan@naver.com"
] |
chanchanhwan@naver.com
|
df6d16af59ecc459d304d7406ac8442ed9b48f06
|
26771494974942f4ab18d2cd8247506c344e1d14
|
/117-populatingNextRightPointersinEachNodeII.py
|
9b9e705cd655c6bfec49ca57ca65aa58890158d4
|
[] |
no_license
|
wangyunpengbio/LeetCode
|
9f4c6076e067c5e847d662679483f737d40e8ca5
|
cec1fd11fe43177abb2d4236782c0f116e6e8bce
|
refs/heads/master
| 2020-04-29T22:28:25.899420
| 2020-04-03T07:37:26
| 2020-04-03T07:37:26
| 176,448,957
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
"""
# Definition for a Node.
class Node:
def __init__(self, val, left, right, next):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
    def connect(self, root: 'Node') -> 'Node':
        """Link each node's ``next`` to its right neighbour on the same level.

        Works level by level (BFS); the rightmost node of every level gets
        ``next = None``.  Returns ``root`` unchanged otherwise.
        """
        if root is None:
            return None
        level = [root]
        while level:
            # Wire the level left-to-right, terminating with None.
            for left_node, right_node in zip(level, level[1:]):
                left_node.next = right_node
            level[-1].next = None
            # Collect the next level, skipping missing children.
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child is not None]
        return root
|
[
"wangyunpeng_bio@qq.com"
] |
wangyunpeng_bio@qq.com
|
c37e90516146a963e73064dbae83398fa95b20e3
|
1d48ddd72477de7d9ad98eef61bdfb406859b31c
|
/04. asyncio/web_scraping/test_pg.py
|
31752e74e37bf55d125a66ca1feeb9777c26d7ae
|
[] |
no_license
|
alexshchegretsov/async_techniques
|
b68d27de58bc2393520eb080838b2c72d356d2f3
|
42118504a39ccbd0bebad4ed41eba4b5c2e3d5dd
|
refs/heads/master
| 2020-12-04T06:40:34.712114
| 2020-01-06T20:59:58
| 2020-01-06T20:59:58
| 231,661,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
# -*- coding: utf-8 -*-
import asyncio
import asyncpg
async def run():
    """Connect to the local async_db, fetch all talks_headers rows, print them."""
    # Same credentials in keyword form:
    # user="async", password="Dexter89!", database="async_db", host="127.0.0.1", port="5432"
    conn = await asyncpg.connect("postgresql://async:Dexter89!@localhost/async_db")
    rows = await conn.fetch("""select * from talks_headers""")
    await conn.close()
    print(rows, len(rows))


if __name__ == '__main__':
    asyncio.run(run())
|
[
"nydollz77@gmail.com"
] |
nydollz77@gmail.com
|
6c1bf8a8173f069af524c50af7366e3150d5b5a6
|
5adb0e3bce712efb68b241421cd12e71d0401d98
|
/tasks/ehco.py
|
acf54d41b71c3edc251e2ea7866628ff0119bf2b
|
[
"MIT"
] |
permissive
|
librestar/backend
|
8e945c3953ec59b4717704a5ebfc613ed756cba1
|
9060453d140d4c1785b370fd548be519d04047d4
|
refs/heads/main
| 2023-02-11T03:36:33.584588
| 2021-01-14T07:34:08
| 2021-01-14T07:34:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,372
|
py
|
import json
import typing as t
import ansible_runner
from uuid import uuid4
from app.db.session import SessionLocal
from app.db.models.port import Port
from app.db.models.user import User
from app.db.models.server import Server
from app.db.models.port_forward import PortForwardRule
from app.db.crud.server import get_server
from app.db.crud.port import get_port
from tasks import celery_app
from tasks.utils.runner import run_async
from tasks.utils.handlers import iptables_finished_handler, status_handler
@celery_app.task()
def ehco_runner(
    port_id: int,
    server_id: int,
    port_num: int,
    args: str = None,
    remote_ip: str = None,
    update_status: bool = False,
    **kwargs,
):
    """Run the ``ehco.yml`` Ansible playbook for one forwarded port.

    Returns the ansible-runner artifact directory of the finished run.
    """
    server = get_server(SessionLocal(), server_id)
    playbook_vars = {
        "host": server.ansible_name,
        "local_port": port_num,
        "remote_ip": remote_ip,
        "ehco_args": args,
        "update_status": update_status,
        # Only (re)install ehco when updating status and it is not yet configured.
        "update_ehco": update_status and not server.config.get("ehco"),
    }
    if update_status:
        on_finished = iptables_finished_handler(server, port_id, True)
    else:
        on_finished = lambda r: None  # no-op callback when status is not tracked
    result = run_async(
        server=server,
        playbook="ehco.yml",
        extravars=playbook_vars,
        status_handler=lambda s, **k: status_handler(port_id, s, update_status),
        finished_callback=on_finished,
    )
    return result[1].config.artifact_dir
|
[
"me@leishi.io"
] |
me@leishi.io
|
63406186486569e40cecf5de8a6cae1dc00ae400
|
f54070cd3048a3645cb25f301592a904d387a1c9
|
/python_prgrams/testpython/class.py
|
d90814e0b94bcc93934d6f3342591b4b93ec4eaa
|
[] |
no_license
|
mak705/Python_interview
|
02bded60417f1e6e2d81e1f6cde6961d95da2a8e
|
aff2d6018fd539dbcde9e3a6b3f8a69167ffca0d
|
refs/heads/master
| 2020-03-22T21:03:34.018919
| 2019-11-15T08:51:34
| 2019-11-15T08:51:34
| 140,653,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
class PartyAnimal:
    # Python 2 code (print statements).  ``x`` is a class attribute that
    # serves as the initial counter value for every instance.
    x = 0
    def party(self):
        # First call reads the class attribute; assignment rebinds ``x``
        # on the instance, shadowing the class attribute from then on.
        self.x = self.x + 1
        print "so far ", self.x
# Demo (Python 2): the per-instance counter climbs 1, 2, 3.
an = PartyAnimal()
an.party()
an.party()
an.party()
print "Type", type(an)
print "Dir", dir(an)
#PartyAnimal.party(an)
|
[
"mak705@gmail.com"
] |
mak705@gmail.com
|
dace21adfb00aaf1f2863a3e40f9256a2a67b538
|
2d6d24c0bfee13fc4682dee52075e78a552a8d1c
|
/tests/io/test_scanners.py
|
88b4c30ae125ae42fe97d5aa7678fd851b13a7be
|
[
"MIT"
] |
permissive
|
sbiradarctr/pyTenable
|
b890875c5df3a1da676cebd57af51bc49666a7d2
|
2a6930cd7b29036780c291581d89ab33c0fd6679
|
refs/heads/master
| 2023-05-06T09:20:43.580412
| 2021-05-31T09:05:11
| 2021-05-31T09:05:11
| 371,701,521
| 0
| 0
|
MIT
| 2021-05-28T12:58:52
| 2021-05-28T12:58:52
| null |
UTF-8
|
Python
| false
| false
| 7,003
|
py
|
from tenable.errors import *
from ..checker import check, single
import uuid, pytest
# NOTE(review): NotFoundError / PermissionError / UnexpectedValueError come from
# ``tenable.errors`` via the star import above — PermissionError presumably
# shadows the builtin of the same name; verify against tenable.errors.
# ``api`` / ``stdapi`` / ``scanner`` / ``user`` are pytest fixtures defined
# elsewhere; ``vcr`` markers replay recorded HTTP cassettes.
# --- scanners.control_scan: argument validation and error mapping ---
@pytest.mark.vcr()
def test_scanner_control_scans_scanner_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.control_scan('nope', str(uuid.uuid4()), 'stop')
@pytest.mark.vcr()
def test_scanner_control_scans_scan_uuid_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.control_scan(1,1,'stop')
@pytest.mark.vcr()
def test_scanner_control_scans_action_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.control_scan(1,str(uuid.uuid4()), 1)
@pytest.mark.vcr()
def test_scanner_control_scans_action_unexpectedvalue(api):
    with pytest.raises(UnexpectedValueError):
        api.scanners.control_scan(1, str(uuid.uuid4()), 'nope')
@pytest.mark.vcr()
def test_scanner_control_scans_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scanners.control_scan(1,
            'c5e3e4c9-ee47-4fbc-9e1d-d6f39801f56c', 'stop')
@pytest.mark.vcr()
def test_scanner_control_scans_permissionerror(stdapi):
    with pytest.raises(PermissionError):
        stdapi.scanners.control_scan(1,
            'c5e3e4c9-ee47-4fbc-9e1d-d6f39801f56c', 'stop')
# --- scanners.delete ---
@pytest.mark.vcr()
def test_scanner_delete_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.delete('nope')
@pytest.mark.vcr()
def test_scanner_delete_notfound(api):
    with pytest.raises(NotFoundError):
        api.scanners.delete(1)
@pytest.mark.vcr()
def test_scanner_delete_permissionerror(stdapi, scanner):
    with pytest.raises(PermissionError):
        stdapi.scanners.delete(scanner['id'])
@pytest.mark.skip(reason="We don't want to actually delete scanners.")
def test_scanner_delete(api, scanner):
    api.scanners.delete(scanner['id'])
# --- scanners.details ---
@pytest.mark.vcr()
def test_scanner_details_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.details('nope')
@pytest.mark.vcr()
def test_scanner_details_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scanners.details(1)
@pytest.mark.vcr()
def test_scanner_details_permissionerror(stdapi, scanner):
    with pytest.raises(PermissionError):
        stdapi.scanners.details(scanner['id'])
@pytest.mark.vcr()
def test_scanner_details(api, scanner):
    # ``check`` (from ..checker) asserts presence and type of each field.
    s = api.scanners.details(scanner['id'])
    check(s, 'id', int)
    check(s, 'uuid', 'scanner-uuid')
    check(s, 'name', str)
    check(s, 'type', str)
    check(s, 'status', str)
    check(s, 'scan_count', int)
    check(s, 'engine_version', str)
    check(s, 'platform', str)
    check(s, 'loaded_plugin_set', str)
    check(s, 'owner', str)
    check(s, 'pool', bool)
# --- scanners.edit: per-keyword type validation ---
@pytest.mark.vcr()
def test_scanner_edit_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.edit('nope')
# NOTE(review): function name has a typo ("sanner"); pytest still collects it.
@pytest.mark.vcr()
def test_sanner_edit_plugin_update_typeerror(api, scanner):
    with pytest.raises(TypeError):
        api.scanners.edit(scanner['id'], force_plugin_update='yup')
@pytest.mark.vcr()
def test_scanner_edit_ui_update_typeerror(api, scanner):
    with pytest.raises(TypeError):
        api.scanners.edit(scanner['id'], force_ui_update='yup')
@pytest.mark.vcr()
def test_scanner_edit_finish_update_typeerror(api, scanner):
    with pytest.raises(TypeError):
        api.scanners.edit(scanner['id'], finish_update='yup')
@pytest.mark.vcr()
def test_scanner_edit_registration_code_typeerror(api, scanner):
    with pytest.raises(TypeError):
        api.scanners.edit(scanner['id'], registration_code=False)
@pytest.mark.vcr()
def test_scanner_edit_aws_update_typeerror(api, scanner):
    with pytest.raises(TypeError):
        api.scanners.edit(scanner['id'], aws_update_interval='nope')
@pytest.mark.vcr()
@pytest.mark.xfail(raises=PermissionError)
def test_scanner_edit_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scanners.edit(1, force_ui_update=True)
@pytest.mark.vcr()
def test_scanner_edit_permissionserror(stdapi, scanner):
    with pytest.raises(PermissionError):
        stdapi.scanners.edit(scanner['id'], force_ui_update=True)
@pytest.mark.vcr()
@pytest.mark.xfail(raises=PermissionError)
def test_scanner_edit(api, scanner):
    api.scanners.edit(scanner['id'], force_plugin_update=True)
# --- scanners.get_aws_targets ---
@pytest.mark.vcr()
def test_scanner_get_aws_targets_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.get_aws_targets('nope')
@pytest.mark.vcr()
def test_scanner_get_aws_targets_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scanners.get_aws_targets(1)
@pytest.mark.vcr()
@pytest.mark.xfail(raises=NotFoundError)
def test_scanner_get_aws_targets_permissionerror(stdapi):
    with pytest.raises(PermissionError):
        stdapi.scanners.get_aws_targets(1)
@pytest.mark.skip(reason="No AWS Environment to test against.")
@pytest.mark.vcr()
def test_scanner_get_aws_targets(api, scanner):
    pass
# --- scanners.get_scanner_key ---
@pytest.mark.vcr()
def test_scanner_key_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.get_scanner_key('nope')
@pytest.mark.vcr()
def test_scanner_key(api, scanner):
    assert isinstance(api.scanners.get_scanner_key(scanner['id']), str)
# --- scanners.get_scans ---
@pytest.mark.vcr()
def test_get_scans_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.get_scans('nope')
@pytest.mark.vcr()
def test_get_scans_notfounderror(api):
    with pytest.raises(NotFoundError):
        api.scanners.get_scans(1)
@pytest.mark.vcr()
def test_get_scans_permissionerror(stdapi, scanner):
    with pytest.raises(PermissionError):
        stdapi.scanners.get_scans(scanner['id'])
@pytest.mark.vcr()
def test_get_scans(api, scanner):
    assert isinstance(api.scanners.get_scans(scanner['id']), list)
# --- scanners.list ---
@pytest.mark.vcr()
def test_list_scanners_permissionerror(stdapi):
    with pytest.raises(PermissionError):
        stdapi.scanners.list()
@pytest.mark.vcr()
def test_list_scanners(api):
    assert isinstance(api.scanners.list(), list)
# --- scanners.toggle_link_state ---
@pytest.mark.vcr()
def test_link_state_id_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.toggle_link_state('nope', True)
@pytest.mark.vcr()
def test_link_state_linked_typeerror(api):
    with pytest.raises(TypeError):
        api.scanners.toggle_link_state(1, 'nope')
@pytest.mark.vcr()
def test_link_state_permissionerror(stdapi, scanner):
    with pytest.raises(PermissionError):
        stdapi.scanners.toggle_link_state(scanner['id'], True)
@pytest.mark.vcr()
def test_link_state(api, scanner):
    api.scanners.toggle_link_state(scanner['id'], True)
# --- scanners permissions ---
@pytest.mark.vcr()
def test_scanners_get_permissions(api, scanner):
    perms = api.scanners.get_permissions(scanner['id'])
    assert isinstance(perms, list)
    for p in perms:
        check(p, 'type', str)
        check(p, 'permissions', int)
@pytest.mark.vcr()
def test_scanner_edit_permissions(api, scanner, user):
    api.scanners.edit_permissions(scanner['id'],
        {'type': 'default', 'permissions': 16},
        {'type': 'user', 'id': user['id'], 'permissions': 16})
|
[
"steve@chigeek.com"
] |
steve@chigeek.com
|
ab6a077030d7e71350326b60b2622c761eac3670
|
ca539b0df7ca5a91f80b2e2f64e7379e69243298
|
/87.py
|
219641b62a1f8827bc7e6a09e66208ccf7bb59c1
|
[] |
no_license
|
yorick76ee/leetcode
|
9a9e5d696f3e32d9854c2ed9804bd0f98b03c228
|
d9880892fe15f9bb2916beed3abb654869945468
|
refs/heads/master
| 2020-03-18T22:59:29.687669
| 2016-07-18T19:56:55
| 2016-07-18T19:56:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,452
|
py
|
class Solution(object):
    def lettercount(self, s1, s2):
        """Return True if s1 and s2 contain the same multiset of characters.

        Assumes len(s2) >= len(s1); only the first len(s1) characters of
        each string are counted.
        """
        counts1, counts2 = {}, {}
        for i in range(len(s1)):
            counts1[s1[i]] = counts1.get(s1[i], 0) + 1
            counts2[s2[i]] = counts2.get(s2[i], 0) + 1
        for ch in s1:
            # A character missing from counts2 yields None, which compares unequal.
            if counts1.get(ch) != counts2.get(ch):
                return False
        return True

    def _lookup(self, a, b):
        """Return the memoized scramble answer for (a, b), computing it if absent."""
        key = (a, b)
        if key in self.dp:
            return self.dp[key]
        return self.recursive(a, b)

    def recursive(self, s1, s2):
        """Memoized check that s2 is a scramble of s1 (equal lengths assumed)."""
        n = len(s1)
        if n == 1 or s1 == s2:
            return s1 == s2
        # Different character multisets can never be scrambles of each other.
        if not self.lettercount(s1, s2):
            return False
        # Case 1: the split children stay in order.
        for cut in range(1, n):
            if self._lookup(s1[:cut], s2[:cut]) and self._lookup(s1[cut:], s2[cut:]):
                self.dp[(s1, s2)] = True
                return True
        # Case 2: the split children are swapped.
        for cut in range(1, n):
            if self._lookup(s1[:cut], s2[n - cut:]) and self._lookup(s1[cut:], s2[:n - cut]):
                self.dp[(s1, s2)] = True
                return True
        self.dp[(s1, s2)] = False
        return False

    def isScramble(self, s1, s2):
        """
        :type s1: str
        :type s2: str
        :rtype: bool
        """
        self.dp = {}  # fresh memo per top-level query
        return self.recursive(s1, s2)
if __name__ == '__main__':
    # Python 2 entry point (print statement); exercises the memoized check
    # on a pair of 30-character strings.
    wds= Solution()
    print wds.isScramble('oatzzffqpnwcxhejzjsnpmkmzngneo','acegneonzmkmpnsjzjhxwnpqffzzto')
|
[
"641614152@qq.com"
] |
641614152@qq.com
|
365f848ad8dde1db19f683afd8439f0362e34fb7
|
e3a674666de18e3b722bfd36e54d6a32e3f0b726
|
/html/default.py
|
6971548d1f71ed3f49da66c818ddae27850fbfbf
|
[] |
no_license
|
sauloaldocker/lamp
|
92d52c3105cd1d00d816138a64de66643fda67c3
|
9088f899e9a4e7e04941518041e10630cfdf71f1
|
refs/heads/master
| 2021-01-20T04:36:21.783064
| 2017-04-02T13:22:02
| 2017-04-02T13:22:02
| 21,629,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-# enable debugging
import cgitb
import os
import sys
# Python 2 CGI debug page: dumps sys.argv, os.environ and sys.path as HTML tables.
cgitb.enable()
# CGI response header must precede the body, separated by a blank line.
print "Content-Type: text/html;charset=utf-8"
print
print "<h1>argv</h1>"
print "<table>"
for k in sys.argv:
    print "<tr><td>%s</td></tr>" % (k)
print "</table>"
print "<h1>environ</h1>"
print "<table>"
for k in os.environ:
    print "<tr><td><b>%s</b></td><td>%s</td></tr>" % (k, os.environ[k])
print "</table>"
print "<h1>path</h1>"
print "<table>"
for k in sys.path:
    print "<tr><td>%s</td></tr>" % (k)
print "</table>"
|
[
"sauloal@gmail.com"
] |
sauloal@gmail.com
|
cd30dee9c2e39d4d74f5da68dd97c87656ac6d03
|
ecd27923efba50703a7bfbfa2ba37a8cc78560ea
|
/automatic_scraper/config/bid/liriqing/shandong_taian_ggzy_config.py
|
bd234c5293803ff68ced61e5c97669fc19eb8d3a
|
[] |
no_license
|
yougecn/work
|
fb691b072a736731083777e489712dee199e6c75
|
1b58525e5ee8a3bdecca87fdee35a80e93d89856
|
refs/heads/master
| 2022-03-03T19:14:17.234929
| 2018-04-17T12:29:19
| 2018-04-17T12:29:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,631
|
py
|
# coding: utf-8
import time
import logging
import re
logger = logging.getLogger(__name__)
author = "liriqing"
web_title = u"泰安市公共资源交易网"  # Tai'an public resource trading site
data_source = 'http://www.taggzyjy.com.cn'
start_urls = [
    ## Government procurement channels
    # Tender announcements (channel 075002001)
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001001/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001004/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001005/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001006/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002001/075002001007/",
    # Award announcements (channel 075002002)
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002001/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002004/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002005/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002006/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002002/075002002007/",
    # Correction notices (channel 075002003)
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003001/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003004/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003005/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003006/",
    "http://www.taggzyjy.com.cn/Front/jyxx/075002/075002003/075002003007/"
]
db_config = {
    'host': '127.0.0.1',
    'port': 3306,
    'user': 'root',
    'password': 'asd123',
    'database': 'bid_data',
    'table': 'zhaotoubiao'
}
# List-page extraction template (keys starting with "_" control crawling)
index_pattern = {
    "_list": {'pattern': "//tr[@height='30']", 'type': 'xpath', 'target': 'html', 'custom_func_name': ''},
    "_next_page": {'pattern': "//td[text() = '下页 >' and @onclick]", 'type': 'xpath', 'target': 'html', 'custom_func_name': ''},
    "title": {'pattern': "//a[@target='_blank']", 'type': 'xpath', 'target': 'text', 'custom_func_name': ''},
    "issue_time": {'pattern': "//td[@width='80']", 'type': 'xpath', 'target': 'text', 'custom_func_name': ''},
}
# Detail-page extraction template
detail_pattern = {
    "sc": {'pattern': "//td[@id='TDContent']/div[1]", 'type': 'xpath', 'target': 'clean_html', 'custom_func_name': ''},
}
def init(item):
    """Run once at spider start-up: normalise the seed ``item`` in place."""
    logger.info(u'init item: %s', item)
    # Rename web_title -> _web_title (underscore-prefixed keys are internal).
    item['_web_title'] = item.pop('web_title')
    item['region'] = u'山东-泰安市'
    item['_delay_between_pages'] = 3
def process_list_item(list_element, item):
    """Post-process one list-page row (called per ``_list`` element).

    Converts the bracketed ``issue_time`` text into a unix timestamp and
    derives ``bid_type`` from the channel id embedded in the start URL:
    075002001 -> 1 (tender), 075002002 -> 0 (award), 075002003 -> 2 (correction).
    """
    raw_date = item['issue_time'][1:-1]  # strip the surrounding bracket characters
    item['issue_time'] = int(time.mktime(time.strptime(raw_date, "%Y-%m-%d")))
    url = item['_current_start_url']
    if '075002001' in url:
        item['bid_type'] = 1
    elif '075002002' in url:
        item['bid_type'] = 0
    elif '075002003' in url:
        item['bid_type'] = 2
    # Pagination can be stopped by setting item['_click_next'] = False here.
def process_detail_item(item):
    """Post-process one detail page: flag whether any content (``sc``) was extracted."""
    item['is_get'] = int(len(item['sc']) > 0)
|
[
"iwechen123@gmail.com"
] |
iwechen123@gmail.com
|
51b0ecc3f68e0a7f94297a54e5a5c33b9f699b5b
|
658e2e3cb8a4d5343a125f7deed19c9ebf06fa68
|
/course_DE/udacity-data-engineering-projects-master/Project 5 - Data Pipelines with Airflow/exercises/dags/3_ex3_subdags/subdag.py
|
2751def0ecb6a5a10629e528018801bbdaf2210a
|
[] |
no_license
|
yennanliu/analysis
|
3f0018809cdc2403f4fbfe4b245df1ad73fa08a5
|
643ad3fed41961cddd006fadceb0e927f1db1f23
|
refs/heads/master
| 2021-01-23T21:48:58.572269
| 2020-10-13T22:47:12
| 2020-10-13T22:47:12
| 57,648,676
| 11
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
# Instructions
# In this exercise, we’ll place our S3 to RedShift Copy operations into a SubDag.
# 1 - Consolidate HasRowsOperator into the SubDag
# 2 - Reorder the tasks to take advantage of the SubDag Operators
import datetime
from airflow import DAG
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.udacity_plugin import HasRowsOperator
from airflow.operators.udacity_plugin import S3ToRedshiftOperator
import sql_statements.py
# Returns a DAG which creates a table if it does not exist, and then proceeds
# to load data into that table from S3. When the load is complete, a data
# quality check is performed to assert that at least one row of data is
# present.
def get_s3_to_redshift_dag(
        parent_dag_name,
        task_id,
        redshift_conn_id,
        aws_credentials_id,
        table,
        create_sql_stmt,
        s3_bucket,
        s3_key,
        *args, **kwargs):
    """Build the SubDag: create ``table`` if missing, then copy S3 data into it.

    A data-quality (row count) check is meant to follow the load — see TODOs.
    """
    subdag = DAG(
        f"{parent_dag_name}.{task_id}",
        **kwargs
    )
    # Ensure the destination table exists before loading.
    create_table = PostgresOperator(
        task_id=f"create_{table}_table",
        dag=subdag,
        postgres_conn_id=redshift_conn_id,
        sql=create_sql_stmt
    )
    # Bulk-copy the S3 prefix into the Redshift table.
    load_table = S3ToRedshiftOperator(
        task_id=f"load_{table}_from_s3_to_redshift",
        dag=subdag,
        table=table,
        redshift_conn_id=redshift_conn_id,
        aws_credentials_id=aws_credentials_id,
        s3_bucket=s3_bucket,
        s3_key=s3_key
    )
    # TODO: Move the HasRowsOperator task here from the DAG
    create_table >> load_table
    # TODO: Use DAG ordering to place the check task
    return subdag
|
[
"f339339@gmail.com"
] |
f339339@gmail.com
|
36d6859f91412f1d9bc50c8d9093e25601f1b157
|
854b94d7be92582bd191a7cb63143a95e5b5c337
|
/hyfetch/distros/postmarketos_small.py
|
4dc2bd42a651c2a3c7f18c7ef7c07c17cd241449
|
[
"MIT"
] |
permissive
|
hykilpikonna/hyfetch
|
673c0c999d0f3f542349824495ad6004f450ebac
|
98863df16d70b030696f4b94080d114396320f35
|
refs/heads/master
| 2023-08-17T10:41:10.289997
| 2023-08-17T03:37:23
| 2023-08-17T03:37:23
| 479,913,941
| 447
| 78
|
MIT
| 2023-09-14T14:39:18
| 2022-04-10T04:38:15
|
Shell
|
UTF-8
|
Python
| false
| false
| 325
|
py
|
# This file is automatically generated. Please do not modify.
from . import AsciiArt
postmarketos_small = AsciiArt(match=r'''"postmarketos_small"''', color='2 7', ascii=r"""
${c1} /\
/ \
/ \
\__ \
/\__ \ _\
/ / \/ __
/ / ____/ \
/ \ \ \
/_____/ /________\
""")
|
[
"me@hydev.org"
] |
me@hydev.org
|
4998d14e229e37f835bbecc90cd2f99ce4d68860
|
78efa54b2b253f99ea7e073f783e6121c20cdb52
|
/Codechef/Maximize The Sum.py
|
6c263f96896aaeb642979ffca927fdf582635a67
|
[] |
no_license
|
NishchaySharma/Competitve-Programming
|
32a93581ab17f05d20129471f7450f34ec68cc53
|
1ec44324d64c116098eb0beb74baac7f1c3395bb
|
refs/heads/master
| 2020-04-08T04:02:46.599398
| 2020-01-01T15:51:39
| 2020-01-01T15:51:39
| 159,000,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
# For each test case: after sorting, pairing the i-th smallest with the
# i-th largest maximizes the total of pairwise differences.
for _ in range(int(input())):
    size = int(input())
    values = sorted(map(int, input().split()))
    # values[i] <= values[size-i-1] once sorted, so the absolute gap is a plain difference.
    print(sum(values[size - i - 1] - values[i] for i in range(size // 2)))
|
[
"noreply@github.com"
] |
NishchaySharma.noreply@github.com
|
3f59c6edd6e5a5576e24f61b7997b031a064e4d7
|
a62c3f0f641c930d74aa4a43e14b0f1e8de71b5f
|
/pages/views.py
|
d3ee28ea642f9016e0fb679d2d6d97a165b998b5
|
[
"MIT"
] |
permissive
|
ticotheps/scenic-realty-app
|
b2b02f509cff51d40d88c07fe5afff7c65c73c0c
|
c91caaee019d4790d444d02067a1a8e83ed554ba
|
refs/heads/develop
| 2020-12-02T09:37:58.467839
| 2020-02-10T18:15:58
| 2020-02-10T18:15:58
| 230,966,666
| 0
| 0
|
MIT
| 2020-02-10T18:15:59
| 2019-12-30T19:10:19
|
CSS
|
UTF-8
|
Python
| false
| false
| 204
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
    """Render the landing page."""
    template = 'pages/index.html'
    return render(request, template)
def about(request):
    """Render the about page."""
    template = 'pages/about.html'
    return render(request, template)
|
[
"ticotheps@gmail.com"
] |
ticotheps@gmail.com
|
85115d1212270dde95742797c7074e489bb195c8
|
e9c0b70cab39fa771db383fa882436c14ae9aec7
|
/pizza_app/migrations/0001_initial.py
|
ece9b436685209c0100e8865b75f0d5b8d49abde
|
[
"MIT"
] |
permissive
|
rusrom/django_pizza_project
|
f4b67b558a6238b58e285f1b9eb38bf1c8cbadf5
|
350862ca49b91f5d5d4e12105846ecc9e4fc15c0
|
refs/heads/master
| 2020-07-16T05:45:07.229049
| 2019-09-02T14:14:21
| 2019-09-02T14:14:21
| 205,732,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-09-02 12:28
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for pizza_app: a PizzaShop owned one-to-one by a user.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='PizzaShop',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=100)),
                ('address', models.CharField(max_length=100)),
                ('logo', models.ImageField(upload_to='logo/')),
                ('owner', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='pizzashop', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"rusrom@guyfawkes.33mail.com"
] |
rusrom@guyfawkes.33mail.com
|
5858339fb5fa9dbe8b8188ff43641fdd371396b9
|
1ee10e1d42b59a95a64d860f0477a69b016d1781
|
/Lecture_03/Lecture Code/10_Matcher_3_Lexical_Attibutes.py
|
00f1d77a02bad808777d7d520f42ccb07444ce0b
|
[] |
no_license
|
KushalIsmael/NLP
|
5564070a573d251d7222dda85b8025ae1f9c3c6f
|
d4ce567a009e149b0cb1781d3a341d25aa438916
|
refs/heads/master
| 2023-08-18T14:07:48.646386
| 2021-10-28T19:09:25
| 2021-10-28T19:09:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
import spacy
from spacy.matcher import Matcher
nlp = spacy.load("en_core_web_sm")
matcher = Matcher(nlp.vocab)

# One dict per token: "<digits> fifa world cup <punctuation>" (words matched case-insensitively).
pattern = [
    {"IS_DIGIT": True},
    {"LOWER": "fifa"},
    {"LOWER": "world"},
    {"LOWER": "cup"},
    {"IS_PUNCT": True},
]
matcher.add("FIFA", [pattern])

doc = nlp("2018 FIFA World Cup: France won!")
matches = matcher(doc)
for match_id, start, end in matches:
    # Each hit is (pattern-id, start-token, end-token); slice the doc to get the span text.
    print(doc[start:end].text)
|
[
"amir.h.jafari@okstate.edu"
] |
amir.h.jafari@okstate.edu
|
23de31fa7213263f9a98e2bd707d3c2d771dd3be
|
eda36d24a1e6d4f30597ab1e1b2d8e17694f93bd
|
/weio/tests/test_turbsim.py
|
2afe6ac46c1e982c0352cf1e40abbc37dad84357
|
[
"MIT"
] |
permissive
|
ebranlard/weio
|
31fdab7a8afde9919f66fab942dad309f8d8d0e2
|
50fab087c5dc3e0248bcce578de6e713fa3e9b5f
|
refs/heads/main
| 2023-07-23T19:32:42.548855
| 2022-12-19T08:13:06
| 2022-12-19T08:13:06
| 152,828,434
| 25
| 20
|
MIT
| 2023-01-13T20:37:29
| 2018-10-13T02:44:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
import unittest
import os
import numpy as np
from .helpers_for_test import MyDir, reading_test
try:
from weio.turbsim_file import TurbSimFile
except:
from weio.weio.turbsim_file import TurbSimFile
class Test(unittest.TestCase):
    def test_001_read_all(self, DEBUG=True):
        # Smoke test: every TurbSim_* fixture file must parse with TurbSimFile.
        reading_test('TurbSim_*.*', TurbSimFile)
    def test_TurbSim(self):
        # --- Test without tower: round-trip write/read preserves velocity field
        F = TurbSimFile(os.path.join(MyDir,'TurbSim_NoTwr.bts'))
        F.write(    os.path.join(MyDir,'TurbSim_NoTwr_TMP.bts'))
        F2= TurbSimFile(os.path.join(MyDir,'TurbSim_NoTwr_TMP.bts'))
        os.remove(  os.path.join(MyDir,'TurbSim_NoTwr_TMP.bts'))
        # Compare each velocity component (u, v, w) to 4 decimals.
        np.testing.assert_almost_equal(F['u'][0,:,:,:],F2['u'][0,:,:,:],4)
        np.testing.assert_almost_equal(F['u'][1,:,:,:],F2['u'][1,:,:,:],4)
        np.testing.assert_almost_equal(F['u'][2,:,:,:],F2['u'][2,:,:,:],4)
        # --- Test with tower: spot-check known values, then round-trip
        F = TurbSimFile(os.path.join(MyDir,'TurbSim_WithTwr.bts'))
        np.testing.assert_almost_equal(F['u'][2,-1,1,3], 0.508036, 5)
        np.testing.assert_almost_equal(F['u'][0, 4,2,0], 7.4867466, 5)
        np.testing.assert_almost_equal(F['uTwr'][0, 4, :], [6.1509, 6.4063, 8.9555, 7.6943], 4)
        F.write(    os.path.join(MyDir,'TurbSim_WithTwr_TMP.bts'))
        F2= TurbSimFile(os.path.join(MyDir,'TurbSim_WithTwr_TMP.bts'))
        os.remove(  os.path.join(MyDir,'TurbSim_WithTwr_TMP.bts'))
        np.testing.assert_almost_equal(F['u'][0,:,:,:],F2['u'][0,:,:,:],3)
        np.testing.assert_almost_equal(F['u'][1,:,:,:],F2['u'][1,:,:,:],3)
        np.testing.assert_almost_equal(F['u'][2,:,:,:],F2['u'][2,:,:,:],3)
if __name__ == '__main__':
#     Test().test_000_debug()
    unittest.main()
|
[
"emmanuel.branlard@nrel.gov"
] |
emmanuel.branlard@nrel.gov
|
b1c10929ca27cebfc8f32d5fa3e33f13d3744bd3
|
c251401a04faee549a5255745dc976c2be8e24b9
|
/work_orders/permissions.py
|
15b4821acb2a82a375b098f4d93f2ef74b862691
|
[] |
no_license
|
fengo4142/aero-django-backend
|
a43a3526b570730fd9d519b8e890e550ff9f9f3c
|
53167b52b68b30eef6a10edea47888ba0ad71a4e
|
refs/heads/master
| 2022-11-11T10:01:50.534513
| 2020-06-24T15:40:11
| 2020-06-24T15:40:11
| 274,699,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,948
|
py
|
import logging
from rest_framework.permissions import BasePermission
from work_orders.models import WorkOrderForm
logger = logging.getLogger('backend')
# *****************************************************************************
# ***************************** WORK ORDERS *******************************
# *****************************************************************************
class CanCreateWorkOrders(BasePermission):
    """Grants POST access to authenticated users whose aerosimple
    profile carries the "add_workorder" permission."""

    def has_permission(self, request, view):
        user = request.user
        if user is None or not user.is_authenticated:
            return False
        return bool(
            request.method == 'POST'
            and user.aerosimple_user
            and user.aerosimple_user.has_permission("add_workorder")
        )
class CanViewWorkOrders(BasePermission):
    """Grants GET access (list and detail) to authenticated users whose
    aerosimple profile carries the "view_workorder" permission."""

    def has_permission(self, request, view):
        user = request.user
        if user is None or not user.is_authenticated:
            return False
        return bool(
            request.method == 'GET'
            and user.aerosimple_user
            and user.aerosimple_user.has_permission("view_workorder")
        )
class CanFillMaintenanceForm(BasePermission):
    """Grants POST access for filling the maintenance form.

    The user needs both the "add_maintenance" and "view_workorder"
    permissions, and must either hold the form's assigned role or be
    individually assigned to the form.
    """

    def has_permission(self, request, view):
        user = request.user
        if user is None or not user.is_authenticated:
            return False
        profile = user.aerosimple_user
        # Look up the airport's work-order form configuration.
        woform = WorkOrderForm.objects.get(airport__id=profile.airport_id)
        mform = woform.maintenance_form
        has_role = mform.assigned_role in profile.roles.all()
        is_assigned = profile in mform.assigned_users.all()
        return bool(
            request.method == 'POST'
            and profile
            and profile.has_permission("add_maintenance")
            and profile.has_permission("view_workorder")
            and (has_role or is_assigned)
        )
class CanFillOperationsForm(BasePermission):
    """Grants POST access for filling the operations form.

    The user needs both the "add_operations" and "view_workorder"
    permissions, and must either hold the form's assigned role or be
    individually assigned to the form.
    """

    def has_permission(self, request, view):
        user = request.user
        if user is None or not user.is_authenticated:
            return False
        profile = user.aerosimple_user
        # Look up the airport's work-order form configuration.
        woform = WorkOrderForm.objects.get(airport__id=profile.airport_id)
        oform = woform.operations_form
        has_role = oform.assigned_role in profile.roles.all()
        is_assigned = profile in oform.assigned_users.all()
        return bool(
            request.method == 'POST'
            and profile
            and profile.has_permission("add_operations")
            and profile.has_permission("view_workorder")
            and (has_role or is_assigned)
        )
class CanEditWorkOrderSchema(BasePermission):
    """Grants POST access to authenticated users whose aerosimple
    profile carries the "add_workorderschema" permission."""

    def has_permission(self, request, view):
        user = request.user
        if user is None or not user.is_authenticated:
            return False
        return bool(
            request.method == 'POST'
            and user.aerosimple_user
            and user.aerosimple_user.has_permission("add_workorderschema")
        )
|
[
"fengo4142@gmail.com"
] |
fengo4142@gmail.com
|
e41d486baf0f584817240d5dfb4283ad35235fff
|
a80884040ce1c178274a3068d216f440dd541844
|
/tests/operators/test_group_by.py
|
148a994d874624aae29cd6aea6bd533dc90abce8
|
[
"MIT"
] |
permissive
|
maki-nage/rxsci
|
a4aae51edc1ef684b55df22e34c11aa1d54ef740
|
915e59ebf593c4b313265bb87cf0e1209ec2ee0f
|
refs/heads/master
| 2023-01-19T14:32:11.638497
| 2023-01-17T08:06:35
| 2023-01-17T08:06:35
| 242,592,973
| 9
| 2
|
MIT
| 2022-11-08T21:54:16
| 2020-02-23T21:23:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,013
|
py
|
import rx
import rx.operators as ops
import rxsci as rs
from ..utils import on_probe_state_topology
def test_group_by_obs():
    """group_by with an rx.pipe() group pipeline: items must pass through
    unchanged, and the muxed pipeline must observe one topology probe
    followed by per-group create/next/completed lifecycle events."""
    source = [1, 2, 2, 1]
    actual_error = []
    actual_completed = []
    actual_result = []
    mux_actual_result = []

    def on_completed():
        actual_completed.append(True)

    store = rs.state.StoreManager(store_factory=rs.state.MemoryStore)
    rx.from_(source).pipe(
        rs.state.with_store(
            store,
            rx.pipe(
                rs.ops.group_by(
                    lambda i: i,
                    rx.pipe(
                        ops.do_action(mux_actual_result.append),
                    ),
                ))
        ),
    ).subscribe(
        on_next=actual_result.append,
        on_completed=on_completed,
        on_error=actual_error.append,
    )

    assert actual_error == []
    assert actual_completed == [True]
    assert actual_result == source
    # First muxed event is always the topology probe ...
    assert type(mux_actual_result[0]) is rs.state.ProbeStateTopology
    # ... then the per-group events in source order: group 0 carries the
    # 1s, group 1 carries the 2s, and both groups complete at the end.
    assert mux_actual_result[1:] == [
        rs.OnCreateMux((0 ,(0,)), store),
        rs.OnNextMux((0, (0,)), 1, store),
        rs.OnCreateMux((1, (0,)), store),
        rs.OnNextMux((1, (0,)), 2, store),
        rs.OnNextMux((1, (0,)), 2, store),
        rs.OnNextMux((0, (0,)), 1, store),
        rs.OnCompletedMux((0, (0,)), store),
        rs.OnCompletedMux((1, (0,)), store),
    ]
def test_group_by_list():
    """Same scenario as test_group_by_obs, but the group pipeline is
    given as a plain list of operators instead of an rx.pipe(); the
    observable behavior must be identical."""
    source = [1, 2, 2, 1]
    actual_error = []
    actual_completed = []
    actual_result = []
    mux_actual_result = []

    def on_completed():
        actual_completed.append(True)

    store = rs.state.StoreManager(store_factory=rs.state.MemoryStore)
    rx.from_(source).pipe(
        rs.state.with_store(
            store,
            rx.pipe(
                rs.ops.group_by(
                    lambda i: i,
                    [
                        ops.do_action(mux_actual_result.append),
                    ],
                ))
        ),
    ).subscribe(
        on_next=actual_result.append,
        on_completed=on_completed,
        on_error=actual_error.append,
    )

    assert actual_error == []
    assert actual_completed == [True]
    assert actual_result == source
    # Topology probe first, then the same per-group event sequence as in
    # the rx.pipe() variant.
    assert type(mux_actual_result[0]) is rs.state.ProbeStateTopology
    assert mux_actual_result[1:] == [
        rs.OnCreateMux((0 ,(0,)), store),
        rs.OnNextMux((0, (0,)), 1, store),
        rs.OnCreateMux((1, (0,)), store),
        rs.OnNextMux((1, (0,)), 2, store),
        rs.OnNextMux((1, (0,)), 2, store),
        rs.OnNextMux((0, (0,)), 1, store),
        rs.OnCompletedMux((0, (0,)), store),
        rs.OnCompletedMux((1, (0,)), store),
    ]
def test_group_by_without_store():
    """group_by used outside of a state store must error with ValueError."""
    errors = []

    rx.from_([1, 2, 3, 4]).pipe(
        rs.ops.group_by(
            lambda i: i % 2 == 0,
            pipeline=rx.pipe(
            ),
        )
    ).subscribe(on_error=errors.append)

    assert type(errors[0]) is ValueError
def test_forward_topology_probe():
    """The state-topology probe must be forwarded downstream exactly once."""
    probes = []

    rx.from_([1, 2, 3, 4]).pipe(
        rs.state.with_memory_store(
            rx.pipe(
                rs.ops.group_by(
                    lambda i: i % 2 == 0,
                    pipeline=rx.pipe(),
                ),
                on_probe_state_topology(probes.append),
            )
        ),
    ).subscribe()

    assert len(probes) == 1
def test_empty_source():
    """An empty source must produce no items through group_by."""
    emitted = []
    completed = []
    errors = []

    rx.from_([]).pipe(
        rs.state.with_memory_store(
            rx.pipe(
                rs.ops.group_by(
                    lambda i: i % 2 == 0,
                    pipeline=[],
                ),
            )
        ),
    ).subscribe(
        on_next=emitted.append,
        on_completed=lambda: completed.append(True),
        on_error=errors.append,
    )

    assert emitted == []
|
[
"romain.picard@oakbits.com"
] |
romain.picard@oakbits.com
|
ef4f31488ff1d5936c39d77fc37b29c55734102e
|
4500003dcaa3eb92e2b9c6bca8987ec473fb5ec3
|
/core/migrations/0006_post_slug.py
|
db41286dfce7136c7c34e38796bac248d7291c36
|
[] |
no_license
|
alikhundmiri/simpleweddingdjango
|
0bb2bfc069bac075d759efa96eede55c68595cf4
|
57aa6576df368fde651f7f2b6863f693bbb57756
|
refs/heads/master
| 2022-12-17T22:36:18.674974
| 2020-06-14T08:10:09
| 2020-06-14T08:10:09
| 239,115,495
| 0
| 0
| null | 2022-12-08T03:51:09
| 2020-02-08T11:01:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 866
|
py
|
# Generated by Django 3.0.3 on 2020-03-29 16:37
from django.db import migrations, models
from core.utils import random_string_generator
from django.utils.text import Truncator
from django.utils.text import slugify
def gen_slug(apps, schema_editor):
    """Backfill Post.slug for rows that lack one.

    The slug is the slugified title (truncated to 200 chars) plus a
    random 4-character suffix to avoid collisions between equal titles.
    """
    post_model = apps.get_model('core', 'Post')
    for post in post_model.objects.all():
        if post.slug:
            continue
        truncated = Truncator(post.title).chars(200)
        post.slug = slugify(truncated + '-' + random_string_generator(size=4))
        post.save()
class Migration(migrations.Migration):
    """Adds the nullable slug field to Post and backfills existing rows."""

    dependencies = [
        ('core', '0005_auto_20200329_2203'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='slug',
            field=models.SlugField(max_length=200, null=True),
        ),
        # Data migration: populate slugs for existing rows. Reverse is a
        # no-op so the migration can still be unapplied cleanly.
        migrations.RunPython(gen_slug, reverse_code=migrations.RunPython.noop),
    ]
|
[
"salikhundmiri@gmail.com"
] |
salikhundmiri@gmail.com
|
b5450b3f9c338676f9ab05092e450396a19672b0
|
5d5f6ba3bdcb52b4750a5f28afa8a1a1019bfc9e
|
/django/extras/djangoForms/djangoFormApp/models.py
|
464d6d00fd8196fb2c75dbf55badc599443656b0
|
[] |
no_license
|
eDiazGtz/pythonLearning
|
06e96f2f5a6e48ac314cb815cf9fbf65d0b7c2c8
|
57d7b2292cf5d9769cce9adf765962c3c0930d6c
|
refs/heads/master
| 2023-06-18T02:16:09.293375
| 2021-05-03T18:09:52
| 2021-05-03T18:09:52
| 335,090,531
| 0
| 0
| null | 2021-05-03T18:09:53
| 2021-02-01T21:35:24
|
Python
|
UTF-8
|
Python
| false
| false
| 758
|
py
|
from django.db import models
# Create your models here.
class UserManager(models.Manager):
    """Manager providing registration-form validation for User."""

    def createValidator(self, postData):
        """Return a dict mapping field name to error message.

        An empty dict means the submitted data is valid.
        """
        errors = {}
        first_name = postData['firstName']
        last_name = postData['lastName']
        email = postData['email']
        if len(first_name) < 1:
            errors["firstName"] = "First Name should be at least 1 character"
        if len(last_name) < 1:
            errors["lastName"] = "Last Name should be at least 1 character"
        if len(email) > 50:
            errors["email"] = "Email max length 50 Characters"
        return errors
class User(models.Model):
    """Registered user; input validation lives in UserManager.createValidator."""
    firstName = models.CharField(max_length=17)
    lastName = models.CharField(max_length=20)
    email = models.CharField(max_length=50)
    # NOTE(review): stored as a plain CharField; nothing here shows
    # hashing — confirm the view hashes passwords before saving.
    password = models.CharField(max_length=100)
    objects = UserManager()
|
[
"ediaz-gutierrez@hotmail.com"
] |
ediaz-gutierrez@hotmail.com
|
1d1e5c80adae2a85e36764be6c6786ca13998bc7
|
3a771b72dae1aae406b94726bcbcf73915577b18
|
/q38.py
|
0a85a5450c76b409276bf18b448122f28c6bc171
|
[] |
no_license
|
SHANK885/Python-Basic-Programs
|
4fcb29280412baa63ffd33efba56d9f59770c9dc
|
157f0f871b31c4523b6873ce5dfe0d6e26a6dc61
|
refs/heads/master
| 2021-07-18T18:24:10.455282
| 2018-11-19T07:02:27
| 2018-11-19T07:02:27
| 138,009,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
'''
Define a function which can generate a list where the values are square of numbers between 1 and 20 (both included).
Then the function needs to print the last 5 elements in the list.
'''
def lis(lower, upper):
    """Print (and return) the last five squares of lower..upper inclusive.

    Bug fix: the original appended the numbers themselves instead of
    their squares, contradicting the stated specification ("values are
    square of numbers between 1 and 20"). Returning the printed list is
    a backward-compatible addition that makes the function testable.
    """
    squares = [i ** 2 for i in range(lower, upper + 1)]
    last_five = squares[-5:]
    print(last_five)
    return last_five


lis(1, 20)
|
[
"shashankshekhar885@gmail.com"
] |
shashankshekhar885@gmail.com
|
b42d376714e61221c9b1932afe6a308354078de5
|
523fb785bda41e33546c929a5c2de6c93f98b434
|
/专题学习/链表/mergeKLists.py
|
89db71c8897b4a8abf67d8c47ea987374e83a389
|
[] |
no_license
|
lizhe960118/TowardOffer
|
afd2029f8f9a1e782fe56ca0ff1fa8fb37892d0e
|
a0608d34c6ed96c9071cc3b9bdf70c95cef8fcbd
|
refs/heads/master
| 2020-04-27T10:33:21.452707
| 2019-05-02T10:47:01
| 2019-05-02T10:47:01
| 174,259,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def mergeKLists(self, lists):
        """
        :type lists: List[ListNode]
        :rtype: ListNode
        """
        # Collect every value from every input list, sort the values,
        # then rebuild a single linked list behind a dummy head.
        values = []
        for node in lists:
            while node:
                values.append(node.val)
                node = node.next

        dummy = ListNode(-1)
        tail = dummy
        for value in sorted(values):
            tail.next = ListNode(value)
            tail = tail.next
        return dummy.next
|
[
"2957308424@qq.com"
] |
2957308424@qq.com
|
89e4b0cc8cc580454793178a3e90e399b693f848
|
1cd853babf022779f3392eb9e1781f952d4f2c07
|
/proposal.py
|
45a17c9f760c1ab2575741bea87304eb7b516340
|
[
"Apache-2.0"
] |
permissive
|
ksrhamdi/proCon3
|
84b53027305f609267393701b49f3e7efade9097
|
f0d214651dae5cbdbd4f7ff881269fb1cc5501ad
|
refs/heads/master
| 2022-11-10T06:58:07.931219
| 2020-06-03T18:01:10
| 2020-06-03T18:01:10
| 276,995,886
| 0
| 0
|
Apache-2.0
| 2020-07-03T22:15:11
| 2020-07-03T22:15:10
| null |
UTF-8
|
Python
| false
| false
| 5,885
|
py
|
# Import external modules.
from google.appengine.api import memcache
from google.appengine.ext import ndb
import logging
import random
import time
# Import local modules.
from configuration import const as conf
from constants import Constants
const = Constants()
const.MAX_RETRY = 3
const.MIN_REAGGREGATE_DELAY_SEC = 60
# Parent key: RequestForProposals? No, use KeyProperty instead.
class Proposal(ndb.Model):
    """Datastore entity for one proposal, with cached pro/con vote sums."""
    requestId = ndb.StringProperty()  # May be null
    title = ndb.StringProperty()
    detail = ndb.StringProperty()
    creator = ndb.StringProperty()
    allowEdit = ndb.BooleanProperty()
    # Timestamp guarding re-aggregation of votes (see __setVoteAggStartTime).
    voteAggregateStartTime = ndb.IntegerProperty()
    numPros = ndb.IntegerProperty( default=0 )
    numCons = ndb.IntegerProperty( default=0 )
    netPros = ndb.IntegerProperty( default=0 )  # numPros - numCons
    # Last time the shard sums were folded back into this record.
    lastSumUpdateTime = ndb.IntegerProperty( default=0 )
@ndb.transactional( retries=const.MAX_RETRY )
def setEditable( proposalId, editable ):
    """Transactionally set a proposal's allowEdit flag."""
    proposalRecord = Proposal.get_by_id( int(proposalId) )
    proposalRecord.allowEdit = editable
    proposalRecord.put()
#####################################################################################
# Use tasklets for async counting pros/cons per proposal.
# If enough delay since voteAggregateStartTime... updates voteAggregateStartTime and returns flag.
# If enough delay since voteAggregateStartTime... updates voteAggregateStartTime and returns flag.
@ndb.transactional( retries=const.MAX_RETRY )
def __setVoteAggStartTime( proposalId ):
    """Rate limiter: return True (and stamp the proposal) only when at
    least MIN_REAGGREGATE_DELAY_SEC has passed since the last stamp."""
    proposalRecord = Proposal.get_by_id( int(proposalId) )
    now = int( time.time() )
    if proposalRecord.voteAggregateStartTime + const.MIN_REAGGREGATE_DELAY_SEC > now:
        return False
    proposalRecord.voteAggregateStartTime = now
    proposalRecord.put()
    return True

# Retrieves all reason vote counts for a proposal, sums their pro/con counts, and updates proposal pro/con counts.
@ndb.tasklet
def __updateVoteAggs( proposalId ):
    """Tasklet: re-sum pro/con counts from all Reason records of a proposal
    and write the totals back onto the Proposal record."""
    reasons = yield Reason.query( Reason.proposalId==proposalId ).fetch_async()  # Async
    numPros = sum( reason.voteCount for reason in reasons if reason.proOrCon == conf.PRO )
    numCons = sum( reason.voteCount for reason in reasons if reason.proOrCon == conf.CON )
    __setNumProsAndCons( proposalId, numPros, numCons )  # Transaction
#####################################################################################
# Use sharded counter to count pros/cons per proposal.
const.NUM_SHARDS = 10                      # shards per proposal counter
const.SHARD_KEY_TEMPLATE = '{}-{}'         # "<proposalId>-<shardNum>"
const.COUNTER_CACHE_SEC = 10               # min delay between sum writes

class ProposalShard( ndb.Model ):
    """One shard of a proposal's distributed pro/con counter."""
    requestId = ndb.StringProperty()
    proposalId = ndb.StringProperty()
    numPros = ndb.IntegerProperty( default=0 )
    numCons = ndb.IntegerProperty( default=0 )
@ndb.tasklet
def incrementTasklet( requestId, proposalId, prosInc, consInc ):
    """Increment a proposal's pro/con counters via a random shard, then —
    rate-limited by COUNTER_CACHE_SEC — fold all shard sums back into the
    Proposal record so top proposals stay queryable by score."""
    logging.debug( 'proposal.incrementAsync() proposalId={}'.format(proposalId) )
    yield __incrementShard( requestId, proposalId, prosInc, consInc )  # Pause and wait for async transaction

    # Cache sums in Proposal record, to make top proposals queryable by score.
    # Rate-limit updates to Proposal, by storing last-update time
    now = int( time.time() )
    updateNow = yield __checkAndSetLastSumTime( proposalId, now )  # Pause and wait for async transaction
    logging.debug( 'proposal.incrementAsync() updateNow=' + str(updateNow) )
    if updateNow:
        shardRecords = yield __getProposalShardsAsync( proposalId )  # Pause and wait for async
        # Shards that were never written come back as None; skip them.
        numPros = sum( s.numPros for s in shardRecords if s )
        numCons = sum( s.numCons for s in shardRecords if s )
        logging.debug( 'proposal.incrementAsync() numPros=' + str(numPros) + ' numCons=' + str(numCons) )
        yield __setNumProsAndConsAsync( proposalId, numPros, numCons )  # Pause and wait for async transaction
        logging.debug( 'proposal.incrementAsync() __setNumProsAndCons() done' )
@ndb.transactional_async( retries=const.MAX_RETRY )
def __incrementShard( requestId, proposalId, prosInc, consInc ):
    """Add the pro/con increments onto one randomly chosen counter shard,
    creating the shard record on first use."""
    shardId = const.SHARD_KEY_TEMPLATE.format(
        proposalId, random.randint(0, const.NUM_SHARDS - 1))
    shard = ProposalShard.get_by_id(shardId)
    if shard is None:
        shard = ProposalShard(id=shardId, requestId=requestId, proposalId=proposalId)
    shard.numPros += prosInc
    shard.numCons += consInc
    shard.put()
@ndb.transactional_async( retries=const.MAX_RETRY )
def __checkAndSetLastSumTime( proposalId, now ):
    """Rate limiter: stamp the proposal and return True only when at least
    COUNTER_CACHE_SEC has passed since the previous sum update."""
    logging.debug( 'proposal.__checkAndSetLastSumTime() proposalId={}'.format(proposalId) )
    record = Proposal.get_by_id(int(proposalId))
    logging.debug( 'proposal.__checkAndSetLastSumTime() proposalRecord={}'.format(record) )
    # Too soon since the last update -> caller should skip re-summing.
    if record.lastSumUpdateTime + const.COUNTER_CACHE_SEC >= now:
        return False
    record.lastSumUpdateTime = now
    record.put()
    return True
def __getProposalShardsAsync( proposalId ):
    """Kick off async fetches of every counter shard for a proposal and
    return the list of futures (missing shards resolve to None)."""
    keyStrings = [const.SHARD_KEY_TEMPLATE.format(proposalId, shard)
                  for shard in range(const.NUM_SHARDS)]
    logging.debug( 'proposal.__getProposalShardsAsync() shardKeyStrings=' + str(keyStrings) )
    return ndb.get_multi_async([ndb.Key(ProposalShard, ks) for ks in keyStrings])
# Async and sync transactional wrappers around the same update logic.
@ndb.transactional_async( retries=const.MAX_RETRY )
def __setNumProsAndConsAsync( proposalId, numPros, numCons ):
    """Async-transaction variant of __setNumProsAndCons."""
    __setNumProsAndConsImp( proposalId, numPros, numCons )

@ndb.transactional( retries=const.MAX_RETRY )
def __setNumProsAndCons( proposalId, numPros, numCons ):
    """Sync-transaction variant; used from __updateVoteAggs."""
    __setNumProsAndConsImp( proposalId, numPros, numCons )

def __setNumProsAndConsImp( proposalId, numPros, numCons ):
    """Write the given totals (and derived netPros) onto the Proposal."""
    proposalRecord = Proposal.get_by_id( int(proposalId) )
    proposalRecord.numPros = numPros
    proposalRecord.numCons = numCons
    proposalRecord.netPros = numPros - numCons
    proposalRecord.put()
|
[
"you@example.com"
] |
you@example.com
|
ad8bc92067a56e68d2d6a41e02f85a5fc6f954e0
|
1d9a6406c859fda186f520bb4472c551fc572c7b
|
/src/hopla/cli/groupcmds/hatch.py
|
e3c85019653f241bbc5b6a5ab861095a0e1e838d
|
[
"Apache-2.0"
] |
permissive
|
rickie/hopla
|
af21b794ce6719d402721550e1ee4091790410b6
|
24a422194e42c03d5877dc167b2b07147326a595
|
refs/heads/main
| 2023-08-13T17:33:03.612293
| 2021-10-12T12:13:25
| 2021-10-12T12:13:25
| 408,538,704
| 0
| 0
|
Apache-2.0
| 2021-09-20T17:30:15
| 2021-09-20T17:30:15
| null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
#!/usr/bin/env python3
"""
The module with CLI code that handles the `hopla hatch` GROUP command.
"""
import sys
from typing import NoReturn
import click
import requests
from hopla.hoplalib.hatchery.hatchcontroller import HatchRequester
# Click command group under which egg-hatching subcommands are registered.
@click.group()
def hatch():
    """GROUP for hatching eggs."""
def hatch_egg(*, egg_name: str, potion_name: str) -> NoReturn:
    """
    Hatch an egg by performing an API request and echo the result to the
    terminal.

    Exits with status 0 on success, 1 when the API reports an error.
    """
    requester = HatchRequester(
        egg_name=egg_name,
        hatch_potion_name=potion_name
    )
    response: requests.Response = requester.post_hatch_egg_request()
    json: dict = response.json()
    if json["success"] is not True:
        click.echo(f"{json['error']}: {json['message']}")
        sys.exit(1)
    click.echo(f"Successfully hatched a {egg_name}-{potion_name}.")
    sys.exit(0)
|
[
"31448155+melvio@users.noreply.github.com"
] |
31448155+melvio@users.noreply.github.com
|
6af9434c46be76fce9d56f3ea60f2fca581ad793
|
bc0dd74217258c8bdd30e6095dfd7a3edca2dd09
|
/assignments/CarND-Vehicle-Detection-P5/f2f.py
|
c7ddcf21d773d672880881636ee6f76213c48ccd
|
[] |
no_license
|
akamlani/selfdrivingcar
|
d645872f4129fcd4c68c3d4967fdd9c784086cc8
|
eadd43b4c6d60c71e283b7c43cba61030377eb47
|
refs/heads/master
| 2020-06-12T10:19:55.748107
| 2017-05-02T18:44:52
| 2017-05-02T18:44:52
| 75,585,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,461
|
py
|
import numpy as np
import cv2
from scipy.ndimage.measurements import label
import viz_utils as viz
class Vehicle(object):
    """Per-vehicle tracking state accumulated across video frames."""

    def __init__(self):
        self.detected = False          # was vehicle detected in last iteration
        self.n_detections = 0          # number of times this vehicle has been seen
        self.n_nondetections = 0       # number of consecutive times this hard has not been detected
        self.xpixels = None            # pixel x values of last detection
        self.ypixels = None            # pixel y values of last detection
        self.recent_xfitted = []       # x position of last n fits of the bounding box
        self.recent_yfitted = []       # y position of last n fits of bounding box
        self.recent_wfitted = []       # width position of last n fits of bounding box
        self.recent_hfitted = []       # height position of last n fits of bounding box
        self.bestx = None              # average x position of last n fits
        self.besty = None              # average y position of last n fits
        self.bestw = None              # average width of last n fits
        self.besth = None              # average height of last n fits
class F2FTracker(object):
    """Frame-to-frame tracker that smooths vehicle-detection heatmaps over
    a sliding window of frames before labeling bounding boxes."""

    def __init__(self, dimensions, window_size=10):
        """
        dimensions: (rows, cols) of the frames to be processed.
        window_size: 1 for single image, else window over multiple frames
        """
        self.nframes = 0                # frame_cnt
        self.window_size = window_size  # nframes
        # Single images get no thresholding; video uses 1 to suppress
        # transient false positives.
        self.threshold = 0 if window_size == 1 else 1
        rows, cols = dimensions
        # One heatmap layer per frame in the smoothing window.
        self.heatmap = np.zeros((rows, cols, window_size), dtype=np.float32)

    def process_frame(self, base_img, heatmap_coords):
        """Accumulate the frame's heatmap, smooth over the window, and
        return (annotated image, thresholded heatmap, labels)."""
        # get current heatmap
        window_idx = self.nframes % self.window_size
        heat_curr = viz.add_heat(base_img, heatmap_coords)
        self.heatmap[:, :, window_idx] = heat_curr

        # create a smooth heatmap over a window of frames (only the
        # frames seen so far until the window is full)
        curr_slice = self.heatmap[:, :, :self.nframes + 1]
        item = curr_slice if self.nframes < self.window_size else self.heatmap
        heat_smooth = np.mean(item, axis=2)

        # Bug fix: the per-instance threshold computed in __init__ was
        # never used — a literal 1 was passed here, so the intended
        # threshold of 0 for window_size == 1 never took effect.
        heat_thresh = viz.apply_threshold(heat_smooth, threshold=self.threshold)

        # annotate image via heatmap
        labels = label(heat_thresh)
        draw_img = viz.draw_labeled_bboxes(base_img, labels)
        self.nframes += 1
        return draw_img, heat_thresh, labels
|
[
"akamlani@gmail.com"
] |
akamlani@gmail.com
|
555ab84accb35fdd7a4be6c3279a0dfd0fda301b
|
71acb7214efd91c0d327f6d8958e1798eadb4401
|
/locations/spiders/lidl_be.py
|
82d333e830b64a9538b85a87b7b5987b418fa8c1
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
alltheplaces/alltheplaces
|
21b9f8b4ace1352e52ae7b8f8825a930d2cb033e
|
1bcbb55cfcf06f2c714465570711f6e83f205c22
|
refs/heads/master
| 2023-08-30T19:45:35.098658
| 2023-08-30T17:51:54
| 2023-08-30T17:51:54
| 61,166,935
| 453
| 176
|
NOASSERTION
| 2023-09-14T17:16:40
| 2016-06-15T01:09:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
import re
from locations.hours import DAYS_FR, OpeningHours, day_range, sanitise_day
from locations.spiders.lidl_gb import LidlGBSpider
from locations.storefinders.virtualearth import VirtualEarthSpider
class LidlBESpider(VirtualEarthSpider):
    """Scrapes Lidl Belgium store locations from the Bing VirtualEarth
    dataset, parsing the French-language opening-hours strings."""
    name = "lidl_be"
    item_attributes = LidlGBSpider.item_attributes

    dataset_id = "2be5f76f36e8484e965e84b7ee0cd1b1"
    dataset_name = "Filialdaten-BE/Filialdaten-BE"
    key = "AvGfUYinH_I7qdNZWDlXTHHysoytHWqkqZpxHBN9Z0Z0YLQup0u6qZoB8uQXUW_p"

    def parse_item(self, item, feature, **kwargs):
        item["name"] = feature["ShownStoreName"]

        oh = OpeningHours()
        # Each match is either a day range ("Lun - Ven") or a single day,
        # followed by an HH:MM-HH:MM interval.
        for day, start_time, end_time in re.findall(
            r"(\w+ - \w+|\w+) (\d{2}:\d{2})-(\d{2}:\d{2})",
            feature["OpeningTimes"],
        ):
            if "-" in day:
                start_day, end_day = day.split("-")
                start_day = sanitise_day(start_day, DAYS_FR)
                end_day = sanitise_day(end_day, DAYS_FR)
            else:
                start_day = sanitise_day(day, DAYS_FR)
                end_day = None
            # Unrecognized day names sanitise to None and are skipped.
            if start_day and end_day:
                for d in day_range(start_day, end_day):
                    oh.add_range(d, start_time, end_time)
            elif start_day:
                oh.add_range(start_day, start_time, end_time)
        item["opening_hours"] = oh.as_opening_hours()

        yield item
|
[
"noreply@github.com"
] |
alltheplaces.noreply@github.com
|
149a32f41cf34c3a51f8d317601177f0d4f27b59
|
067573d864754a7ce73014086cd6c9165e2b5ea0
|
/scripts/pMSSMtree.cfg.py
|
a99460e708046205f3dac742f4ace7e7d0d8f716
|
[] |
no_license
|
UhhCmsAnalysis/Run2pMSSM
|
3f586d8dcbaacd4de2ed908062fe9875b43fef4c
|
bb6c7c7309108b26ff1d8f2062f712d9b848555a
|
refs/heads/master
| 2020-12-21T08:53:50.884254
| 2020-02-09T20:33:58
| 2020-02-09T20:33:58
| 236,379,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,020
|
py
|
# Load the analyses lists (ana7, ana8, ana13, ...) from analyses.cfg.py
# into this module's namespace (Python 2 style exec of a file object).
FILE = open("scripts/analyses.cfg.py")
exec(FILE)
FILE.close()

#################################
# parameters, masses, etc
#################################
# Each entry: [tree branch key, {"files": glob, "base": prefix, ...}];
# skip_ID / skip_col drop known-bad points from the cross-section files.
treeCfg = [
    [ "params",
      {"files":"idata/parameters/params_batch*.txt","base":""}],
    [ "moreparams",
      {"files":"idata/moreparams/moreparams.txt","base":""}],
    [ "fs",
      {"files":"idata/fs/fs.txt","base":"fs"}],
    [ "lilith",
      {"files":"idata/moreparams/lilith.txt"}],
#    [ "xsect13",
#      {"files":"idata/xsect/xsect_13*txt","base":"","skip_ID":[],"skip_col":"pointName"}],
    [ "xsect8",
      {"files":"idata/xsect/xsect_8*txt","base":"","skip_ID":[2321,8344,6640],"skip_col":"pointName"}],
    [ "xsect7",
      {"files":"idata/xsect/xsect_7*txt","base":"","skip_ID":[2321,8344,6640]}],
]
# Root directory of all input data files referenced above.
datadir = "idata"
#################################
# likelihoods
#################################
def addLlhd2Cfg(anaList, ext=""):
    """Append one treeCfg entry per (analysis, signal region) pair,
    pointing at the corresponding likelihood file under datadir."""
    suffix = ext.replace(".", "")
    for ana in anaList:
        name = ana[0]
        for sr in ana[1]:
            base = name + sr + suffix
            files = datadir + "/" + name + "/llhd" + sr + ext + ".txt"
            treeCfg.append([base + "_llhd", {"files": files, "base": base}])


addLlhd2Cfg(ana7)
addLlhd2Cfg(ana8)
addLlhd2Cfg(ana13)
addLlhd2Cfg(ana7n8n13)
#################################
# Z-values
#################################
def addZ2Cfg(anaList, ext=""):
    """Append one treeCfg entry per (analysis, signal region) pair,
    pointing at the corresponding Z-value file under datadir."""
    suffix = ext.replace(".", "_")
    for ana in anaList:
        name = ana[0]
        for sr in ana[1]:
            base = name + sr + suffix
            files = datadir + "/" + name + "/Z" + sr + ext + ".txt"
            treeCfg.append([base + "_Z", {"files": files, "base": base}])


#addZ2Cfg(ana7)
#addZ2Cfg(ana8)
#addZ2Cfg(ana7n8)
addZ2Cfg(ana7z)
addZ2Cfg(ana8z)
addZ2Cfg(ana13z)
addZ2Cfg(ana7n8n13z)
addZ2Cfg(ana7n8n13lossyz)
################################
# print
################################
#for entry in treeCfg:
# print entry[0],entry[1]
|
[
"samuel.bein@gmail.com"
] |
samuel.bein@gmail.com
|
6bacb134a528804dff45b812c5ea7e73e151f3ac
|
0add7953d3e3ce2df9e8265102be39b758579753
|
/built-in/TensorFlow/Research/cv/image_classification/Cars_for_TensorFlow/automl/vega/model_zoo/model_zoo.py
|
5deb87cd07e7947c8ec193b4da018690b923ef91
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Huawei-Ascend/modelzoo
|
ae161c0b4e581f8b62c77251e9204d958c4cf6c4
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
refs/heads/master
| 2023-04-08T08:17:40.058206
| 2020-12-07T08:04:57
| 2020-12-07T08:04:57
| 319,219,518
| 1
| 1
|
Apache-2.0
| 2023-03-24T22:22:00
| 2020-12-07T06:01:32
|
Python
|
UTF-8
|
Python
| false
| false
| 3,377
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Model zoo."""
import torch
import logging
import os
from vega.model_zoo.torch_vision_model import get_torchvision_model_file
from vega.search_space.networks import NetworkDesc, NetTypes
from vega.core.common import TaskOps
from vega.core.common.general import General
class ModelZoo(object):
    """Model zoo: build models from descriptions, load pretrained weights,
    and run inference."""

    @classmethod
    def set_location(cls, location):
        """Set model zoo location.

        :param location: model zoo location.
        :type location: str.
        """
        General.model_zoo.model_zoo_path = location

    @classmethod
    def get_model(cls, model_desc=None, model_checkpoint=None):
        """Get model from model zoo.

        :param model_desc: the description of the network.
        :type model_desc: dict or None.
        :param model_checkpoint: path of the model weights file.
        :type model_checkpoint: str or None.
        :return: the constructed (and optionally weight-loaded) model.
        :rtype: model.
        :raises Exception: re-raises any failure while building the model.
        """
        try:
            network = NetworkDesc(model_desc)
            model = network.to_model()
        except Exception as e:
            logging.error("Failed to get model, model_desc={}, msg={}".format(
                model_desc, str(e)))
            raise e
        logging.info("Model was created.")
        logging.debug("model_desc={}".format(model_desc))
        if model_checkpoint is not None:
            logging.info("Load model with weight.")
            model = cls._load_pretrained_model(network, model, model_checkpoint)
            logging.info("Model was loaded.")
        return model

    @classmethod
    def _load_pretrained_model(cls, network, model, model_checkpoint):
        """Load weights into model, resolving torchvision checkpoints when
        no explicit checkpoint path is given."""
        if not model_checkpoint and network._model_type == NetTypes.TORCH_VISION_MODEL:
            model_file_name = get_torchvision_model_file(network._model_name)
            full_path = "{}/torchvision_models/checkpoints/{}".format(
                TaskOps().model_zoo_path, model_file_name)
        else:
            full_path = model_checkpoint
        logging.info("load model weights from file.")
        logging.debug("Weights file: {}".format(full_path))
        if not os.path.isfile(full_path):
            # Bug fix: the original raised a plain string, which is a
            # TypeError at runtime in Python 3; raise a real exception.
            raise FileNotFoundError(
                "Pretrained model is not existed, model={}".format(full_path))
        checkpoint = torch.load(full_path)
        model.load_state_dict(checkpoint)
        return model

    @classmethod
    def infer(cls, model, dataloader):
        """Infer the result over all batches of dataloader (on GPU)."""
        model.eval()
        infer_result = []
        with torch.no_grad():
            model.cuda()
            for _, input in enumerate(dataloader):
                if isinstance(input, list):
                    input = input[0]
                logits = model(input.cuda())
                if isinstance(logits, tuple):
                    logits = logits[0]
                infer_result.extend(logits)
        return infer_result
|
[
"1571856591@qq.com"
] |
1571856591@qq.com
|
cb1c16ee59fe20890a221136d81fcc1734dc8a2d
|
940bdfb1d2014e0fdf8c1d138efb43935446864a
|
/ayush_crowdbotics_347/settings.py
|
24c34f26d61c779f77db6396b510cd90b427c8e0
|
[] |
no_license
|
payush/ayush-crowdbotics-347
|
f8568a28c0fd328161e9961d1f4ffc73ed1ff3de
|
08b235df039628147296a723f18dc976317479db
|
refs/heads/master
| 2020-03-23T19:49:01.171461
| 2018-07-23T11:14:46
| 2018-07-23T11:14:46
| 142,003,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,130
|
py
|
"""
Django settings for ayush_crowdbotics_347 project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'br8n%%zz9_*mw+%so6e=q21!m$82iugifwit)lyt@s^w207*4w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ayush_crowdbotics_347.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ayush_crowdbotics_347.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# NOTE(review): dead configuration — DATABASES is re-assigned from env.db()
# further down in this module, so this sqlite default never takes effect.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): mid-module import — PEP 8 wants this at the top of the file.
import environ
env = environ.Env()
# NOTE(review): '*' accepts any Host header; restrict before production use.
ALLOWED_HOSTS = ['*']
SITE_ID = 1
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# Effective database settings, read from the DATABASE_URL environment variable.
DATABASES = {
    'default': env.db()
}
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
    'home',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
# NOTE(review): django-allauth documents the string values 'mandatory' /
# 'optional' / 'none' for this setting; the None object here is likely
# unintended — confirm against the allauth version in use.
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
|
[
"ayushpuroheet@gmail.com"
] |
ayushpuroheet@gmail.com
|
0343a12712af23f99051af1d1eb45efc8aa04b53
|
5dfa9dfb2d2d604f54de7020aed11642f03f1186
|
/SLAC/dark_defects_offline/v0/validator_dark_defects_offline.py
|
30eb49152670cb1873fc87d6cdb693baf4218fea
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lsst-camera-dh/harnessed-jobs
|
49a9a65f3368771ff7b7b22caa94fc8f384681f4
|
352f48b70633b0f0e3faf941198edf1de85f4989
|
refs/heads/master
| 2021-03-19T16:57:36.199351
| 2019-03-10T21:18:46
| 2019-03-10T21:18:46
| 34,645,042
| 0
| 1
| null | 2018-04-03T23:37:34
| 2015-04-27T03:59:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
#!/usr/bin/env python
"""Validator for the offline dark-defects harness job.

Stamps provenance headers onto the dark-pixel mask FITS file, then registers
the mask plus per-amplifier dark pixel/column counts via lcatr.schema.
"""
import lsst.eotest.sensor as sensorTest
import lcatr.schema
import siteUtils
import eotestUtils
sensor_id = siteUtils.getUnitId()
mask_file = '%s_dark_pixel_mask.fits' % sensor_id
# Add sensor/test metadata to the mask before registering it as a fileref.
eotestUtils.addHeaderData(mask_file, LSST_NUM=sensor_id, TESTTYPE='SFLAT_500',
                          DATE=eotestUtils.utc_now_isoformat(),
                          CCD_MANU=siteUtils.getCcdVendor().upper())
results = [lcatr.schema.fileref.make(mask_file)]
eotest_results = '%s_eotest_results.fits' % sensor_id
data = sensorTest.EOTestResults(eotest_results)
amps = data['AMP']
npixels = data['NUM_DARK_PIXELS']
ncolumns = data['NUM_DARK_COLUMNS']
# One 'dark_defects' schema entry per amplifier with its defect counts.
for amp, npix, ncol in zip(amps, npixels, ncolumns):
    results.append(lcatr.schema.valid(lcatr.schema.get('dark_defects'),
                                      amp=amp,
                                      dark_pixels=npix,
                                      dark_columns=ncol))
results.append(siteUtils.packageVersions())
lcatr.schema.write_file(results)
lcatr.schema.validate_file()
|
[
"jchiang@slac.stanford.edu"
] |
jchiang@slac.stanford.edu
|
46747fbc3c33b336048baf27aad12d4a044b8473
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/yfooETHj3sHoHTJsv_11.py
|
3b80e0b8989222b1ece889e3f7396b901396c028
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
"""
Create a function that returns `True` when `num1` is equal to `num2`;
otherwise return `False`.
### Examples
is_same_num(4, 8) ➞ False
is_same_num(2, 2) ➞ True
is_same_num(2, "2") ➞ False
### Notes
Don't forget to `return` the result.
"""
def is_same_num(num1, num2):
    """Return True when *num1* equals *num2*, else False.

    Uses strict ``==``, so ``is_same_num(2, "2")`` is False.
    """
    # ``==`` already yields a bool — no if/else returning True/False needed.
    return num1 == num2
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
be8933396f92ba4e0bbc0f721914a0ef71410726
|
20cf2cb73adfed63cf182fc12a09aa3aadc033c6
|
/filter.py
|
ba1f301e7c35530bd36538e7e6db9a0ebf49052c
|
[] |
no_license
|
arunkumar27-ank-tech/Python-Programs
|
678ae558e8c141a6302e2705849c97258974c4eb
|
a56788057d1bf8848681e38eb569874d84db7337
|
refs/heads/master
| 2023-06-16T14:50:36.146381
| 2021-07-15T13:57:54
| 2021-07-15T13:57:54
| 386,305,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from functools import reduce
# Demo: keep the even numbers of 1..9, shift each by 2, then total them.
lst = list(range(1, 10))
# Even values only.
evens = [value for value in lst if value % 2 == 0]
# Each even value shifted up by two.
doubles = [value + 2 for value in evens]
# Fold the shifted values into their sum.
sum1 = reduce(lambda acc, cur: acc + cur, doubles)
print(evens)
print(doubles)
print(sum1)
|
[
"arunkumar834428@gmail.com"
] |
arunkumar834428@gmail.com
|
bf77466fc9d42438623ab91fe345fb7f007eef5d
|
cca70e45645d5b96f98b1328833d5b4ebb1c882e
|
/P20/P06.py
|
cf4525656b07a2c7601c33937201708a72cf69c6
|
[] |
no_license
|
webturing/Python3Programming_19DS12
|
9613a9808407b6abef0bc89ad8f74fc3920e789f
|
5bbc1e10cec0ebf7d5dfb415a9d4bb07ce0b32ca
|
refs/heads/master
| 2020-08-01T10:23:09.474316
| 2019-12-27T11:52:34
| 2019-12-27T11:52:34
| 210,964,665
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
'''
Zhao, Qian, Sun, Li and Zhou sit around a round table for a meal. Afterwards
Zhou recalls: "Zhao sat next to Qian, and to Qian's left was Sun or Li";
Li recalls: "Qian sat to Sun's left, and I sat next to Sun".
Neither of them got a single part right. How were they actually seated?
(Brute-force search over seats 1..5; Zhao is pinned to seat 1.)
'''
def left(a, b):
    # True when seat a is immediately to the left of seat b (5 wraps to 1).
    return a + 1 == b or a == 5 and b == 1
def right(a, b):
    # Mirror of left(): seat a immediately to the right of seat b.
    return left(b, a)
def adj(a, b):
    # True when seats a and b are neighbours on the round table.
    return right(a, b) or left(a, b)
# Zhao fixed at seat 1 to eliminate rotationally-equivalent solutions.
zhao, qian, sun, li, zhou = 1, 1, 1, 1, 1
for qian in range(2, 6):
    for sun in range(2, 6):
        if sun == qian: continue
        for li in range(2, 6):
            if li == qian or li == sun:
                continue
            # Seats sum to 1+2+3+4+5 = 15, so Zhou gets the leftover seat.
            zhou = 15 - zhao - qian - sun - li
            # Reject arrangements where any part of Zhou's claim holds.
            if adj(zhao, qian) or left(qian, sun) or left(qian, li):
                continue
            # Reject arrangements where any part of Li's claim holds.
            if left(sun, qian) or adj(sun, li):
                continue
            print("%d %d %d %d %d" % (zhao, qian, sun, li, zhou))
|
[
"zj@webturing.com"
] |
zj@webturing.com
|
c433d7fe29d312b80fbac7fc3888a4c7c7dd2223
|
39c861da8f362874baac3f7e4aab089b18125dab
|
/ghostwriter/modules/exceptions.py
|
be1d30a317dbe958adf73cba0a39823fd06cbd43
|
[
"BSD-3-Clause"
] |
permissive
|
chrismaddalena/Ghostwriter
|
47cdc2111695e19335430326cdf4f880b728be22
|
f197be35497ae97c6b90ba17a820ec04e4254c53
|
refs/heads/master
| 2022-07-09T02:14:12.382165
| 2022-06-07T23:19:15
| 2022-06-07T23:19:15
| 202,816,974
| 3
| 0
|
BSD-3-Clause
| 2022-03-09T21:07:37
| 2019-08-17T00:37:18
|
Python
|
UTF-8
|
Python
| false
| false
| 761
|
py
|
"""This contains all of the custom exceptions for the Ghostwriter application."""
class MissingTemplate(Exception):
    """Raised when a report has no report template attached.

    **Attributes**

    ``message``
        Human-readable error text shown to the user.
    """

    def __init__(self, message="No report template selected"):
        # Store the text on the instance and hand it to ``Exception`` so
        # ``str(exc)`` renders it as usual.
        self.message = message
        super().__init__(message)
class InvalidFilterValue(Exception):
    """Raised when a report template filter receives a value it cannot use.

    **Attributes**

    ``message``
        Human-readable error text shown to the user.
    """

    def __init__(self, message="Invalid value provided to filter"):
        # Store the text on the instance and hand it to ``Exception`` so
        # ``str(exc)`` renders it as usual.
        self.message = message
        super().__init__(message)
|
[
"chris.maddalena@protonmail.com"
] |
chris.maddalena@protonmail.com
|
2c6f44399105c6eaf015fa79e82a8722f392705f
|
e13c98f36c362717fdf22468b300321802346ef5
|
/home/migrations/0005_auto_20161130_1514.py
|
a8eda78bd3708cdb4cd0d223a5be51a7bbc35b45
|
[] |
no_license
|
alexmon1989/libraries_portal
|
2415cc49de33459266a9f18ed8bb34ac99d3eb7c
|
277081e09f6347c175775337bffba074a35f3b92
|
refs/heads/master
| 2021-01-23T07:25:53.884795
| 2018-12-25T14:29:29
| 2018-12-25T14:29:29
| 80,501,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-30 13:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.3: makes Profile.city optional
    # (blank=True / null=True) while keeping the FK to home.City;
    # deleting the City cascades to the referencing profiles.
    dependencies = [
        ('home', '0004_auto_20161130_1402'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='city',
            # verbose_name is Russian for "City".
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='home.City', verbose_name='Город'),
        ),
    ]
|
[
"alex.mon1989@gmail.com"
] |
alex.mon1989@gmail.com
|
c1ecba608b38e7e151190d9428b136119b3a8902
|
3b9b4049a8e7d38b49e07bb752780b2f1d792851
|
/src/third_party/skia/gyp/icu.gyp
|
4a985032c26d61b2145ef092b2b838626d4a11de
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-public-domain"
] |
permissive
|
webosce/chromium53
|
f8e745e91363586aee9620c609aacf15b3261540
|
9171447efcf0bb393d41d1dc877c7c13c46d8e38
|
refs/heads/webosce
| 2020-03-26T23:08:14.416858
| 2018-08-23T08:35:17
| 2018-09-20T14:25:18
| 145,513,343
| 0
| 2
|
Apache-2.0
| 2019-08-21T22:44:55
| 2018-08-21T05:52:31
| null |
UTF-8
|
Python
| false
| false
| 3,713
|
gyp
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# GYP build description for Skia's bundled ICU: builds 'icuuc' (the ICU
# "common" library) either statically or as a shared component, pulling in
# a per-platform pre-built data table (DLL on Windows, .S elsewhere).
{
  'includes': [
    'common_variables.gypi',
  ],
  'variables': {
    'component%': 'static_library',
    'icu_directory': '../third_party/externals/icu'
  },
  'targets': [
    {
      'target_name': 'icuuc',
      'type': '<(component)',
      # Every C/C++ file under source/common is compiled into the library.
      'sources': [
        '<!@(python find.py ../third_party/externals/icu/source/common "*.c*")'
      ],
      'defines': [
        'U_COMMON_IMPLEMENTATION',
        'U_HIDE_DATA_SYMBOL',
        'U_USING_ICU_NAMESPACE=0',
        'HAVE_DLOPEN=0',
        'UCONFIG_NO_NON_HTML5_CONVERSION=1',
      ],
      'include_dirs': [ '<(icu_directory)/source/common', ],
      'direct_dependent_settings': {
        'defines': [
          'U_USING_ICU_NAMESPACE=0',
          'U_ENABLE_DYLOAD=0',
        ],
        'include_dirs': [ '<(icu_directory)/source/common', ],
        'conditions': [
          [
            'component=="static_library"', {
              'defines': [
                'U_STATIC_IMPLEMENTATION',
              ],
            }
          ],
        ],
      },
      'cflags': [ '-w' ],
      'cflags_cc': [ '-frtti', ],
      'conditions': [
        [
          'component=="static_library"', {
            'defines': [ 'U_STATIC_IMPLEMENTATION', ],
          }
        ],
        [
          'OS == "win"', {
            # Windows links against a stub data table and ships icudt.dll
            # next to the binary instead of assembling the data in.
            'sources': [
              '<(icu_directory)/source/stubdata/stubdata.c',
            ],
            'copies': [
              {
                'destination': '<(PRODUCT_DIR)',
                'files': [ '<(icu_directory)/windows/icudt.dll', ],
              },
            ],
            'msvs_disabled_warnings': [4005, 4068, 4244, 4355, 4996, 4267],
            'msvs_settings': {
              'VCCLCompilerTool': {
                'AdditionalOptions': [ '/EHsc', ],
              },
            },
            'configurations': {
              'Debug': {
                'msvs_settings': {
                  'VCCLCompilerTool': {
                    'RuntimeTypeInfo': 'true',  # /GR
                  },
                },
              },
              'Release': {
                'msvs_settings': {
                  'VCCLCompilerTool': {
                    'RuntimeTypeInfo': 'true',  # /GR
                  },
                },
              },
            },
            'all_dependent_settings': {
              'msvs_settings': {
                'VCLinkerTool': {
                  'AdditionalDependencies': [
                    'advapi32.lib',
                  ],
                },
              },
            },
          }
        ],
        [
          'OS == "win" and skia_clang_build', {
            'msvs_settings': {
              'VCCLCompilerTool': {
                'AdditionalOptions': [
                  # See http://bugs.icu-project.org/trac/ticket/11122
                  '-Wno-inline-new-delete',
                  '-Wno-implicit-exception-spec-mismatch',
                ],
              },
            },
          }
        ],
        [
          'skia_os == "android"', {
            'sources': [ '<(icu_directory)/android/icudtl_dat.S', ],
          }
        ],
        [
          'skia_os == "linux"', {
            'sources': [ '<(icu_directory)/linux/icudtl_dat.S', ],
          }
        ],
        [
          'skia_os == "mac"', {
            'sources': [ '<(icu_directory)/mac/icudtl_dat.S', ],
            'xcode_settings': {
              'GCC_ENABLE_CPP_RTTI': 'YES',  # -frtti
              'WARNING_CFLAGS': [ '-w' ],
            },
          }
        ],
      ],  # conditions
    },
  ],  # targets
}
|
[
"changhyeok.bae@lge.com"
] |
changhyeok.bae@lge.com
|
1e1d5ccfdb2caa614c32a09ee07729393624758c
|
4c672231bd8b7c23bd5773ef990404cc3146712a
|
/shipmaster/server/celery.py
|
8e24f72855c7e156d14e3e37290140aeabcf16b0
|
[
"BSD-3-Clause"
] |
permissive
|
AzureCloudMonk/shipmaster
|
b0e82f93308ecc829e6f6b3cb3156f11dcfbadd4
|
cf596be7ea689c26c4bf47acb67dfd15169d3c46
|
refs/heads/master
| 2020-11-30T01:51:32.010852
| 2018-03-03T21:47:17
| 2018-03-03T21:47:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
import os
# The settings module must be in the environment before Django or Celery
# import any project code.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shipmaster.server.settings')
from celery import Celery
from django.conf import settings
# Celery application for the shipmaster server; configured straight from
# the Django settings object.
app = Celery('shipmaster.server')
app.config_from_object(settings)
# Discover tasks in every installed Django app; the lambda defers the
# INSTALLED_APPS lookup until settings are fully configured.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
|
[
"lex@damoti.com"
] |
lex@damoti.com
|
60a9319cb5e51a72ea6172acb56753d27d908782
|
9aa52f7e5902ea8f4a2810809218d9631446345d
|
/backend/course/api/v1/serializers.py
|
94b376e43c63bba2216fc46a5939adf50d3f51d9
|
[] |
no_license
|
crowdbotics-apps/merchandising-plays-21542
|
e662e42b8766a2fc24d6e0ab926580de0b580461
|
c0298b28a45a617b88984d074af4a69f4ea00700
|
refs/heads/master
| 2022-12-29T10:31:41.304017
| 2020-10-15T18:39:00
| 2020-10-15T18:39:00
| 304,412,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,622
|
py
|
from rest_framework import serializers
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
# Plain ModelSerializers exposing every field of the course app's models
# over the REST API — one serializer per model, no custom fields or
# validation.
class GroupSerializer(serializers.ModelSerializer):
    class Meta:
        model = Group
        fields = "__all__"
class SubscriptionTypeSerializer(serializers.ModelSerializer):
    class Meta:
        model = SubscriptionType
        fields = "__all__"
class RecordingSerializer(serializers.ModelSerializer):
    class Meta:
        model = Recording
        fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
    class Meta:
        model = Category
        fields = "__all__"
class EventSerializer(serializers.ModelSerializer):
    class Meta:
        model = Event
        fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
    class Meta:
        model = Course
        fields = "__all__"
class ModuleSerializer(serializers.ModelSerializer):
    class Meta:
        model = Module
        fields = "__all__"
class LessonSerializer(serializers.ModelSerializer):
    class Meta:
        model = Lesson
        fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
    class Meta:
        model = PaymentMethod
        fields = "__all__"
class EnrollmentSerializer(serializers.ModelSerializer):
    class Meta:
        model = Enrollment
        fields = "__all__"
class SubscriptionSerializer(serializers.ModelSerializer):
    class Meta:
        model = Subscription
        fields = "__all__"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
462ff12ed72a87b6f46032cc0eeb6fd1d11f6baf
|
af669dbef653dd69474f4c0836582bf14262c80f
|
/price-test/frame/lib/commonlib/configure/configunit.py
|
d59369edd113378ff64e2167f6f76406ff180d06
|
[] |
no_license
|
siki320/fishtest
|
7a3f91639d8d4cee624adc1d4d05563611b435e9
|
7c3f024192e1c48214b53bc45105bdf9e746a013
|
refs/heads/master
| 2021-01-19T21:58:36.807126
| 2017-04-19T09:56:37
| 2017-04-19T09:56:37
| 88,729,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
#!/usr/bin/env python
# -*- coding: GB18030 -*-
'''
Created on 2012-3-10
@author: tongdangdang
'''
class ConfigUnit(object):
    """A single key/value entry inside a ub conf configuration tree.

    The unit remembers its enclosing container (``father``) and an optional
    ``note``; ``level`` starts at -1 until the tree assigns a depth.
    """

    def __init__(self, key, value, father, note=""):
        self.key = key
        self.value = value
        self.level = -1        # depth in the config tree; unset by default
        self.father = father   # enclosing container (e.g. a ConfigArray)
        self.note = note       # comment text attached to this entry

    def __str__(self):
        # A unit renders as its raw value.
        return self.value

    def __getitem__(self, key):
        # Subscripting always yields the stored value, whatever the key is.
        return self.value
|
[
"lisiqi_i@didichuxing.com"
] |
lisiqi_i@didichuxing.com
|
8bc1f3af1ca811d884a225dbd76851c0ad13c46a
|
1da15a0ec8eb771d4584b3997d44d2af23d53484
|
/D3/1220.Magnetic.py
|
2da7b7c711faaafaf1586b556cbc79aeea42fe62
|
[] |
no_license
|
cdh3261/Algorithm_Problems
|
1e9ad0310490ffe5396f8cef3205885d62ebefb7
|
d9ad791e9a0bcdd1c13b8e18fa993b784a53b064
|
refs/heads/master
| 2020-08-29T07:27:04.331917
| 2020-03-06T11:33:57
| 2020-03-06T11:33:57
| 217,966,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
# SWEA 1220 "Magnetic": grid codes are 1 = N-pole magnet, 2 = S-pole magnet,
# 0 = empty. Count per-column "deadlock" points where magnets get stuck.
for t in range(1, 11):
    n = int(input())
    # n x n grid of magnet codes read from stdin.
    arr = [list(map(int, input().split())) for i in range(n)]
    col = []
    # Compact each column down to its non-zero entries, top to bottom.
    for i in range(n):
        a = []
        for j in range(n):
            if arr[j][i] != 0:
                a.append(arr[j][i])
        col.append(a)
    cnt = 0
    # A deadlock occurs where a 2 sits directly below a non-2 (i.e. a 1)
    # in the compacted column.
    for i in range(n):
        for j in range(len(col[i])):
            if j != 0 and col[i][j] == 2 and col[i][j - 1] != 2:
                cnt += 1
    print(f'#{t} {cnt}')
|
[
"cdh3261@naver.com"
] |
cdh3261@naver.com
|
91efd913c270d343c4b45b6d1eb44d4aa58f912c
|
35a6b6b5cabcf9fb39527bab020ef7c96265a026
|
/p3.py
|
5911e61bf240cc3e917c3377949ca16c9c46851d
|
[] |
no_license
|
mepky/data-structure-and-algorithm
|
9a1324142276e6966692c51734613f15234f5300
|
96f64e657f97e46fc2d32cca5294fa0f104d5d01
|
refs/heads/master
| 2020-03-24T08:57:41.692564
| 2020-02-10T12:40:13
| 2020-02-10T12:40:13
| 142,614,071
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
from collections import defaultdict
# For each test case: read n and a lowercase string s, then print n - d,
# where d is the minimum distance between two occurrences of the same
# letter; print 0 when every letter is unique.
t=int(input())
for _ in range(t):
    l=defaultdict(int)  # NOTE(review): never used — leftover from an earlier approach
    n=int(input())
    d=2**20  # sentinel "infinite" minimum gap
    t=0  # flag: saw at least one repeated letter (shadows the test count, which is safe here since range(t) was already evaluated)
    s=input()
    a=[-1]*27  # last index seen per lowercase letter (26 would suffice)
    for i in range(n):
        if a[ord(s[i])-97]==-1:
            a[ord(s[i])-97]=i
        else:
            d=min(d,i-a[ord(s[i])-97])
            t=1
            a[ord(s[i])-97]=i
    if t==0:
        print(0)
    else:
        print(n-d)
|
[
"noreply@github.com"
] |
mepky.noreply@github.com
|
f4cc030b9c8573c816c10160ff087a8c68c9d808
|
e00cf0bf72421ec31e4d3608c615aeeba5064731
|
/wows/move.py
|
3165d0d74b85208a58ea1b2ed7ee70fd489a053c
|
[] |
no_license
|
lorne-luo/auto-wows
|
b4a84c7d99585c84a635fb5be11fd0f03a5f37fd
|
992ad473f1d5a78686e1c4c939c6c218e72373d7
|
refs/heads/master
| 2020-12-30T00:52:17.497039
| 2020-02-25T11:10:30
| 2020-02-25T11:10:30
| 238,803,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
import time
from random import randint
import pyautogui as pag
import settings as settings
from helper import search_template, get_map_image
class WOWS_Move(object):
    # Automates ship movement through the in-game map screen via pyautogui.
    def move_ship(self):
        # Open the map ('m'), resolve a destination point, double-click it a
        # few times with jitter, then close the map with Esc.
        global MOVE_TO
        # NOTE(review): MOVE_TO is never initialised at module level in this
        # file, so the ``if not MOVE_TO`` below raises NameError unless some
        # other code injects it first — confirm, or add ``MOVE_TO = None``.
        pag.press('m', presses=1, interval=0.25)
        pag.sleep(1.5)
        if not MOVE_TO:
            map_image = get_map_image()
            self_loc = search_template(map_image, 'map_self_icon.bmp')
            print('self_loc', self_loc)
            if self_loc:
                # Mirror our own map position through the map's far corner.
                MOVE_TO = (settings.BATTLE_MAP_TOPLEFT[0] + settings.BATTLE_MAP_SIZE[0] - self_loc[1],
                           settings.BATTLE_MAP_TOPLEFT[1] + settings.BATTLE_MAP_SIZE[1] - self_loc[0])
            else:
                # Fall back to the centre of the battle map.
                MOVE_TO = (settings.BATTLE_MAP_TOPLEFT[0] + settings.BATTLE_MAP_SIZE[0] / 2,
                           settings.BATTLE_MAP_TOPLEFT[1] + settings.BATTLE_MAP_SIZE[1] / 2)
        # Issue 4 double-clicks near the target with +/-50 px jitter.
        for i in range(4):
            loc = (MOVE_TO[0] + randint(-50, 50),
                   MOVE_TO[1] + randint(-50, 50))
            pag.moveTo(loc)
            pag.click(clicks=2, interval=0.5, button='left')
            time.sleep(1)
        pag.press('esc')
        time.sleep(2)
|
[
"dev@luotao.net"
] |
dev@luotao.net
|
12e05ceaac7c5c4174fb21ada9bdbb1e70c90c54
|
ffb05b145989e01da075e2a607fb291955251f46
|
/pypers/oxford/non_cooperative.py
|
6c7b293967ae50f89ebf7f90ccccdc8e62ba6d40
|
[] |
no_license
|
micheles/papers
|
a5e7f2fa0cf305cd3f8face7c7ecc0db70ce7cc7
|
be9070f8b7e8192b84a102444b1238266bdc55a0
|
refs/heads/master
| 2023-06-07T16:46:46.306040
| 2018-07-14T04:17:51
| 2018-07-14T04:17:51
| 32,264,461
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
# non_cooperative.py
class B1(object):
    # Python 2 demo class: announces its __init__ and forwards remaining
    # keyword args up the MRO via super().
    def __init__(self, **kw):
        print "B1.__init__"
        super(B1, self).__init__(**kw)
class B2(object):
    # Same pattern as B1: announce and forward keyword args via super().
    def __init__(self, **kw):
        print "B2.__init__"
        super(B2, self).__init__(**kw)
|
[
"michele.simionato@gmail.com"
] |
michele.simionato@gmail.com
|
94f2093636ae67fdc8ec2d5431c2b52cbd51d7c2
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.0_rd=0.5_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=25/params.py
|
1e06a0ee411f4cd8e4e96c1df8f010d7336d6730
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
# Parameters for one schedulability experiment: GSN-EDF on 4 CPUs with
# harmonic periods and 4 shared resources. The file is a bare Python dict
# literal consumed by the experiment scripts.
{'cpus': 4,
 'duration': 30,
 'final_util': '3.041500',
 'max_util': '3.0',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.5',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'GSN-EDF',
 'trial': 25,
 'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
f3467f1043b80a0ea9337c61aa83eb37180e440c
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/mphnok005/question3.py
|
8ab32014734be33c45000ec60015c87758483dae
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
from math import*
# Vieta-style product: pi = 2 * prod(2 / sqrt(2 + sqrt(2 + ...))).
x=sqrt(2)
a=2
pi=2*(a/x)  # first factor: 2 * (2 / sqrt(2)); note this shadows math.pi
while x<2:  # x climbs toward 2; loop ends once float rounding reaches 2
    x=(sqrt(2+x))
    pi=(pi*a/x)
print("Approximation of pi:",round(pi,3))
# NOTE(review): eval() on raw user input is unsafe; int()/float() would do.
c=eval(input("Enter the radius:\n"))
print("Area:",round(c**2*pi,3))
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
3da3e17495525b485fd627a5d52d55b261e728ec
|
8d50cc4f37c153fcb51de4501f3fa50c00394d9b
|
/test/benchmark/resnet_tl_benchmark.py
|
0273e724c53f6d0c0598924637c84431b5b3fe0c
|
[
"MIT"
] |
permissive
|
liujuanLT/InsightFace_TF
|
dbd239dfdda1866c348e82211932884f73cb3067
|
257b6e0dcf7e7c3523dc7e1c08ba529fab1bf75b
|
refs/heads/master
| 2022-04-27T21:24:01.458277
| 2022-03-17T12:28:15
| 2022-03-17T12:28:15
| 463,040,192
| 0
| 0
|
MIT
| 2022-02-24T06:51:16
| 2022-02-24T06:51:15
| null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
import tensorflow as tf
import tensorflow.contrib.slim.nets as nets
import numpy as np
from nets.resnet import get_resnet
slim = tf.contrib.slim
resnet = nets.resnet_v1
if __name__ == '__main__':
    # Benchmark: train a ResNet-50 head on random tensors forever, printing
    # the per-step loss. Intentionally an infinite loop — stop with Ctrl-C.
    output_shape = 85164  # size of the one-hot label vector
    batch_size = 128
    image = tf.placeholder(name='input_x', shape=[None, 224, 224, 3], dtype=tf.float32)
    labels = tf.placeholder(name='input_label', shape=[None, output_shape], dtype=tf.float32)
    with slim.arg_scope(nets.resnet_utils.resnet_arg_scope()):
        # NOTE: rebinds ``nets`` from the slim module to the network object.
        nets = get_resnet(image, output_shape, 50, type='resnet', sess=None, pretrained=False)
        print(nets.outputs)
    probabilities = tf.reduce_mean(tf.nn.softmax(nets.outputs, dim=-1))
    print(probabilities)
    # Synthetic loss (L2 distance between mean softmax and random labels) —
    # only meaningful for timing, not for accuracy.
    losses = tf.norm(tf.subtract(probabilities, labels))
    train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(losses)
    sess = tf.Session()
    saver = tf.train.Saver()  # NOTE(review): created but never used below
    sess.run(tf.global_variables_initializer())
    while True:
        datasets = np.random.randn(batch_size, 224, 224, 3).astype(np.float32)
        datasets_labels = np.random.randn(batch_size, output_shape).astype(np.float32)
        losses_val, _ = sess.run([losses, train_op], feed_dict={image: datasets, labels: datasets_labels})
        print(losses_val)
|
[
"auroua@yeah.net"
] |
auroua@yeah.net
|
51d5ae1fa2d5ae73a65d826bd1113e9b57cef767
|
03383b657ad6d526e7e6aa6639fe41019cd39ea2
|
/recursion/palandrome.py
|
985c2d63e7528bf16d0978634606988d462fbf30
|
[] |
no_license
|
ahmedmeshref/Leetcode-Solutions
|
1c5f908cb2f6487c9dfadcc8f91192dedbb5a17e
|
28f848cb25e4aa22e6d8c9d715488f191ed15137
|
refs/heads/main
| 2023-05-26T14:33:11.246122
| 2021-06-07T21:32:13
| 2021-06-07T21:32:13
| 356,045,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
def isPalindrome(s: str) -> bool:
    """Return True if *s* reads the same forwards and backwards, comparing
    only alphabetic characters, case-insensitively.

    Empty strings and strings with no letters count as palindromes.
    NOTE(review): digits are skipped like punctuation here; LeetCode 125
    treats them as significant (``isalnum``) — confirm the intended rules.
    """
    l = 0
    r = len(s) - 1
    # Two-pointer scan. The guard must be ``l < r`` (not ``l != r``): with
    # ``!=`` the pointers cross on even-length palindromes (e.g. "abba")
    # and on the empty string, eventually indexing out of range.
    while l < r:
        if not s[l].isalpha():
            l += 1
        elif not s[r].isalpha():
            r -= 1
        elif s[l].lower() != s[r].lower():
            return False
        else:
            l += 1
            r -= 1
    return True
print(isPalindrome("A man, a plan, a canal: Panama"))
|
[
"a.meshref@alustudent.com"
] |
a.meshref@alustudent.com
|
e49952bb3039c47341a3a2001f153c1fcea8521c
|
05169e203974411667ab947298a74575b8a179e0
|
/packages/jet_bridge_base/jet_bridge_base/serializers/relationship_override.py
|
c985788ccf2e5eeaf886f08eb8bb093846f356d8
|
[
"MIT"
] |
permissive
|
jet-admin/jet-bridge
|
f6b563e1801985063483ddb02e9e1c3301dc0612
|
c53d30fb308eed5822083eaf71f641c4098610cc
|
refs/heads/master
| 2023-09-01T14:31:42.261427
| 2023-08-24T13:54:34
| 2023-08-24T13:54:34
| 163,167,532
| 1,564
| 166
|
MIT
| 2023-03-18T03:20:04
| 2018-12-26T10:27:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,873
|
py
|
from jet_bridge_base.models.model_relation_override import ModelRelationOverrideModel
from jet_bridge_base.store import store
from sqlalchemy import inspect
from jet_bridge_base import fields
from jet_bridge_base.db import get_mapped_base, reload_request_graphql_schema, get_request_connection
from jet_bridge_base.exceptions.validation_error import ValidationError
from jet_bridge_base.serializers.serializer import Serializer
from jet_bridge_base.logger import logger
class ModelDescriptionRelationOverrideSerializer(Serializer):
    # One relation-override entry. ``direction`` is 'MANYTOONE' or
    # 'ONETOMANY' (validated by the parent serializer); the remaining
    # fields name the local column and the related model/column.
    direction = fields.CharField()
    local_field = fields.CharField()
    related_model = fields.CharField()
    related_field = fields.CharField()
class ModelDescriptionRelationOverridesSerializer(Serializer):
    """Validates and persists per-model relation overrides for a connection.

    Input is a list of ``{model, relations}`` items. Each relation gets a
    deterministic ``name`` derived from its fields, so existing database
    rows can be updated in place: both the incoming and the stored overrides
    are sorted by name, paired positionally, and any surplus stored rows
    are deleted. Finally the request's GraphQL schema is reloaded.
    """
    model = fields.CharField()
    relations = ModelDescriptionRelationOverrideSerializer(many=True)

    def get_model(self, request, name):
        # Resolve the reflected SQLAlchemy class for ``name`` (None if absent).
        MappedBase = get_mapped_base(request)
        return MappedBase.classes.get(name)

    def generate_many_to_one_name(self, mapper, local_field, related_model, related_field):
        # e.g. "user_id__to__users__id"; suffixed when it would collide with
        # an existing column name.
        name = '__'.join([local_field, 'to', related_model, related_field])
        if name in mapper.columns:
            name = name + '_relation'
            logger.warning('Already detected column name, using {}'.format(name))
        return name

    def generate_one_to_many_name(self, mapper, local_field, related_model, related_field):
        # Reverse direction: "<related_model>__<related_field>__to__<local_field>".
        name = '__'.join([related_model, related_field, 'to', local_field])
        if name in mapper.columns:
            name = name + '_relation'
            logger.warning('Already detected column name, using {}'.format(name))
        return name

    def validate(self, attrs):
        """Attach a generated ``name`` to every relation; reject unknown
        models and unknown directions."""
        request = self.context.get('request')
        Model = self.get_model(request, attrs['model'])
        if Model is None:
            raise ValidationError('Unknown relation override model: {}'.format(attrs['model']))
        mapper = inspect(Model)
        for item in attrs['relations']:
            if item['direction'] == 'MANYTOONE':
                item['name'] = self.generate_many_to_one_name(mapper, item['local_field'], item['related_model'], item['related_field'])
            elif item['direction'] == 'ONETOMANY':
                item['name'] = self.generate_one_to_many_name(mapper, item['local_field'], item['related_model'], item['related_field'])
            else:
                raise ValidationError('Unknown relation direction: {}'.format(item['direction']))
        return attrs

    def save(self):
        """Upsert the validated overrides, delete stale ones, and reload the
        GraphQL schema so the changes take effect."""
        request = self.context.get('request')
        connection = get_request_connection(request)
        draft = bool(request.get_argument('draft', False))
        with store.session() as session:
            with session.begin():
                for item in self.validated_data:
                    set_overrides = sorted(item['relations'], key=lambda x: x['name'])
                    existing_overrides = session.query(ModelRelationOverrideModel).filter(
                        ModelRelationOverrideModel.connection_id == connection['id'],
                        ModelRelationOverrideModel.model == item['model'],
                        # BUG FIX: the original compared ``draft == draft`` — a
                        # Python tautology (always True) that mixed draft and
                        # non-draft rows; the column must be compared instead.
                        ModelRelationOverrideModel.draft == draft
                    ).order_by(ModelRelationOverrideModel.name).all()
                    existing_overrides = list(existing_overrides)
                    for i, override in enumerate(set_overrides):
                        existing_override = existing_overrides[i] if i < len(existing_overrides) else None
                        if existing_override:
                            # Reuse the stored row: rewrite it in place.
                            existing_override.name = override.get('name')
                            existing_override.direction = override.get('direction')
                            existing_override.local_field = override.get('local_field')
                            existing_override.related_model = override.get('related_model')
                            existing_override.related_field = override.get('related_field')
                        else:
                            session.add(ModelRelationOverrideModel(
                                connection_id=connection['id'],
                                model=item['model'],
                                draft=draft,
                                name=override.get('name'),
                                direction=override.get('direction'),
                                local_field=override.get('local_field'),
                                related_model=override.get('related_model'),
                                related_field=override.get('related_field')
                            ))
                    # Anything stored beyond the incoming list is stale.
                    delete_overrides = existing_overrides[len(item['relations']):]
                    for override in delete_overrides:
                        session.delete(override)
        reload_request_graphql_schema(request, draft)
|
[
"f1nal@cgaming.org"
] |
f1nal@cgaming.org
|
8ba80ac4b037dde92443141d60bd35bf1f98031e
|
e4414bd8152e52855db7ab9065ae12b7329143e0
|
/python/src/hangman.py
|
0dd38bbfdc6501bc39f632a253400dd40bbf2d07
|
[] |
no_license
|
catalinc/programmingpraxis-solutions
|
39cb847877ec46d2fb85740791c24889ab5654a8
|
c0b13906aa76ffac705bf108db138fb9a38bc16a
|
refs/heads/master
| 2021-03-27T16:46:47.781839
| 2017-09-09T15:17:38
| 2017-09-09T15:17:38
| 53,532,233
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,889
|
py
|
#!/usr/bin/env python
# See http://programmingpraxis.com/2011/12/20/hangman/
import random
import sys
HANGMAN = [
"",
"""
O
""",
"""
O
|
""",
"""
_O
|
""",
"""
_O_
|
""",
"""
_O_
|
/
""",
"""
_O_
|
/ \\
"""
]
def play_game():
    """Run one round: pick a secret word, then loop reading guesses until
    the player completes the word or the gallows drawing is finished."""
    secret_word = random_word().upper()
    guessed_letters = set()
    failed_attempts = 0
    print_matches(secret_word, guessed_letters)
    while True:
        try:
            # raw_input => this script targets Python 2.
            letter = raw_input("Your guess ? ").upper()
        except KeyboardInterrupt:
            # Ctrl-C quits the whole program, not just the round.
            exit_game()
        if letter in secret_word:
            guessed_letters.add(letter)
        else:
            failed_attempts += 1
            print_hangman(failed_attempts)
            if lose(failed_attempts):
                print("Sorry, you lose...")
                print("The word was: %s" % (" ".join(list(secret_word))))
                break
        print_matches(secret_word, guessed_letters)
        if win(secret_word, guessed_letters):
            print("You nail it !")
            break
def random_word(words_file='words.lst'):
    """Return a uniformly random word from *words_file* (one word per line)
    using reservoir sampling: single pass, O(1) memory.

    Returns None when the file is empty.
    """
    word = None
    n = 0
    with open(words_file) as f:
        for line in f:
            n += 1
            # Keep the n-th line with probability 1/n => uniform over lines.
            if random.random() < 1.0 / n:
                # BUG FIX: strip the trailing newline; the original returned
                # it as part of the word, making the secret word contain an
                # unguessable '\n' character.
                word = line.strip()
    return word
def print_matches(word, letters):
    """Print *word* with every letter not yet in *letters* masked as '_',
    characters separated by single spaces."""
    revealed = [ch if ch in letters else "_" for ch in word]
    print(" ".join(revealed))
def exit_game():
    """Print a farewell and terminate the interpreter with status 0."""
    print("Bye !")
    sys.exit(0)
def print_hangman(guess_attempts):
    """Draw the gallows stage matching the number of failed guesses."""
    # Single-argument print(...) behaves identically under Python 2's
    # print statement, so this is also forward-compatible with Python 3.
    print(HANGMAN[guess_attempts])
def win(secret_word, guessed_letters):
    """Return True once every distinct letter of *secret_word* has been
    guessed.

    BUG FIX: the original compared len(secret_word) with the number of
    guessed letters, which can never match for words containing repeated
    letters (e.g. "HELLO": 5 characters but only 4 distinct letters), so
    such rounds were unwinnable. Comparing the *set* of letters against
    the guessed set is the correct condition.
    """
    return set(secret_word) <= guessed_letters
def lose(failed_attempts):
    """Return True when the gallows drawing is complete (last HANGMAN stage
    reached), i.e. the player has no failed guesses left."""
    final_stage = len(HANGMAN) - 1
    return failed_attempts == final_stage
if __name__ == '__main__':
    # Entry point: play rounds until the player declines another game.
    print("Let's play Hangman !")
    while True:
        play_game()
        # raw_input => Python 2; anything but "n"/"N" starts a new round.
        if raw_input("Play another ? [Y]/N ").upper() == "N":
            exit_game()
|
[
"catalin.cristu@gmail.com"
] |
catalin.cristu@gmail.com
|
99023c5533e743afb8349cd031816969f2e0f52e
|
6527b66fd08d9e7f833973adf421faccd8b765f5
|
/yuancloud/recicler/event/tests/test_mail_schedule.py
|
7b92308e184f89e0d7bc6436545f7d9324c6b05d
|
[] |
no_license
|
cash2one/yuancloud
|
9a41933514e57167afb70cb5daba7f352673fb4d
|
5a4fd72991c846d5cb7c5082f6bdfef5b2bca572
|
refs/heads/master
| 2021-06-19T22:11:08.260079
| 2017-06-29T06:26:15
| 2017-06-29T06:26:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,952
|
py
|
# -*- coding: utf-8 -*-
import datetime
from dateutil.relativedelta import relativedelta
from yuancloud import fields, tools
from yuancloud.addons.event.tests.common import TestEventCommon
from yuancloud.tools import mute_logger
class TestMailSchedule(TestEventCommon):
    """Checks event.mail schedulers: one mail right after each subscription
    and one reminder 2 days before the event begins."""

    @mute_logger('yuancloud.addons.base.ir.ir_model', 'yuancloud.models')
    def test_00_event_mail_schedule(self):
        """ Test mail scheduling for events """
        self.env['ir.values'].set_default('event.config.settings', 'auto_confirmation', True)
        now = fields.datetime.now()
        # Event runs from tomorrow to three days from now.
        event_date_begin = now + relativedelta(days=1)
        event_date_end = now + relativedelta(days=3)
        test_event = self.Event.sudo(self.user_eventmanager).create({
            'name': 'TestEventMail',
            'date_begin': event_date_begin,
            'date_end': event_date_end,
            'seats_max': 10,
            'event_mail_ids': [
                (0, 0, {  # right at subscription
                    'interval_unit': 'now',
                    'interval_type': 'after_sub',
                    'template_id': self.env['ir.model.data'].xmlid_to_res_id('event.event_subscription')}),
                (0, 0, {  # 2 days before event
                    'interval_nbr': 2,
                    'interval_unit': 'days',
                    'interval_type': 'before_event',
                    'template_id': self.env['ir.model.data'].xmlid_to_res_id('event.event_reminder')}),
            ]
        })
        # create some registrations
        self.Registration.sudo(self.user_eventuser).create({
            'event_id': test_event.id,
            'name': 'Reg0',
            'email': 'reg0@example.com',
        })
        self.Registration.sudo(self.user_eventuser).create({
            'event_id': test_event.id,
            'name': 'Reg1',
            'email': 'reg1@example.com',
        })
        # check subscription scheduler
        schedulers = self.EventMail.search([('event_id', '=', test_event.id), ('interval_type', '=', 'after_sub')])
        self.assertEqual(len(schedulers), 1, 'event: wrong scheduler creation')
        self.assertEqual(schedulers[0].scheduled_date, test_event.create_date, 'event: incorrect scheduled date for checking controller')
        # verify that subscription scheduler was auto-executed after each registration
        self.assertEqual(len(schedulers[0].mail_registration_ids), 2, 'event: incorrect number of mail scheduled date')
        mails = self.env['mail.mail'].search([('subject', 'ilike', 'subscription'), ('date', '>=', datetime.datetime.strftime(now, tools.DEFAULT_SERVER_DATETIME_FORMAT))], order='date DESC', limit=3)
        self.assertEqual(len(mails), 2, 'event: wrong number of subscription mail sent')
        for registration in schedulers[0].mail_registration_ids:
            self.assertTrue(registration.mail_sent, 'event: wrongly confirmed mailing on subscription')
        # check before event scheduler
        schedulers = self.EventMail.search([('event_id', '=', test_event.id), ('interval_type', '=', 'before_event')])
        self.assertEqual(len(schedulers), 1, 'event: wrong scheduler creation')
        self.assertEqual(schedulers[0].scheduled_date, datetime.datetime.strftime(event_date_begin + relativedelta(days=-2), tools.DEFAULT_SERVER_DATETIME_FORMAT), 'event: incorrect scheduled date')
        # execute event reminder scheduler explicitly
        schedulers[0].execute()
        self.assertTrue(schedulers[0].mail_sent, 'event: reminder scheduler should have sent an email')
        self.assertTrue(schedulers[0].done, 'event: reminder scheduler should be done')
        mails = self.env['mail.mail'].search([('subject', 'ilike', 'reminder'), ('date', '>=', datetime.datetime.strftime(now, tools.DEFAULT_SERVER_DATETIME_FORMAT))], order='date DESC', limit=3)
        self.assertEqual(len(mails), 2, 'event: wrong number of reminders in outgoing mail queue')
|
[
"liuganghao@lztogether.com"
] |
liuganghao@lztogether.com
|
7d82abc23d5e3d4bf5e54cd6ec2da4a4d1a8768f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02631/s475540632.py
|
3a87c44b29ca2411c0463a78d1676b61c5e7616c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
# Competitive-programming solution: read N and a list `a`; for each element,
# output the XOR of all the OTHER elements (AtCoder-style "scarves" problem).
N = int(input())
a = list(map(int, input().split()))
# XOR operator: ^
# Compute the XOR of every element of `a`; call the result S.
S = 0
for aa in a:
    S ^= aa
# The i-th answer is S ^ a[i]: XOR-ing a[i] a second time cancels it out of S.
ans = []
for ai in a:
    ans.append(S ^ ai)
print(*ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8319e02dd8e51c0f3c972288a559d15a0f3bb1c5
|
acd41dc7e684eb2e58b6bef2b3e86950b8064945
|
/res/packages/scripts/scripts/common/Lib/plat-mac/Carbon/Cm.py
|
81888a1e6189f6251d73285153430da7c7720a3a
|
[] |
no_license
|
webiumsk/WoT-0.9.18.0
|
e07acd08b33bfe7c73c910f5cb2a054a58a9beea
|
89979c1ad547f1a1bbb2189f5ee3b10685e9a216
|
refs/heads/master
| 2021-01-20T09:37:10.323406
| 2017-05-04T13:51:43
| 2017-05-04T13:51:43
| 90,268,530
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 362
|
py
|
# 2017.05.04 15:34:09 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-mac/Carbon/Cm.py
from _Cm import *
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\plat-mac\Carbon\Cm.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:34:09 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
b3df535e0bf14619764330e153f9691f97ebfe7a
|
ae3df32afc258c80cb2ce504ce87fa5bb7740ea7
|
/main/apps.py
|
a1c166fbd7a6ef9873d13d2341e00132f5d8b9dd
|
[] |
no_license
|
chensandiego/elast-python
|
622251d806b947899d74dc064c19193b418ac505
|
8c28a47acfc5ef540a017abcd786cf815591b163
|
refs/heads/master
| 2020-12-24T05:40:38.917432
| 2016-08-08T09:16:39
| 2016-08-08T09:16:39
| 65,190,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from __future__ import unicode_literals
from django.apps import AppConfig
from elasticsearch_dsl.connections import connections
class MainConfig(AppConfig):
    """Django app config for `main`; wires up Elasticsearch on startup."""
    name = 'main'
    def ready(self):
        # Called once Django has loaded all apps: register the default
        # elasticsearch-dsl connection (library defaults — presumably
        # localhost:9200; confirm against deployment settings).
        connections.create_connection()
|
[
"chensandiego@gmail.com"
] |
chensandiego@gmail.com
|
03077baac22100638f1f73d6914d61d5790e636d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03804/s359892659.py
|
cf5a2561cd5c03f89d2bfa0dc2d375e6139544c1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
# Competitive-programming solution: does the m x m pattern `b` occur as a
# contiguous sub-grid of the n x n grid `a`?  Prints "Yes" or "No".
n, m = map(int, input().split())
a = [""] * n
b = [""] * m
for i in range(n):
    a[i] = input()
for i in range(m):
    b[i] = input()
# Brute force: try every top-left offset (i, j) and compare all m*m cells.
# O(n^2 * m^2) — acceptable for contest limits; no early break on mismatch.
for i in range(n):
    for j in range(n):
        if i + m > n or j + m > n:
            continue
        flag = True
        for k in range(m):
            for l in range(m):
                if a[i + k][j + l] != b[k][l]:
                    flag = False
        if flag is True:
            print("Yes")
            exit(0)
print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d5ca2bcbd5de3c1b9c9bac46eab8058ddbdaa268
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/1005.py
|
d1785f14535df9f9f8739a47a08da1ea17308063
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
import numpy as np
def solve(n, j):
    """Print the Code Jam 'Tidy Numbers' answer for digit array *n*, case *j*.

    *n* is a numpy array of decimal digits; *j* is the zero-based case
    index.  Scanning right to left, whenever a digit exceeds the digit
    after it, decrement it and remember how many trailing positions must
    become 9 — yielding the largest non-decreasing number <= the input.
    """
    nines = 0
    for offset in range(1, len(n)):
        if n[-offset - 1] > n[-offset]:
            n[-offset - 1] -= 1
            nines = offset
    if nines:
        # Everything after the last decremented digit maxes out at 9.
        n[-nines:] = 9
    if not n[0]:
        # A leading 1 that was decremented to 0 is dropped.
        n = n[1:]
    print('Case #{}: {}'.format(j + 1, ''.join(map(str, n))))
def main():
    # Read the number of test cases, then one number per line; each number
    # is split into a numpy array of its decimal digits for solve().
    T = int(input())
    for i in range(T):
        solve(np.array(list(map(int, list(input())))), i)
if __name__ == '__main__':
    main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
4b4fb06a5c7779a15bbde10c3ca456691d7aa16b
|
2ed6ad4a736879a47d192159da45ca56610c089a
|
/tests/test_utils.py
|
5322f50e74f0e19c141fd1adbdd2a5b05e92fb39
|
[
"MIT"
] |
permissive
|
poonyisaTH/gsheets-db-api
|
a82bd35984766697757cc96aa74a1281d948f019
|
f023b32986d4da9a501fca8d435f2b6edc153353
|
refs/heads/master
| 2023-05-29T15:01:10.604324
| 2021-02-17T20:59:41
| 2021-02-17T20:59:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,952
|
py
|
# -*- coding: utf-8 -*-
import unittest
from moz_sql_parser import parse
import pyparsing
from .context import format_gsheet_error, format_moz_error
class UtilsTestSuite(unittest.TestCase):
    """Tests for the error-formatting helpers: caret placement and message
    layout for moz_sql_parser and Google Sheets API errors."""

    def test_format_moz_error(self):
        """A parse error is echoed with a caret under the offending char."""
        query = 'SELECT ))) FROM table'
        with self.assertRaises(pyparsing.ParseException) as context:
            parse(query)
        result = format_moz_error(query, context.exception)
        expected = (
            'SELECT ))) FROM table\n'
            '       ^\n'
            'Expected {{expression1 [{[as] column_name1}]} | "*"} '
            '(at char 7), (line:1, col:8)'
        )
        self.assertEqual(result, expected)

    def test_format_gsheet_error(self):
        """Original query, translated query and detailed message are shown."""
        query = 'SELECT A + B FROM "http://docs.google.com"'
        translated_query = 'SELECT A + B'
        errors = [{
            'reason': 'invalid_query',
            'detailed_message': (
                "Invalid query: Can't perform the function sum on values that "
                "are not numbers"
            ),
            'message': 'INVALID_QUERY',
        }]
        result = format_gsheet_error(query, translated_query, errors)
        expected = (
            'Original query:\n'
            'SELECT A + B FROM "http://docs.google.com"\n\n'
            'Translated query:\n'
            'SELECT A + B\n\n'
            'Error:\n'
            "Invalid query: Can't perform the function sum on values that "
            "are not numbers"
        )
        self.assertEqual(result, expected)

    def test_format_gsheet_error_caret(self):
        """When the gsheet error carries line/column info a caret line is
        inserted pointing at the failing position of the translated query."""
        query = 'SELECT A IS NULL FROM "http://docs.google.com"'
        translated_query = 'SELECT A IS NULL'
        errors = [{
            'reason': 'invalid_query',
            'detailed_message': (
                'Invalid query: PARSE_ERROR: Encountered " "is" "IS "" at '
                'line 1, column 10.\nWas expecting one of:\n'
                '    <EOF> \n'
                '    "where" ...\n'
                '    "group" ...\n'
                '    "pivot" ...\n'
                '    "order" ...\n'
                '    "skipping" ...\n'
                '    "limit" ...\n'
                '    "offset" ...\n'
                '    "label" ...\n'
                '    "format" ...\n'
                '    "options" ...\n'
                '    "," ...\n'
                '    "*" ...\n'
                '    "+" ...\n'
                '    "-" ...\n'
                '    "/" ...\n'
                '    "%" ...\n'
                '    "*" ...\n'
                '    "/" ...\n'
                '    "%" ...\n'
                '    "+" ...\n'
                '    "-" ...\n'
                '    '
            ),
            'message': 'INVALID_QUERY',
        }]
        result = format_gsheet_error(query, translated_query, errors)
        expected = (
            'Original query:\n'
            'SELECT A IS NULL FROM "http://docs.google.com"\n\n'
            'Translated query:\n'
            'SELECT A IS NULL\n\n'
            'Error:\n'
            'SELECT A IS NULL\n'
            '         ^\n'
            'Invalid query: PARSE_ERROR: Encountered " "is" "IS "" at line 1, '
            'column 10.\n'
            'Was expecting one of:\n'
            '    <EOF> \n'
            '    "where" ...\n'
            '    "group" ...\n'
            '    "pivot" ...\n'
            '    "order" ...\n'
            '    "skipping" ...\n'
            '    "limit" ...\n'
            '    "offset" ...\n'
            '    "label" ...\n'
            '    "format" ...\n'
            '    "options" ...\n'
            '    "," ...\n'
            '    "*" ...\n'
            '    "+" ...\n'
            '    "-" ...\n'
            '    "/" ...\n'
            '    "%" ...\n'
            '    "*" ...\n'
            '    "/" ...\n'
            '    "%" ...\n'
            '    "+" ...\n'
            '    "-" ...'
        )
        self.assertEqual(result, expected)
|
[
"roberto@dealmeida.net"
] |
roberto@dealmeida.net
|
26c3ed7037c5e7c99c281a9602db0848de390886
|
ce55c319f5a78b69fefc63595d433864a2e531b5
|
/前后端分离-vue-DRF/houfen_DRF-projects/15day周末作业/booklogin/user/views.py
|
ea76a3b29e6788ab22cbcb4e135039d76dd5f722
|
[] |
no_license
|
Suijng/1809_data
|
a072c875e8746190e3b715e53f1afe3323f4666b
|
45f8a57089f5c30ccc1a3cddb03b76dc95355417
|
refs/heads/master
| 2022-12-21T12:38:30.458291
| 2019-09-27T01:14:41
| 2019-09-27T01:14:41
| 211,207,071
| 0
| 0
| null | 2022-11-22T03:16:18
| 2019-09-27T00:55:21
|
HTML
|
UTF-8
|
Python
| false
| false
| 7,016
|
py
|
from django.shortcuts import render
# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from user.serializers import ResgsterUserSerializer,CategorySerializer,\
BookDetailSerializer,BookSerializer,\
ChpaterListSerializer,ChpaterDetailSerializer
from user.models import User,Token,Category,Book,Chpater
from utils.pagination import MyPageNumberPagination
# 注册
# class RegisterView(APIView):
#
# def post(self,request,*args,**kwargs):
# ret = {
# 'code':1,
# 'msg':'注册成功'
# }
# # 获取post请求参数
# data = request.data
# # 序列化请求参数
# ser = ResgsterUserSerializer(data=data)
# if ser.is_valid(): # 验证字段
# print(ser.validated_data)
# ser.save()
# else:
# # 验证失败打印错误信息
# print(ser.errors)
# ret['code'] = 0
# ret['msg'] = '参数错误,注册失败'
#
# return Response(ret)
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import CreateModelMixin,ListModelMixin,RetrieveModelMixin
from rest_framework import status
# 注册
# Registration endpoint: CreateModelMixin maps POST onto create().
class RegisterView(CreateModelMixin,GenericViewSet):
    queryset = User.objects.all()
    serializer_class = ResgsterUserSerializer
    # Override the mixin's create() so the response is always the project's
    # {code, msg} envelope instead of the raw serializer payload.
    def create(self, request, *args, **kwargs):
        ret = {
            'code': 1,
            'msg': '注册成功'
        }
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            self.perform_create(serializer)
            headers = self.get_success_headers(serializer.data)
            return Response(ret,status=status.HTTP_201_CREATED, headers=headers)
        else:
            # Validation failed: print the errors and report failure via the
            # envelope (HTTP status stays 200; clients check `code`).
            print(serializer.errors)
            ret['code'] = 0
            ret['msg'] = '参数错误,注册失败'
        return Response(ret)
#************************** 登录
# 生成token
import time,hashlib
def get_token(name, password):
    """Derive a login token.

    MD5 over the current timestamp in milliseconds, then the user name,
    then the password (each UTF-8 encoded); returns the hex digest.
    Time-seeded, so every call yields a fresh token.
    """
    millis = str(int(time.time() * 1000))
    digest = hashlib.md5(millis.encode('utf8'))
    for piece in (name, password):
        digest.update(piece.encode('utf8'))
    return digest.hexdigest()
# 登录
# Login endpoint: verifies credentials and issues a session token.
class LoginView(APIView):
    def post(self,request,*args,**kwargs):
        ret = {
            'code': 1,
            'msg': '登录成功'
        }
        # Read the POST payload.
        data = request.data
        # User name.
        name = data['name']
        # Password.
        password = data['password']
        try:
            obj = User.objects.filter(name=name).first()
            if obj:
                # The user exists.
                # NOTE(review): passwords are compared in plaintext here —
                # they appear to be stored unhashed; confirm and consider
                # hashing (e.g. Django's make_password/check_password).
                if obj.password == password:
                    # Login succeeded: generate and persist the token
                    # (one token row per user, overwritten on re-login).
                    token = get_token(name,password)
                    Token.objects.update_or_create(user=obj,defaults={'token':token})
                    ret['token'] = token
                else:
                    # Wrong password.
                    ret['msg'] = '账号或密码错误'
                    ret['code'] = 0
            else:
                # No such user.
                ret['msg'] = '该用户不存在'
                ret['code'] = 0
        except Exception as e:
            # Catch-all so the client always gets the {code, msg} envelope.
            print(e)
            ret['msg'] = '捕获异常'
            ret['code'] = 0
        return Response(ret)
#****************** 书籍分类
# Book-category endpoints: list all categories; retrieve lists the books
# of one category (paginated).
class CategoryView(ListModelMixin,RetrieveModelMixin,GenericViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    pagination_class = MyPageNumberPagination
    def get_serializer_class(self):
        # Choose the serializer dynamically per action.
        if self.action == 'list':
            return CategorySerializer
        elif self.action == 'retrieve':
            return BookSerializer
    # Returns a dict wrapping a list of category dicts for the frontend.
    def list(self, request, *args, **kwargs):
        print(request.version) # log the API version
        ret = {
            'code': 1,
        }
        queryset = self.filter_queryset(self.get_queryset())
        # No pagination here: return every category.
        serializer = self.get_serializer(queryset, many=True)
        ret['data'] = serializer.data
        return Response(ret)
    # Books belonging to one category.
    def retrieve(self, request, *args, **kwargs):
        category_id = kwargs.get('pk')
        if category_id:
            books = Book.objects.filter(category=category_id)
            # paginate_queryset slices out the current page.
            page = self.paginate_queryset(books)
            # A non-None page means pagination is active.
            # NOTE(review): when page is None (or pk missing) this method
            # falls through and returns None — the client would get a 500;
            # confirm whether an unpaginated response should be returned.
            if page is not None:
                serializer = self.get_serializer(page,many=True)
                return self.get_paginated_response(serializer.data)
#******** 书籍详情视图 获取每本book书的url地址
# Book detail endpoint: returns one book wrapped in the {code, data} envelope.
class BookDetailView(RetrieveModelMixin,GenericViewSet):
    queryset = Book.objects.all()
    serializer_class = BookDetailSerializer
    def retrieve(self, request, *args, **kwargs):
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        ret = {'code':1,'data':serializer.data}
        return Response(ret)
# 章节列表视图
from utils.authenandpermission import MyPermission,MyAuthentication
# Chapter endpoints: paginated chapter list per book; chapter detail requires
# authentication + permission (only for the detail route).
class ChapterView(ListModelMixin,RetrieveModelMixin,GenericViewSet):
    queryset = Chpater.objects.all()
    serializer_class = ChpaterListSerializer
    pagination_class = MyPageNumberPagination
    def get_serializer_class(self):
        # Light serializer for the list, full serializer for the detail.
        if self.action == 'list':
            return ChpaterListSerializer
        elif self.action == 'retrieve':
            return ChpaterDetailSerializer
    # Authentication
    def get_authenticators(self):
        if self.kwargs.get('pk'):
            # Chapter detail (lookup by chapter id): require authentication.
            return [MyAuthentication(),]
        return []
    # Permissions
    def get_permissions(self):
        if self.kwargs.get('pk'):
            # Chapter detail (lookup by chapter id): require permission.
            return [MyPermission(), ]
        return []
    def list(self, request, *args, **kwargs):
        book_id = kwargs.get('bookid')
        if book_id:
            chpaters = Chpater.objects.filter(book=book_id)
            # paginate_queryset slices out the current page.
            page = self.paginate_queryset(chpaters)
            # A non-None page means pagination is active.
            # NOTE(review): falls through to an implicit None when bookid is
            # missing or pagination is disabled — confirm intended behavior.
            if page is not None:
                serializer = self.get_serializer(page, many=True)
                return self.get_paginated_response(serializer.data)
    def retrieve(self, request, *args, **kwargs):
        # Fetch one chapter's detail by its id.
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        ret = {'code':1,'data':serializer.data}
        return Response(ret)
|
[
"1627765913@qq.com"
] |
1627765913@qq.com
|
bcf4694b4be4de84974a88f8c1e0c68664a56527
|
4913fb7fd32c3dd0da53af7a012569ec2254b35a
|
/59.集合数据的操作.py
|
75c83dbf6875876bad10856772cd2746191883a6
|
[] |
no_license
|
puhaoran12/python_note
|
8a21954050ba3126f2ef6d5d1e4a2904df954b9b
|
b807e7b7dd90c87cee606f50421400c8f3d0ba03
|
refs/heads/master
| 2023-07-07T20:20:04.546541
| 2021-08-21T02:17:12
| 2021-08-21T02:17:12
| 398,439,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
# Demonstrates set algebra on two overlapping sets: each operation is shown
# twice — once via its named method, once via its operator form.
s1 = {10, 20, 30}
s2 = {30, 40, 50}
# Intersection: elements common to both sets (intersection() / &).
for result in (s1.intersection(s2), s1 & s2):
    print(result)
# Union: elements in either set (union() / |).
for result in (s1.union(s2), s1 | s2):
    print(result)
# Difference: elements of s1 that are not in s2 (difference() / -).
for result in (s1.difference(s2), s1 - s2):
    print(result)
# Symmetric difference: elements in exactly one set (symmetric_difference() / ^).
for result in (s1.symmetric_difference(s2), s1 ^ s2):
    print(result)
|
[
"276191374@qq.com"
] |
276191374@qq.com
|
1265bb8736bd9b11afc120fcc3bdcb77428869ec
|
29a4c1e436bc90deaaf7711e468154597fc379b7
|
/modules/ieee/doc2/nextpow2.py
|
7c5fcf15c55e96875561be4f21550ed813ecbc7a
|
[
"BSL-1.0"
] |
permissive
|
brycelelbach/nt2
|
31bdde2338ebcaa24bb76f542bd0778a620f8e7c
|
73d7e8dd390fa4c8d251c6451acdae65def70e0b
|
refs/heads/master
| 2021-01-17T12:41:35.021457
| 2011-04-03T17:37:15
| 2011-04-03T17:37:15
| 1,263,345
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,923
|
py
|
[{'functor': {'arity': '1',
'call_types': [],
'ret_arity': '0',
'rturn': {'default': 'typename nt2::meta::as_integer<typename boost::result_of<nt2::meta::floating(T)>::type, signed>::type'},
'type_defs': [],
'types': ['real_', 'unsigned_int_', 'signed_int_']},
'unit': {'global_header': {'first_stamp': 'modified by jt the 04/12/2010',
'included': [],
'notes': [],
'ranges': {'real_': [['T(-10)', 'T(10)']],
'signed_int_': [['-100', '100']],
'unsigned_int_': [['0', '100']]},
'specific_values': {'default': {},
'real_': {'nt2::Inf<T>()': 'nt2::Zero<r_t>()',
'nt2::Minf<T>()': 'nt2::Zero<r_t>()',
'nt2::Mone<T>()': 'nt2::Zero<r_t>()',
'nt2::One<T>()': 'nt2::Zero<r_t>()',
'nt2::Zero<T>()': 'nt2::Zero<r_t>()'},
'signed_int_': {'nt2::Mone<T>()': 'nt2::Zero<r_t>()',
'nt2::One<T>()': 'nt2::Zero<r_t>()',
'nt2::Zero<T>()': 'nt2::Zero<r_t>()'},
'unsigned_int_': {'nt2::One<T>()': 'nt2::Zero<r_t>()',
'nt2::Zero<T>()': 'nt2::Zero<r_t>()'}},
'stamp': 'modified by jt the 12/12/2010',
'verif_test': {}}},
'version': '0.1'}]
|
[
"jtlapreste@gmail.com"
] |
jtlapreste@gmail.com
|
969ff18c3b0c3ebd06ccfc2dc0dfe97216e6a725
|
6a47ec6800610ea93479f91505e73a3eb4f34ae0
|
/user/serviced.py
|
74e25a9df84b86e320e670d436afb861e42769b5
|
[] |
no_license
|
risification/queue_project
|
1158aac7bae3b04f98c106c23c27281c96bcaf41
|
e85f9f2d1835f10a0247a569f88d4cb29803538a
|
refs/heads/master
| 2023-04-26T08:05:25.573243
| 2021-06-07T21:03:47
| 2021-06-07T21:03:47
| 374,119,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
def mailing(username):
    """Notify every superuser by email that *username* just registered.

    Collects all superuser email addresses and sends one message to the
    whole list via Django's default email backend.
    """
    email_list = []
    obj = User.objects.filter(is_superuser=True)
    for user in obj:
        email_list.append(user.email)
    subjects = 'hi'
    body = f'User with {username} register in database, pls check him !'
    email = EmailMessage(subject=subjects, body=body, to=email_list)
    email.send()
def validate_password(password):
    """Return True for a password of 8+ characters mixing letters and digits.

    Fix: the previous check was ``password.isdigit() and
    password.isalpha()`` — a string can never be all-digits AND
    all-letters at once, so every password was rejected.  The check now
    requires at least one digit and at least one letter.
    """
    if len(password) < 8:
        return False
    has_digit = any(ch.isdigit() for ch in password)
    has_alpha = any(ch.isalpha() for ch in password)
    return has_digit and has_alpha
|
[
"sultangaziev01@bk.ru"
] |
sultangaziev01@bk.ru
|
5a4d8c674b599a2c01fdc8fd795bf0ea39b3d9b4
|
0ddcfcbfc3faa81c79e320c34c35a972dab86498
|
/puzzles/orderly_queue.py
|
a373cf1f75c26e6261bdd30af8d0855a2660bb45
|
[] |
no_license
|
IvanWoo/coding-interview-questions
|
3311da45895ac4f3c394b22530079c79a9215a1c
|
1312305b199b65a11804a000432ebe28d1fba87e
|
refs/heads/master
| 2023-08-09T19:46:28.278111
| 2023-06-21T01:47:07
| 2023-06-21T01:47:07
| 135,307,912
| 0
| 0
| null | 2023-07-20T12:14:38
| 2018-05-29T14:24:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
# https://leetcode.com/problems/orderly-queue/
"""
ou are given a string s and an integer k. You can choose one of the first k letters of s and append it at the end of the string..
Return the lexicographically smallest string you could have after applying the mentioned step any number of moves.
Example 1:
Input: s = "cba", k = 1
Output: "acb"
Explanation:
In the first move, we move the 1st character 'c' to the end, obtaining the string "bac".
In the second move, we move the 1st character 'b' to the end, obtaining the final result "acb".
Example 2:
Input: s = "baaca", k = 3
Output: "aaabc"
Explanation:
In the first move, we move the 1st character 'b' to the end, obtaining the string "aacab".
In the second move, we move the 3rd character 'c' to the end, obtaining the final result "aaabc".
Constraints:
1 <= k <= s.length <= 1000
s consist of lowercase English letters.
"""
def orderly_queue(s: str, k: int) -> str:
    """Return the lexicographically smallest string reachable by repeatedly
    moving one of the first *k* characters of *s* to the end.

    With k >= 2 any permutation of *s* is reachable, so the sorted
    characters are optimal; with k == 1 only rotations are reachable, so
    the smallest rotation wins.
    """
    if k != 1:
        return "".join(sorted(s))
    rotations = (s[i:] + s[:i] for i in range(len(s)))
    return min(rotations)
|
[
"tyivanwu@gmail.com"
] |
tyivanwu@gmail.com
|
7267956f1f7b465699fb043dc755525ce97b5ccf
|
2c73882fc59ca85f4854a43bcda8cc9edd282b8d
|
/polls_api/views.py
|
2664dfc220c7e377fed156deed7d18e979f75115
|
[] |
no_license
|
mjstealth/guide-to-backbonejs-with-django
|
540236f3535ee171c3aa4c43a1be9394a8a7e4bc
|
e7d5016c800e1e0e282da0386cc6112d4eed63c1
|
refs/heads/master
| 2021-01-17T22:40:28.191509
| 2012-09-04T22:17:12
| 2012-09-04T22:17:12
| 5,679,419
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from djangorestframework import views
from djangorestframework.response import Response
from polls.forms import PollForm
from polls.models import Poll
from .resources import PollResource
class PollResults (views.View):
    """GET endpoint: return the serialized results for one poll (404 if
    the poll id does not exist)."""
    def get(self, request, poll_id):
        poll = get_object_or_404(Poll.objects.all(), pk=poll_id)
        results = PollResource().serialize(poll)
        return results
class PollVotes (views.View):
    """POST endpoint: record a vote on a poll via PollForm.

    On success responds 303 See Other pointing at the poll's results URL;
    on validation failure responds 400 with the form errors.
    """
    def post(self, request, poll_id):
        poll = get_object_or_404(Poll.objects.all(), pk=poll_id)
        form = PollForm(request.POST, instance=poll)
        if form.is_valid():
            form.save()
        else:
            return Response(content=form.errors, status=400)
        return Response(status=303, headers={'Location': reverse('polls_api_results', args=[poll_id])})
poll_results_view = PollResults.as_view()
poll_votes_view = PollVotes.as_view()
|
[
"mjumbewu@gmail.com"
] |
mjumbewu@gmail.com
|
42a8dac1509c16a1f9ee4746a23db2e89449bf64
|
11d265eba2ced9de43c339e4014c779b521320cd
|
/accounts/migrations/0004_auto_20200423_2253.py
|
eccb31bc3dd7e0a1872e9574429fc5cdc2edd129
|
[] |
no_license
|
Sloshpit/budget_old
|
d9271de625cd7e3aa66ccbec501b005e50cd2812
|
a5603996b026542adb3bc8c578c03bcb843bea01
|
refs/heads/master
| 2022-04-23T08:42:43.377827
| 2020-04-25T14:40:39
| 2020-04-25T14:40:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
# Generated by Django 3.0.5 on 2020-04-24 02:53
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Account.transaction_date to
    # balance_date and drops the transaction / transaction_amount fields.
    # Do not edit by hand except to resolve conflicts.

    dependencies = [
        ('accounts', '0003_auto_20200423_2251'),
    ]

    operations = [
        migrations.RenameField(
            model_name='account',
            old_name='transaction_date',
            new_name='balance_date',
        ),
        migrations.RemoveField(
            model_name='account',
            name='transaction',
        ),
        migrations.RemoveField(
            model_name='account',
            name='transaction_amount',
        ),
    ]
|
[
"neel.maheshwari@gmail.com"
] |
neel.maheshwari@gmail.com
|
f71387df36af9f3c0cb4897aa762c93b0ccbdb5f
|
3f60b999ea7bda83c9586f75f52463dc20337f24
|
/sensitive_user_portrait/weibo_rank/Offline_task.py
|
de919db3a2449e8b9e35b521386aa9943040a4ae
|
[] |
no_license
|
jianjian0dandan/sensitive_user_portrait
|
629e49ce71db92b50634bac9c828811cdb5381e9
|
cacc30267ebc0e621b1d48d4f1206277a0f48123
|
refs/heads/master
| 2021-01-20T23:18:07.138057
| 2016-05-22T12:09:40
| 2016-05-22T12:09:40
| 42,869,287
| 0
| 0
| null | 2015-09-21T13:55:12
| 2015-09-21T13:55:11
| null |
UTF-8
|
Python
| false
| false
| 6,454
|
py
|
#-*-coding: utf-8 -*-
import datetime
import json
import time as TIME
from elasticsearch import Elasticsearch
from time_utils import ts2datetime, datetime2ts, ts2date
from global_utils import es_user_portrait as es
WEIBO_RANK_KEYWORD_TASK_INDEX = 'weibo_rank_keyword_task'
WEIBO_RANK_KEYWORD_TASK_TYPE = 'weibo_rank_task'
MAX_ITEMS = 2 ** 10
def add_task(user_name , type="keyword", range="all", pre='flow_text_', during='1', start_time='2013-09-07', end_time='2013-09-07', keyword='hello,world', sort_norm='reposts_count', sort_scope='all_limit_keyword', time=1, number=100, isall=True):
    """Create a weibo keyword-rank task document in Elasticsearch.

    Builds the task body from the arguments (splitting the comma-separated
    *keyword* string) and indexes it under id "<user_name>-<epoch>".
    Returns that id (the document's 'user_ts' field).

    Fix: the body referenced ``isall`` but the name was never defined, so
    every call raised NameError.  It is now a trailing keyword argument
    (backward-compatible).  NOTE(review): the default True is an
    assumption — confirm the intended value with the frontend.
    Note: parameters ``type``, ``range`` and ``time`` shadow builtins /
    stdlib names; kept unchanged for caller compatibility.
    """
    time_now = int(TIME.time())
    task_id = user_name + "-" + str(time_now)
    # Split the comma-separated keyword string, dropping empty entries.
    tmp_list = keyword.split(',')
    keyword_list = []
    for item in tmp_list:
        if item:
            keyword_list.append(item)
    body_json = {
        'submit_user' : user_name ,
        'keyword' : json.dumps(keyword_list),
        'keyword_string': "&".join(keyword_list),
        'submit_time' : ts2datetime(time_now),
        'create_time': time_now,
        'end_time' : datetime2ts(end_time),
        'search_type' : type,
        'status':0,
        'range' : range ,
        'user_ts' : user_name + '-'+ str(time_now),
        'pre' : pre,
        'during' : during ,
        'start_time' : datetime2ts(start_time) ,
        'sort_norm' : sort_norm ,
        'sort_scope' : sort_scope,
        'time' : time ,
        'isall' : isall,
        'number': number
    }
    es.index(index = WEIBO_RANK_KEYWORD_TASK_INDEX , doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE , id=task_id, body=body_json)
    return body_json["user_ts"]
def search_weibo_task(user_name):
    """Return all rank tasks submitted by *user_name*, newest first.

    Queries up to MAX_ITEMS task documents and reshapes each hit into a
    plain dict for the frontend; the result is {'flag': True,
    'data': [...]}.
    """
    c_result = {}
    # NOTE(review): the filter addresses "user_rank_task.submit_user"
    # although documents are indexed under WEIBO_RANK_KEYWORD_TASK_TYPE —
    # confirm the field path matches the mapping.
    query = {"query":{"bool":{"must":[{"term":{"user_rank_task.submit_user":user_name}}]}},"size":MAX_ITEMS,"sort":[{"create_time":{"order":"desc"}}],"fields":["status","search_type","keyword","submit_user","sort_scope","sort_norm","start_time","user_ts","end_time", "create_time", 'number']}
    # 'if 1:' looks like the remnant of a removed try/except — left as-is.
    if 1:
        return_list = []
        result = es.search(index=WEIBO_RANK_KEYWORD_TASK_INDEX , doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE , body=query)['hits']
        c_result['flag'] = True
        for item in result['hits']:
            result_temp = {}
            result_temp['submit_user'] = item['fields']['submit_user'][0]
            result_temp['search_type'] = item['fields']['search_type'][0]
            result_temp['keyword'] = json.loads(item['fields']['keyword'][0])
            result_temp['sort_scope'] = item['fields']['sort_scope'][0]
            result_temp['sort_norm'] = item['fields']['sort_norm'][0]
            result_temp['start_time'] = ts2datetime(item['fields']['start_time'][0])
            result_temp['end_time'] = ts2datetime(item['fields']['end_time'][0])
            result_temp['status'] = item['fields']['status'][0]
            result_temp['create_time'] = ts2date(item['fields']['create_time'][0])
            result_temp['search_id'] = item['fields']['user_ts'][0]
            # Older documents may lack 'number'; default to 100.
            tmp = item['fields'].get('number', 0)
            if tmp:
                result_temp['number'] = int(tmp[0])
            else:
                result_temp['number'] = 100
            return_list.append(result_temp)
        c_result['data'] = return_list
        return c_result
def getResult(search_id):
    """Fetch one finished rank task by id and unpack its result payload.

    Returns a dict of the task parameters plus the stored 'result' and
    'text_results' JSON blobs; returns [] when the document lacks those
    fields (e.g. the task has not finished).
    """
    item = es.get(index=WEIBO_RANK_KEYWORD_TASK_INDEX , doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE , id=search_id)
    try:
        result_obj = {}
        result_obj['keyword'] = json.loads(item['_source']['keyword'])
        result_obj['sort_scope'] = item['_source']['sort_scope']
        result_obj['sort_norm'] = item['_source']['sort_norm']
        result_obj['start_time'] = ts2datetime(item['_source']['start_time'])
        result_obj['end_time'] =ts2datetime(item['_source']['end_time'])
        result_obj['result'] = json.loads(item['_source']['result'])
        result_obj['text_results'] = json.loads(item['_source']['text_results'])
        result_obj['number'] = item['_source']['number']
        return result_obj
    except :
        # NOTE(review): bare except silently maps any failure (missing key,
        # bad JSON) to []; the empty-list/dict type mismatch is what callers
        # currently expect — narrow to KeyError/ValueError when safe.
        return []
def delOfflineTask(search_id):
    """Delete the offline rank-task document *search_id*; always True.

    Note: es.delete raises if the id does not exist — no handling here.
    """
    es.delete(index=WEIBO_RANK_KEYWORD_TASK_INDEX , doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE , id = search_id )
    return True
def sort_task(user, keyword, status, start_time, end_time, submit_time):
    """Search *user*'s rank tasks with optional filters, newest first.

    Filters: comma-separated *keyword* string, *status* (2 means "any"),
    a [start_time, end_time] date window (both 'YYYY-mm-dd'), and an
    exact *submit_time*.  Returns a list of per-task field lists ending
    with the task id.
    """
    query_body = {
        "query":{
            "filtered":{
                "filter":{
                    "bool":{
                        "must":[
                            {"term":{"submit_user": user}}
                        ]
                    }
                }
            }
        },
        "size": 10000,
        "sort":{"submit_time":{"order":"desc"}}
    }
    # Build the optional filter clauses.
    query_list = []
    if keyword:
        keyword_list = keyword.split(',')
        query_list.append({"terms":{"keyword_string":keyword_list}})
    if status != 2:
        # status 2 is the sentinel for "no status filter".
        query_list.append({"term":{"status": status}})
    if start_time and end_time:
        start_ts = datetime2ts(start_time)
        end_ts = datetime2ts(end_time)
        # Both the task's start and end must fall inside the window.
        query_list.append({"range":{"start_time":{"gte":start_ts, "lte":end_ts}}})
        query_list.append({"range":{"end_time":{"gte":start_ts, "lte":end_ts}}})
    if submit_time:
        query_list.append({"term":{"submit_time": submit_time}})
    if query_list:
        query_body["query"]["filtered"]["filter"]["bool"]["must"].extend(query_list)
    #print query_body
    search_results = es.search(index=WEIBO_RANK_KEYWORD_TASK_INDEX, doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE, body=query_body)["hits"]["hits"]
    results = []
    if search_results:
        for item in search_results:
            iter_item = item['_source']
            # Positional row consumed by the frontend table.
            tmp = []
            tmp.append(iter_item['search_type'])
            tmp.append(json.loads(iter_item['keyword']))
            tmp.append(ts2datetime(iter_item['start_time']))
            tmp.append(ts2datetime(iter_item['end_time']))
            tmp.append(iter_item['range'])
            tmp.append(ts2date(iter_item['create_time']))
            tmp.append(iter_item['status'])
            tmp.append(iter_item['sort_norm'])
            tmp.append(iter_item['sort_scope'])
            tmp.append(item['_id']) # task_name
            results.append(tmp)
    return results
if __name__ == "__main__":
print search_task("admin@qq.com", [], 0, '', '', '2016-04-12')
|
[
"1257819385@qq.com"
] |
1257819385@qq.com
|
43bf411f069beff4b058247c875c82e5f19f01bc
|
4b1965b3d831ab54998973afb26f4327ed010336
|
/info/user/views.py
|
4edbf7fa25d4329141c2449cb244798b16174185
|
[] |
no_license
|
yc12192057/information11_mm
|
7d353dfe61962eb0bd2c29b7f0b54a2a62953262
|
2e4052d130b200797aa8a57a0d37f8267d523a8b
|
refs/heads/master
| 2020-03-21T10:22:23.558714
| 2018-06-24T02:34:25
| 2018-06-24T02:34:25
| 138,447,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,383
|
py
|
from flask import current_app
from flask import g
from flask import request
from flask import session
from info.utils.image_storage import storage
from info import constants
from info import db
from info.models import Category, News
from info.utils.response_code import RET
from . import profile_blue
from flask import render_template,redirect,jsonify
from info.utils.common import user_login_data
from info.utils.image_storage import storage
@profile_blue.route("/news_list")
@user_login_data
def news_list():
    """Render a paginated list (2 per page) of news the current user wrote."""
    page = request.args.get("p",1)
    try:
        page = int(page)
    except Exception as e:
        # Fall back to the first page when "p" is not a valid integer.
        current_app.logger.error(e)
        page = 1
    user = g.user
    paginate = News.query.filter(News.user_id == user.id).paginate(page,2,False)
    items = paginate.items
    current_page = paginate.page
    total_page = paginate.pages
    news_list = []
    for item in items:
        news_list.append(item.to_review_dict())
    data = {
        "current_page":current_page,
        "total_page":total_page,
        "news_list":news_list
    }
    return render_template("news/user_news_list.html",data = data)
@profile_blue.route("/news_release",methods = ["GET","POST"])
@user_login_data
def news_release():
    """GET: render the publish form; POST: create a news item for review."""
    if request.method == "GET":
        # First fetch the news categories and pass them to the template.
        category_list = Category.query.all()
        categorys = []
        for category in category_list:
            categorys.append(category.to_dict())
        # Drop the first category — presumably the "latest/all" placeholder
        # that is not a real publishing category (confirm with the model).
        categorys.pop(0)
        data = {
            "categories":categorys
        }
        return render_template("news/user_news_release.html",data = data)
    # POST: read the form fields the user submitted for the new post.
    title = request.form.get("title")
    category_id = request.form.get("category_id")
    digest = request.form.get("digest")
    index_image = request.files.get("index_image")
    content = request.form.get("content")
    if not all([title,category_id,digest,index_image,content]):
        return jsonify(errno = RET.PARAMERR,errmsg = "参数错误")
    user = g.user
    # Upload the cover image (storage returns the Qiniu object key).
    index_image = index_image.read()
    key = storage(index_image)
    # Persist the submitted news item to the database.
    news = News()
    news.title = title
    news.source = "个人来源"
    news.digest = digest
    news.content = content
    news.index_image_url = constants.QINIU_DOMIN_PREFIX + key
    news.category_id = category_id
    news.user_id = user.id
    # Status 1 means the post is pending review.
    news.status = 1
    db.session.add(news)
    db.session.commit()
    return jsonify(errno = RET.OK,errmsg = "发布成功")
@profile_blue.route("/collection")
@user_login_data
def collection():
    """Render the current user's collected news, paginated 10 per page."""
    # All collected news of the user; pagination starts at page 1.
    page = request.args.get("p",1)
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1
    user = g.user
    # Fetch the logged-in user's collected-news list.
    # First paginate() argument: page number.
    # Second argument: items per page.
    paginate = user.collection_news.paginate(page,10,False)
    items = paginate.items
    current_page = paginate.page
    total_page = paginate.pages
    collections = []
    for item in items:
        collections.append(item.to_dict())
    data = {
        "collections":collections,
        "current_page":current_page,
        "total_page":total_page,
    }
    return render_template("news/user_collection.html",data = data)
"""修改密码"""
@profile_blue.route("/pass_info",methods = ["GET","POST"])
@user_login_data
def pass_info():
if request.method == "GET":
return render_template("news/user_pass_info.html")
user = g.user
old_password = request.json.get("old_password")
new_password = request.json.get("new_password")
if not all([old_password,new_password]):
return jsonify(errno = RET.PARAMERR,errmsg = "请输入密码")
# 判断旧的密码是否正确,只有当旧密码正确,才能修改新的密码
if not user.check_password(old_password):
return jsonify(errno = RET.PARAMERR,errmsg = "旧密码错误")
# 如果旧密码正确,那么直接更新到当前的数据库里面
user.password = new_password
db.session.commit()
return jsonify(errno = RET.OK,errmsg = "密码修改成功")
@profile_blue.route("/pic_info",methods= ["GET","POST"])
@user_login_data
def pic_info():
    """View and change the logged-in user's avatar.

    GET renders the avatar page; POST uploads the new image to Qiniu and
    stores the returned key on the user.  The browsable URL is
    QINIU_DOMIN_PREFIX + key.
    """
    user = g.user
    if request.method == "GET":
        data = {
            "user_info": user.to_dict() if user else None
        }
        return render_template("news/user_pic_info.html", data=data)

    # Bug fix: request.files.get(...) returns None when no file was sent,
    # which previously crashed on .read() with an AttributeError.
    avatar_file = request.files.get("avatar")
    if not avatar_file:
        return jsonify(errno=RET.PARAMERR, errmsg="参数错误")

    # storage() uploads the raw bytes and returns the key under the Qiniu domain.
    url = storage(avatar_file.read())
    user.avatar_url = url
    db.session.commit()
    return jsonify(errno = RET.OK,errmsg = "上传成功",data={"avatar_url": constants.QINIU_DOMIN_PREFIX + url})
"""修改个人信息"""
@profile_blue.route("/base_info",methods = ["GET","POST"])
@user_login_data
def base_info():
user = g.user
if request.method == "GET":
data = {
"user_info": user.to_dict() if user else None
}
return render_template("news/user_base_info.html",data = data)
nick_name = request.json.get("nick_name")
signature = request.json.get("signature")
gender = request.json.get("gender")
user.nick_name = nick_name
user.signature = signature
user.gender = gender
# 更新数据库
db.session.commit()
# 更新session里面的数据
session["nick_name"] = user.nick_name
return jsonify(errno = RET.OK,errmsg = "修改成功")
@profile_blue.route("/info")
@user_login_data
def info():
    """User-center entry page; anonymous visitors are sent back to the home page."""
    user = g.user
    if not user:
        return redirect("/")

    # `user` is guaranteed truthy past the guard above.
    data = {"user_info": user.to_dict()}
    return render_template("news/user.html", data=data)
|
[
"xwp_fullstack@163.com"
] |
xwp_fullstack@163.com
|
1fcb9a5bc116b70cacd5ddbd3646b35b3f6e0d8c
|
e0527bce5c53a196752d3a16adf50cb60754de5f
|
/05-How to Repeat Actions demos/02-dotty_dots.py
|
47bb00a38d29385492c81d3cb4b98ea027472cab
|
[] |
no_license
|
ARWA-ALraddadi/python-tutorial-for-beginners
|
ddeb657f419fbc176bea273bc9fb6b88d1894191
|
21cedfc47871ca4d25c2382464c60ab0a2121205
|
refs/heads/master
| 2023-06-30T20:24:30.688800
| 2021-08-08T08:22:29
| 2021-08-08T08:22:29
| 193,094,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
#---------------------------------------------------------------------
#
# Dotty dots - Repeating actions with minor variations
#
# A small demonstration of repeating an action with a different value
# on each pass: the outer loop varies the row, the inner loop varies
# the colour, and together they paint a grid of multi-coloured dots on
# a black canvas.  Experiment with the values to produce new patterns!
#

# Drawing parameters, all in pixels
canvas_size = 600
max_coord = 250
grid_size = 20
dot_size = 15

# Create the square canvas with a black background
from turtle import *
setup(canvas_size, canvas_size)
title("Dotty dots")
bgcolor('black')

# Don't draw lines while moving, and draw as quickly as possible
penup()
speed('fastest')

# One colour per column of the grid
column_colours = ['red', 'green', 'blue', 'yellow', 'white', 'orange',
                  'aqua', 'olive', 'misty rose', 'salmon', 'spring green',
                  'fuchsia', 'deep sky blue', 'silver', 'aquamarine',
                  'orange red', 'seashell', 'chocolate', 'light steel blue',
                  'tomato', 'chartreuse', 'bisque', 'dark orchid',
                  'powder blue', 'gainsboro']

# How many rows fit between the top and bottom of the drawing area,
# one grid cell apart
number_of_rows = max_coord * 2 // grid_size

# Paint the grid one row at a time
for row_number in range(number_of_rows):
    # Jump to the left edge of this row
    row_y = max_coord - row_number * grid_size
    goto(-max_coord, row_y)
    # Stamp one dot of each colour across the row
    for colour in column_colours:
        color(colour)
        dot(dot_size)
        forward(grid_size)

# Finish up tidily
hideturtle()
done()
|
[
"noreply@github.com"
] |
ARWA-ALraddadi.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.