| column | type | range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
70a6b84238efa4e023179a2ad24b371742532fce
|
fbb141c9b99c4c08ce2c0acfe13630d694d98744
|
/7-stack/4.10-shu-zu-zhong-de-ni-xu-dui-lcof.py
|
f04c9947f38d38bf2a749719998cd041df3b5b3b
|
[] |
no_license
|
huixian3/algorithm017
|
1534bc8a0364595b056e0f346cfe9fa8b8fee3bd
|
f43c99dc7810de863f8cd79115e272ac65ce9257
|
refs/heads/master
| 2023-04-02T07:10:03.670003
| 2021-04-13T14:38:36
| 2021-04-13T14:38:36
| 297,989,771
| 0
| 0
| null | 2020-09-23T14:05:41
| 2020-09-23T14:05:40
| null |
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
'''
Two numbers in an array form a reverse pair when the earlier number is greater than the later one.
Given an array, return the total number of reverse pairs in it.
'''
# Merge sort and reverse-pair counting share the same divide-and-conquer structure.
# Count inside the merge step: whenever a left element is greater than a right element,
# every element still remaining in the left half pairs with it (mid - i + 1 pairs).
class Solution(object):
def reversePairs(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
self.cnt = 0
def merge(nums, start, mid, end, temp):
i, j = start, mid + 1
while i <= mid and j <= end:
if nums[i] <= nums[j]:
temp.append(nums[i])
i += 1
else:
self.cnt += mid - i + 1
temp.append(nums[j])
j += 1
while i <= mid:
temp.append(nums[i])
i += 1
while j <= end:
temp.append(nums[j])
j += 1
for i in range(len(temp)):
nums[start + i] = temp[i]
temp.clear()
def mergeSort(nums, start, end, temp):
if start >= end: return
mid = (start + end) >> 1
mergeSort(nums, start, mid, temp)
mergeSort(nums, mid + 1, end, temp)
merge(nums, start, mid, end, temp)
mergeSort(nums, 0, len(nums) - 1, [])
return self.cnt
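# A quick sanity check (hypothetical input, not part of the original file):
# [7, 5, 6, 4] contains the 5 reverse pairs (7,5), (7,6), (7,4), (5,4), (6,4).
if __name__ == "__main__":
    print(Solution().reversePairs([7, 5, 6, 4]))  # expected: 5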
|
[
"zhanhuixian@meituan.com"
] |
zhanhuixian@meituan.com
|
8a0079dd597dba447df0d9aed6437df677f2accb
|
710026f64d3a23913ae71d2300147b371f5cb75b
|
/gammapy/data/tests/test_all.py
|
138034335de153523316b69996357d13979c5972
|
[] |
no_license
|
Cadair/gammapy
|
557c01e33d93fe6cc2daaac35b53590d33e31fbc
|
19f4fdd299b8c3495c732fc412f5d18cb9df3590
|
refs/heads/master
| 2020-12-13T21:52:37.790005
| 2014-02-20T15:15:10
| 2014-02-20T15:15:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division
from numpy.testing import assert_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
from .. import poisson_stats_image
def test_poisson_stats_image():
"""Get the data file via the gammapy.data.poisson_stats_image function"""
data = poisson_stats_image()
assert data.sum() == 40896
def test_poisson_stats_image_direct():
"""Get the data file directly via get_pkg_data_filename"""
filename = get_pkg_data_filename('../poisson_stats_image/counts.fits.gz')
data = fits.getdata(filename)
assert data.sum() == 40896
def test_poisson_stats_extra_info():
images = poisson_stats_image(extra_info=True)
refs = dict(counts=40896, model=41000, source=1000, background=40000)
for name, expected in refs.items():
assert_allclose(images[name].sum(), expected)
|
[
"Deil.Christoph@gmail.com"
] |
Deil.Christoph@gmail.com
|
155abf1cb8c24811bbe7251caef3d6eb6e1d3629
|
617ff229b63165368e32a303488b29c738a5378a
|
/src/bad smell/plot_smell_fft.py
|
c75518ce5eedbec995ac26155c38b340be32e0b4
|
[] |
no_license
|
dimpisingh/e-dom
|
a1ae76229a31c0a5dcc725a80e7a741be660a0da
|
a820874545e97ec10580db6dd11e35c7eec65abc
|
refs/heads/master
| 2022-05-13T07:27:31.180506
| 2019-04-10T13:53:26
| 2019-04-10T13:53:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,425
|
py
|
import os
import plotly
# plotly.tools.set_credentials_file(username='dichen001', api_key='czrCH0mQHmX5HLXSHBqS')
plotly.tools.set_credentials_file(username='amritbhanu', api_key='cuaXxPfbSxptk2irXf7P')
import plotly.plotly as py
import plotly.graph_objs as go
import cPickle
import pickle
cwd = os.getcwd()
data_path = os.path.join(cwd,"..","..","data", "smell")
details_path = os.path.join(data_path, 'smell_details_38-MDLP.pkl')
details = cPickle.load(open(details_path, 'rb'))
with open(os.path.join(data_path, 'dodge.pickle'), 'rb') as handle:
dodge = pickle.load(handle)
n1, n2, n3, n4 = "DataClass", "FeatureEnvy", "GodClass", "LongMethod"
t1, t2, t3, t4 = "DataClass", "FeatureEnvy", "GodClass", "LongMethod"
classifiers = ["DT", "RF", "LR", "kNN", "FFT-Dist2Heaven", "Dodge_0.2_30"]
colors = ["#AED6F1", "#5DADE2", "#2874A6", "#1B4F72", "#000000", "#FF5722"]#, "#E53935"]
data = []
l = len(details[n1][classifiers[0]]['dist2heaven'])
x = [t1] * l + [t2] * l + [t3] * l + [t4] * l
x1 = [t1] * 21 + [t2] * 21 + [t3] * 21 + [t4] * 21
for i, clf in enumerate(classifiers):
if clf != "Dodge_0.2_30":
tmp_bar = go.Box(
y=sorted(details[n1][clf]['dist2heaven']) +
sorted(details[n2][clf]['dist2heaven']) +
sorted(details[n3][clf]['dist2heaven']) +
sorted(details[n4][clf]['dist2heaven']),
x=x,
name=clf,
marker=dict(
color=colors[i]
)
)
else:
tmp_bar = go.Box(
y=sorted(dodge[n1]) +
sorted(dodge[n2]) +
sorted(dodge[n3]) +
sorted(dodge[n4]),
x=x1,
name=clf,
marker=dict(
color=colors[i]
)
)
data.append(tmp_bar)
layout = go.Layout(
autosize=True,
title="Bad Smell - 25 Times",
font=dict(size=18),
yaxis=dict(
title='Distance to Heaven',
zeroline=False,
titlefont=dict(size=20),
tickfont=dict(size=24),
automargin=True,
),
xaxis=dict(
title='Bad Smell Dataset (very small)',
zeroline=False,
titlefont=dict(size=24),
tickfont=dict(size=20),
tickangle=-45,
automargin=True,
),
boxmode='group',
legend=dict(font=dict(size=20)
)
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename="Smell - 25 Times")
|
[
"amritbhanu@gmail.com"
] |
amritbhanu@gmail.com
|
6c6d5f913ad89423170d7e4e728f2d9b67184ad4
|
5bb8b4c7faeebd16da16ecbcd4a98aabaf688e8f
|
/data_tools/walker/src-cikm/build_graph2/citations.py
|
2438338b186b181a26af7fd8e16ccbc3d15dfd74
|
[] |
no_license
|
xiaoqinzhe/vrdetection
|
014fc2b61c9b30dd2699fdba41089b18b7f060be
|
604a812a21a98d72ba8e23a716eb72153bdaa7c4
|
refs/heads/master
| 2023-07-04T07:44:12.141404
| 2021-08-01T06:21:17
| 2021-08-01T06:21:17
| 150,063,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
#coding:utf-8
import json
file_path = '/mnt/hdd2/dblp/dblp_ref.json'
citation_file_path = '/mnt/hdd2/cikm/citation.txt'
with open(file_path) as ifile, open(citation_file_path, 'w') as ofile:
for line in ifile:
paper = json.loads(line)
if 'references' not in paper:
continue
output_papers = [paper['_id']]
output_papers += paper['references']
ofile.write('{}\n'.format(' '.join(output_papers)))
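# A hypothetical input line and the output it produces (not part of the original file):
#   {"_id": "p1", "references": ["p2", "p3"]}  ->  "p1 p2 p3"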
|
[
"xiaoqinzhe@qq.com"
] |
xiaoqinzhe@qq.com
|
d90f4c250ad6540185c4685ac49cf4e5df824ab7
|
b4f661f1153637d9cfec18e4cf56b64582c31385
|
/src/Python/304.二维区域和检索-矩阵不可变.py
|
fd58f373a91c848ff44f0bd8495b1cc29de69c8a
|
[] |
no_license
|
Icedomain/LeetCode
|
12dd24bbe2d7aba1f6ebe61bffe4c5e6284fbd06
|
4bc8e41499b9c884d64b5a44fe783fdb7030676e
|
refs/heads/master
| 2021-02-15T15:12:15.009790
| 2020-09-22T11:37:59
| 2020-09-22T11:37:59
| 244,909,740
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
#
# @lc app=leetcode.cn id=304 lang=python3
#
# [304] Range Sum Query 2D - Immutable
#
from typing import List

class NumMatrix:
def __init__(self, matrix: List[List[int]]):
if not matrix:
return
n, m = len(matrix), len(matrix[0])
self.sums = [ [0 for j in range(m+1)] for i in range(n+1) ]
for i in range(1, n+1):
for j in range(1, m+1):
self.sums[i][j] = matrix[i-1][j-1] + self.sums[i][j-1] + self.sums[i-1][j] - self.sums[i-1][j-1]
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
row1, col1, row2, col2 = row1+1, col1+1, row2+1, col2+1
return self.sums[row2][col2] - self.sums[row2][col1-1] - self.sums[row1-1][col2] + self.sums[row1-1][col1-1]
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
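# A sanity check with the LeetCode 304 example matrix (hypothetical, not part of the original file):
if __name__ == "__main__":
    obj = NumMatrix([
        [3, 0, 1, 4, 2],
        [5, 6, 3, 2, 1],
        [1, 2, 0, 1, 5],
        [4, 1, 0, 1, 7],
        [1, 0, 3, 0, 5],
    ])
    print(obj.sumRegion(2, 1, 4, 3))  # expected: 8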
|
[
"1271029566@qq.com"
] |
1271029566@qq.com
|
b04f5902369c128c688593ca330bb43b55ffa29c
|
a8ed252f3b76a8d134f026ccf0204c5e5e918edb
|
/apps/common/views.py
|
5651d62f133a31899ed83656bb5d35032146918d
|
[
"MIT"
] |
permissive
|
F483/bitcoin-bounties.com
|
a8c84bfe61df25bae93f1bfd3c055754414cbe27
|
64a4a973fa38a4fb54178d855c1b82ec18799628
|
refs/heads/master
| 2020-04-25T23:19:32.859170
| 2014-11-22T15:40:48
| 2014-11-22T15:40:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Fabian Barkhau <fabian.barkhau@gmail.com>
# License: MIT (see LICENSE.TXT file)
from django.http import HttpResponseRedirect
from django.views.decorators.http import require_http_methods
from apps.common.utils.templates import render_response
@require_http_methods(['GET'])
def render_template(request, template, context=None):
    return render_response(request, template, context or {})
@require_http_methods(['GET'])
def redirect_to(request, url):
return HttpResponseRedirect(url)
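# Hypothetical urls.py wiring for these views (not part of the original file):
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^about/$', render_template, {'template': 'about.html'}),
#     url(r'^old/$', redirect_to, {'url': '/new/'}),
# ]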
|
[
"fabian.barkhau@gmail.com"
] |
fabian.barkhau@gmail.com
|
e66adf2a6d1f32f1467ae3ff1e1bdc2c509baa2b
|
33fa46042e7decb01008b73202e5d24ce6bad03a
|
/config/settings/test.py
|
ece2ef2fbcdf6949a10c811059a6d1e4cacc7b42
|
[
"MIT"
] |
permissive
|
rlaneyjr/project_pawz
|
2d2ef8ef8a801e788c139a35bf82d72aafac8f69
|
27f316ef35968ed1319ec0585a050ebed795763a
|
refs/heads/master
| 2022-12-05T11:39:04.384922
| 2019-05-28T22:24:24
| 2019-05-28T22:24:24
| 185,061,794
| 0
| 0
|
MIT
| 2022-12-03T08:21:14
| 2019-05-05T17:28:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY", default="xCRogvYltWv6xc9QaA51CNCNXySMe9Oq1PY8x0avsZU15HEZq9kpa2aTphciScG0")
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": ""
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# Your stuff...
# ------------------------------------------------------------------------------
|
[
"rlaneyjr@gmail.com"
] |
rlaneyjr@gmail.com
|
abd3bcdbedfbf53aa74ec49c4c5efae200ede1c3
|
536656cd89e4fa3a92b5dcab28657d60d1d244bd
|
/tools/perf/core/results_processor/command_line_unittest.py
|
5dd1f9abc72545c59b4f8dfebf02d90dec2e566e
|
[
"Zlib",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"MIT",
"APSL-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] |
permissive
|
ECS-251-W2020/chromium
|
79caebf50443f297557d9510620bf8d44a68399a
|
ac814e85cb870a6b569e184c7a60a70ff3cb19f9
|
refs/heads/master
| 2022-08-19T17:42:46.887573
| 2020-03-18T06:08:44
| 2020-03-18T06:08:44
| 248,141,336
| 7
| 8
|
BSD-3-Clause
| 2022-07-06T20:32:48
| 2020-03-18T04:52:18
| null |
UTF-8
|
Python
| false
| false
| 7,545
|
py
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for results_processor.
These tests mostly test that argument parsing and processing work as expected.
They mock out accesses to the operating system, so no files are actually read
nor written.
"""
import datetime
import posixpath
import re
import unittest
import mock
from core.results_processor import command_line
# To easily mock module level symbols within the command_line module.
def module(symbol):
return 'core.results_processor.command_line.' + symbol
class ProcessOptionsTestCase(unittest.TestCase):
def setUp(self):
self.standalone = False
# Mock os module within results_processor so path manipulations do not
# depend on the file system of the test environment.
mock_os = mock.patch(module('os')).start()
def realpath(path):
return posixpath.normpath(posixpath.join(mock_os.getcwd(), path))
def expanduser(path):
return re.sub(r'~', '/path/to/home', path)
mock_os.getcwd.return_value = '/path/to/curdir'
mock_os.path.realpath.side_effect = realpath
mock_os.path.expanduser.side_effect = expanduser
mock_os.path.dirname.side_effect = posixpath.dirname
mock_os.path.join.side_effect = posixpath.join
mock.patch(module('_DefaultOutputDir'),
return_value='/path/to/output_dir').start()
mock.patch(module('path_util.GetChromiumSrcDir'),
return_value='/path/to/chromium').start()
def tearDown(self):
mock.patch.stopall()
def ParseArgs(self, args):
parser = command_line.ArgumentParser(standalone=self.standalone)
options = parser.parse_args(args)
command_line.ProcessOptions(options)
return options
class TestProcessOptions(ProcessOptionsTestCase):
def testOutputDir_default(self):
options = self.ParseArgs([])
self.assertEqual(options.output_dir, '/path/to/output_dir')
def testOutputDir_homeDir(self):
options = self.ParseArgs(['--output-dir', '~/my_outputs'])
self.assertEqual(options.output_dir, '/path/to/home/my_outputs')
def testOutputDir_relPath(self):
options = self.ParseArgs(['--output-dir', 'my_outputs'])
self.assertEqual(options.output_dir, '/path/to/curdir/my_outputs')
def testOutputDir_absPath(self):
options = self.ParseArgs(['--output-dir', '/path/to/somewhere/else'])
self.assertEqual(options.output_dir, '/path/to/somewhere/else')
@mock.patch(module('datetime'))
def testIntermediateDir_default(self, mock_datetime):
mock_datetime.datetime.utcnow.return_value = (
datetime.datetime(2015, 10, 21, 7, 28))
options = self.ParseArgs(['--output-dir', '/output'])
self.assertEqual(options.intermediate_dir,
'/output/artifacts/run_20151021T072800Z')
@mock.patch(module('datetime'))
def testIntermediateDir_withResultsLabel(self, mock_datetime):
mock_datetime.datetime.utcnow.return_value = (
datetime.datetime(2015, 10, 21, 7, 28))
options = self.ParseArgs(
['--output-dir', '/output', '--results-label', 'test my feature'])
self.assertEqual(options.intermediate_dir,
'/output/artifacts/test_my_feature_20151021T072800Z')
def testUploadBucket_noUploadResults(self):
options = self.ParseArgs([])
self.assertFalse(options.upload_results)
self.assertIsNone(options.upload_bucket)
@mock.patch(module('cloud_storage'))
def testUploadBucket_uploadResultsToDefaultBucket(self, mock_storage):
mock_storage.BUCKET_ALIASES = {'output': 'default-bucket'}
options = self.ParseArgs(['--upload-results'])
self.assertTrue(options.upload_results)
self.assertEqual(options.upload_bucket, 'default-bucket')
@mock.patch(module('cloud_storage'))
def testUploadBucket_uploadResultsToBucket(self, mock_storage):
mock_storage.BUCKET_ALIASES = {'output': 'default-bucket'}
options = self.ParseArgs(
['--upload-results', '--upload-bucket', 'my_bucket'])
self.assertTrue(options.upload_results)
self.assertEqual(options.upload_bucket, 'my_bucket')
@mock.patch(module('cloud_storage'))
def testUploadBucket_uploadResultsToAlias(self, mock_storage):
mock_storage.BUCKET_ALIASES = {
'output': 'default-bucket', 'special': 'some-special-bucket'}
options = self.ParseArgs(
['--upload-results', '--upload-bucket', 'special'])
self.assertTrue(options.upload_results)
self.assertEqual(options.upload_bucket, 'some-special-bucket')
def testDefaultOutputFormat(self):
options = self.ParseArgs([])
self.assertEqual(options.output_formats, ['html'])
def testUnkownOutputFormatRaises(self):
with self.assertRaises(SystemExit):
self.ParseArgs(['--output-format', 'unknown'])
def testNoDuplicateOutputFormats(self):
options = self.ParseArgs(
['--output-format', 'html', '--output-format', 'csv',
'--output-format', 'html', '--output-format', 'csv'])
self.assertEqual(options.output_formats, ['csv', 'html'])
def testTraceProcessorPath_noBuildDir(self):
with mock.patch(module('os.environ.get'), return_value=None):
options = self.ParseArgs([])
self.assertIsNone(options.trace_processor_path)
def testTraceProcessorPath_chromiumOutputDir(self):
def isfile(path):
return path == '/path/to/chromium/out_test/Debug/trace_processor_shell'
def env_get(name):
if name == 'CHROMIUM_OUTPUT_DIR':
return '/path/to/chromium/out_test/Debug'
with mock.patch(module('os.path.isfile')) as isfile_patch:
with mock.patch(module('os.environ.get')) as env_patch:
isfile_patch.side_effect = isfile
env_patch.side_effect = env_get
options = self.ParseArgs([])
self.assertEqual(options.trace_processor_path,
'/path/to/chromium/out_test/Debug/trace_processor_shell')
def testTraceProcessorPath_oneBuildDir(self):
def isfile(path):
return path == '/path/to/chromium/out/Release/trace_processor_shell'
with mock.patch(module('os.path.isfile')) as isfile_patch:
isfile_patch.side_effect = isfile
options = self.ParseArgs([])
self.assertEqual(options.trace_processor_path,
'/path/to/chromium/out/Release/trace_processor_shell')
def testTraceProcessorPath_twoBuildDirs(self):
def isfile(path):
return path in ['/path/to/chromium/out/Release/trace_processor_shell',
'/path/to/chromium/out/Debug/trace_processor_shell']
with mock.patch(module('os.path.isfile')) as isfile_patch:
isfile_patch.side_effect = isfile
options = self.ParseArgs([])
self.assertIsNone(options.trace_processor_path)
class StandaloneTestProcessOptions(ProcessOptionsTestCase):
def setUp(self):
super(StandaloneTestProcessOptions, self).setUp()
self.standalone = True
def testOutputFormatRequired(self):
with self.assertRaises(SystemExit):
self.ParseArgs([])
def testIntermediateDirRequired(self):
with self.assertRaises(SystemExit):
self.ParseArgs(['--output-format', 'json-test-results'])
def testSuccessful(self):
options = self.ParseArgs(
['--output-format', 'json-test-results',
'--intermediate-dir', 'some_dir'])
self.assertEqual(options.output_formats, ['json-test-results'])
self.assertEqual(options.intermediate_dir, '/path/to/curdir/some_dir')
self.assertEqual(options.output_dir, '/path/to/output_dir')
|
[
"pcding@ucdavis.edu"
] |
pcding@ucdavis.edu
|
53bd21551303a9812df6895c3a5bcf7d5342dedb
|
d772869033c47a666622e9ee518bb306db5451a5
|
/unified/modules/main/categories/crm/entities/deal.py
|
0bcaee514289e3195334ae924481bbb68f1f6ee0
|
[] |
no_license
|
funny2code/unified_api
|
920f1e19b2304e331b019f8a531d412b8759e725
|
ffa28ba0e5c0bd8ad7dd44a468e3d1e777bba725
|
refs/heads/main
| 2023-08-31T16:00:17.074427
| 2021-10-04T04:09:45
| 2021-10-04T04:09:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
from dataclasses import dataclass
@dataclass
class Deal:
deal_id: str = None
account_id: str = None
name: str = None
close_date: str = None
description: str = None
stage_id: str = None
value: str = None
probability: str = None
owner_id : str = None
contact_id: str = None
currency_id: str = None
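# A minimal usage sketch (hypothetical values, not part of the original file):
if __name__ == "__main__":
    deal = Deal(deal_id="d-001", name="Annual license", value="12000", currency_id="USD")
    print(deal)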
|
[
"baidawardipendar@gmail.com"
] |
baidawardipendar@gmail.com
|
611e284cb8350ee5e0530de97ff2121e728b6f84
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/279/66340/submittedfiles/testes.py
|
7ccd166e96f0a4767cbe69f6d5511f7efefae093
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
# -*- coding: utf-8 -*-
# START HERE BELOW
print("andre bezerra de barrros ")
print("24")
print(11+1037)
print((9*35+160)/5)
print(3.14159*(10/2)*(10/2)*30)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1c460f138444384b52eda73ccc1a7db8da23d76b
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/3999/codes/1635_2442.py
|
7d4cb10f6eb2bb08c1ebeeb9ad94276bb7866760
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
# Test your code as you go.
# Don't test everything only at the end; that makes errors harder to pin down.
# Use the error messages to fix your code.
num=int(input("Digite um numero: "))
if (num%2==0):
mensagem="par"
else:
mensagem="impar"
print(mensagem)
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
78cd02f35eb33e0dca1c10049960dc96d060c161
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part006597.py
|
f32e3be699fb19351abe7424a78bedb56216f820
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher125926(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.3.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.3.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher125926._instance is None:
CommutativeMatcher125926._instance = CommutativeMatcher125926()
return CommutativeMatcher125926._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 125925
        return
        yield  # unreachable, but its presence makes get_match_iter a generator that yields nothing
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
9b2b62d6c9e2308e570b19de28085ae1f34c35a9
|
7bcb0b7f721c8fa31da7574f13ed0056127715b3
|
/src/apps/api/resources/subscription.py
|
62e5a4c74c86c9613ca6bd0c1ba0aeca5007fa3d
|
[] |
no_license
|
simonchapman1986/ripe
|
09eb9452ea16730c105c452eefb6a6791c1b4a69
|
c129da2249b5f75015f528e4056e9a2957b7d884
|
refs/heads/master
| 2022-07-22T05:15:38.485619
| 2016-01-15T12:53:43
| 2016-01-15T12:53:43
| 49,718,671
| 1
| 0
| null | 2022-07-07T22:50:50
| 2016-01-15T12:53:09
|
Python
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
from apps.base.models import FactServicesStorefrontSubscription
from tastypie.resources import ModelResource
class SubscriptionResource(ModelResource):
class Meta:
queryset = FactServicesStorefrontSubscription.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'entry'
filtering = {
'event_time': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
}
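# Example filtered request (hypothetical URL, assumes default tastypie routing):
#   GET /api/v1/entry/?event_time__gte=2016-01-01&event_time__lte=2016-01-31&format=json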
|
[
"simon-ch@moving-picture.com"
] |
simon-ch@moving-picture.com
|
b42508610856a9da00e6b77138872e63aab1b223
|
50f04c633f36e9d64c40c4f1b434ed0c24e447c7
|
/argparse-examples/positionalarg.py
|
047332844d22ec1332227b4bb8bc6c545fec0f22
|
[] |
no_license
|
sarahchou/python-practice
|
883ba7dedd60b2cc18d5d73ef7d3cbb74f09dede
|
2a3d10144b74460d8ec513e3c7d49bdb48107596
|
refs/heads/master
| 2022-11-11T10:06:12.944579
| 2018-06-11T22:14:06
| 2018-06-11T22:14:06
| 136,985,077
| 0
| 1
| null | 2022-10-20T08:48:36
| 2018-06-11T21:54:46
|
Python
|
UTF-8
|
Python
| false
| false
| 305
|
py
|
#Introduction to positional arguments
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument("echo", help="echo the string you use here")
parser.add_argument("square", help="display a square of a given number", type=int)
args = parser.parse_args()
# print(args.echo)
print(args.square ** 2)
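# Example invocation (hypothetical):
#   $ python positionalarg.py 4
#   16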
|
[
"chou.s@husky.neu.edu"
] |
chou.s@husky.neu.edu
|
75bbbe754d344cb243580cb495baebe07914d27a
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_9/alhada001/question1.py
|
a7d1ab9e4c2362dd2297a16531f5457babdf6f3d
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
#Adam Alhadeff
import math
file = input("Enter the marks filename:\n")
f = open(file, "r")
length = len(open(file).readlines())
names = []
marks = []
line = f.readline()
count = 0
for i in range(length):
split = line.split(",")
names.append(split[0])
marks.append(split[1])
count += 1
line = f.readline()
total = 0
for i in range(len(marks)):
total = total + int(marks[i])
average = total/count
SDT = 0
for i in range(len(marks)):
SDT = SDT + (int(marks[i])-average)*(int(marks[i])-average)
SD = math.sqrt(SDT/count)
print("The average is:","%0.2f" % (average))
print("The std deviation is:","%0.2f" % (SD))
NumStudents = 0
for i in range(len(marks)):
if int(marks[i]) < (average-SD):
NumStudents += 1
if NumStudents != 0:
print("List of students who need to see an advisor:")
for i in range(len(marks)):
if int(marks[i]) < (average-SD):
print(names[i])
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
48cf3ed92a3e10d96e85fb1b15ba0340b11f90da
|
9dba8607dce414f9905700d7a4ac44668de5e1f1
|
/puente_quintanavides/combinaciones/def_hip_elscp_resumidas_xci.py
|
da24fbbb055eb1ffb3374131c83a39767b1d825f
|
[] |
no_license
|
anaiortega/XCmodels
|
c0463ffe38531578aee281456e88528882255cd7
|
e9b8c2f996a21b8aa3314242f3cc12b0e391b5df
|
refs/heads/master
| 2023-08-16T22:44:01.168775
| 2023-08-14T18:15:10
| 2023-08-14T18:15:10
| 141,140,177
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,362
|
py
|
\combinacion["ELSCP001"]{ descomp("1.00*G1 + 0.70*TC1V1")}
\combinacion["ELSCP002"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.20*NV")}
\combinacion["ELSCP009"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC3V2")}
\combinacion["ELSCP010"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP021"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC2V2")}
\combinacion["ELSCP022"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC2V2 + 0.20*NV")}
\combinacion["ELSCP041"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC1V2")}
\combinacion["ELSCP042"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC1V2 + 0.20*NV")}
\combinacion["ELSCP053"]{ descomp("1.00*G1 + 0.70*TC1V2")}
\combinacion["ELSCP054"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.20*NV")}
\combinacion["ELSCP061"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC3V1")}
\combinacion["ELSCP062"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC3V1 + 0.20*NV")}
\combinacion["ELSCP073"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC2V1")}
\combinacion["ELSCP074"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC2V1 + 0.20*NV")}
\combinacion["ELSCP093"]{ descomp("1.00*G1 + 0.70*TC2V1")}
\combinacion["ELSCP094"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.20*NV")}
\combinacion["ELSCP109"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC3V2")}
\combinacion["ELSCP110"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP129"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC2V2")}
\combinacion["ELSCP130"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC2V2 + 0.20*NV")}
\combinacion["ELSCP173"]{ descomp("1.00*G1 + 0.70*TC2V2")}
\combinacion["ELSCP174"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.20*NV")}
\combinacion["ELSCP189"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.70*TC3V1")}
\combinacion["ELSCP190"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.70*TC3V1 + 0.20*NV")}
\combinacion["ELSCP209"]{ descomp("1.00*G1 + 0.70*TC3V1")}
\combinacion["ELSCP210"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.20*NV")}
\combinacion["ELSCP217"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.70*TC3V2")}
\combinacion["ELSCP218"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP229"]{ descomp("1.00*G1 + 0.70*TC3V2")}
\combinacion["ELSCP230"]{ descomp("1.00*G1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP453"]{ descomp("1.00*G1 + 0.60*NV")}
\combinacion["ELSCP454"]{ descomp("1.00*G1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP456"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.60*NV")}
\combinacion["ELSCP458"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP461"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.60*NV")}
\combinacion["ELSCP465"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.70*TC3V1 + 0.60*NV")}
\combinacion["ELSCP470"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.60*NV")}
\combinacion["ELSCP474"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP479"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC2V2 + 0.60*NV")}
\combinacion["ELSCP490"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.60*NV")}
\combinacion["ELSCP492"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC3V1 + 0.60*NV")}
\combinacion["ELSCP495"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC2V1 + 0.60*NV")}
\combinacion["ELSCP500"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.60*NV")}
\combinacion["ELSCP502"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP505"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC2V2 + 0.60*NV")}
\combinacion["ELSCP510"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC1V2 + 0.60*NV")}
|
[
"l.pereztato@gmail.com"
] |
l.pereztato@gmail.com
|
21b67cd73c3425afe749638e23831431e4628084
|
0f07107b016d2aee64788966b9f0d322ac46b998
|
/moya/docgen/theme.py
|
39c3d707e1310f7b2799f5a59c83826bd99563b2
|
[
"MIT"
] |
permissive
|
fkztw/moya
|
35f48cdc5d5723b04c671947099b0b1af1c7cc7a
|
78b91d87b4519f91dfdd2b40dab44e72f201a843
|
refs/heads/master
| 2023-08-09T09:20:21.968908
| 2019-02-03T18:18:54
| 2019-02-03T18:18:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
from .. import iniparse
from fs.path import dirname, pathjoin
class Page(object):
def __init__(self, doc_class, settings):
self.doc_class = doc_class
self.settings = settings
def __repr__(self):
return "Page(%r, %r)" % (self.doc_class, self.settings)
def get(self, context, settings_name):
return context.sub(self.settings.get(settings_name, ""))
def get_path(self, context):
return context.sub(self.settings.get("path", ""))
class Theme(object):
def __init__(self, fs):
self.fs = fs
self.cfg = None
self.theme_settings = None
self.pages = []
self.read()
def get(self, section_name, key, default=None):
section = self.cfg.get(section_name, None)
if section is None:
return default
return section.get(key, default)
def read(self):
with self.fs.open("theme.ini", "rb") as settings_file:
cfg = iniparse.parse(settings_file)
self.cfg = cfg
self.theme_settings = cfg.get("theme", {})
for section, settings in cfg.items():
what, _, name = section.partition(":")
if what == "page":
page = Page(name, settings)
self.pages.append(page)
def get_pages(self, doc):
doc_class = doc.doc_class
for page in self.pages:
if page.doc_class == doc_class:
yield page
def get_relative_path(self, path):
ini_path = dirname(self.fs.getsyspath("theme.ini"))
path = pathjoin(ini_path, path)
return path
|
[
"willmcgugan@gmail.com"
] |
willmcgugan@gmail.com
|
5783ce1e2789f35719b925425e95f886b574fd59
|
76d8f9d741d4e0bbd15a2c29fa77d041c01ea9bf
|
/exercise/keras/trafficsign.py
|
a422aaf4c134f2d7e34383236a64a9a9fb67fcf1
|
[] |
no_license
|
LevinJ/Behavioural-Cloning-P3
|
d92bf3500797019a3fcf038a5c0e817f445e7a39
|
fff8993ba2671c9664ab65899db952e2f5de37da
|
refs/heads/master
| 2020-06-22T03:16:27.869561
| 2016-12-19T00:19:06
| 2016-12-19T00:19:06
| 74,758,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,848
|
py
|
from utility.dumpload import DumpLoad
import numpy as np
from sklearn.preprocessing import scale
import pandas as pd
import os
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Activation
from sklearn.preprocessing import OneHotEncoder
from keras.optimizers import Adam
from sklearn.cross_validation import train_test_split
from keras.layers import Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
class TrafficeSign(object):
def __init__(self):
return
def __get_data(self, filepath):
dump_load = DumpLoad(filepath)
data = dump_load.load()
features = data['features']
labels = data['labels'][:, np.newaxis]
return features, labels
def load_data(self):
self.X_train, self.y_train =self. __get_data('./train.p')
self.X_test, self.y_test = self.__get_data('./test.p')
assert(self.X_train.shape[0] == self.y_train.shape[0]), "The number of images is not equal to the number of labels."
assert(self.X_train.shape[1:] == (32,32,3)), "The dimensions of the images are not 32 x 32 x 3."
return
    def normalize_data(self):
        # Scale pixel values into the range [-0.5, 0.5] using training-set min/max.
        new_max = 0.5
        new_min = -0.5
        train_min = self.X_train.min()
        train_max = self.X_train.max()
        self.X_train = self.X_train.astype('float32')
        self.X_test = self.X_test.astype('float32')
        # normalize training/val data
        self.X_train = (self.X_train - train_min) / (train_max - train_min) * (new_max - new_min) + new_min
        # normalize test data with the same training-set statistics
        self.X_test = (self.X_test - train_min) / (train_max - train_min) * (new_max - new_min) + new_min
# scaler = MinMaxScaler(feature_range=(-0.5, 0.5))
# self.X_train = scaler.fit_transform(self.X_train.ravel())
assert(round(np.mean(self.X_train)) == 0), "The mean of the input data is: %f" % np.mean(self.X_train)
assert(np.min(self.X_train) == -0.5 and np.max(self.X_train) == 0.5), "The range of the input data is: %.1f to %.1f" % (np.min(self.X_train), np.max(self.X_train))
return
def two_layer_net(self):
model = Sequential()
model.add(Dense(128, input_dim=32*32*3, name="hidden1"))
model.add(Activation("relu"))
model.add(Dense(output_dim=43, name="output"))
model.add(Activation("softmax"))
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert(model.get_layer(name="hidden1").input_shape == (None, 32*32*3)), "The input shape is: %s" % model.get_layer(name="hidden1").input_shape
assert(model.get_layer(name="output").output_shape == (None, 43)), "The output shape is: %s" % model.get_layer(name="output").output_shape
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
metrics=['accuracy'])
self.encoder = OneHotEncoder(sparse=False).fit(self.y_train)
y_train_encoded = self.encoder.transform(self.y_train)
history = model.fit(self.X_train.reshape(-1,32*32*3), y_train_encoded, nb_epoch=2, batch_size=32, verbose=2)
# STOP: Do not change the tests below. Your implementation should pass these tests.
print("The training accuracy was: {}".format( history.history['acc']))
assert(history.history['acc'][0] > 0.5), "The training accuracy was: {}".format( history.history['acc'])
return
def two_layer_net_split(self):
model = Sequential()
model.add(Dense(128, input_dim=32*32*3, name="hidden1"))
model.add(Activation("relu"))
model.add(Dense(output_dim=43, name="output"))
model.add(Activation("softmax"))
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert(model.get_layer(name="hidden1").input_shape == (None, 32*32*3)), "The input shape is: %s" % model.get_layer(name="hidden1").input_shape
assert(model.get_layer(name="output").output_shape == (None, 43)), "The output shape is: %s" % model.get_layer(name="output").output_shape
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
metrics=['accuracy'])
self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(self.X_train, self.y_train, test_size=0.25, random_state=42)
self.encoder = OneHotEncoder(sparse=False,n_values = 43).fit(self.y_train)
y_train_encoded = self.encoder.transform(self.y_train)
y_val_encoded = self.encoder.transform(self.y_val)
history = model.fit(self.X_train.reshape(-1,32*32*3), y_train_encoded, nb_epoch=2, batch_size=32, verbose=2,
validation_data=(self.X_val.reshape(-1,32*32*3), y_val_encoded))
# STOP: Do not change the tests below. Your implementation should pass these tests.
        assert(round(self.X_train.shape[0] / float(self.X_val.shape[0])) == 3), "The training set is %.3f times larger than the validation set." % (self.X_train.shape[0] / float(self.X_val.shape[0]))
assert(history.history['val_acc'][0] > 0.6), "The validation accuracy is: %.3f" % history.history['val_acc'][0]
return
def cnn_net(self):
model = Sequential()
#layer 1
model.add(Convolution2D(32, 3, 3,
border_mode='valid',
input_shape=(32,32,3), name="conv1"))
model.add(Activation('relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.5))
#layer 2
model.add(Flatten())
model.add(Dense(128, name="hidden1"))
model.add(Activation("relu"))
#layer 3
model.add(Dense(output_dim=43, name="output"))
model.add(Activation("softmax"))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
metrics=['accuracy'])
self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(self.X_train, self.y_train, test_size=0.25, random_state=42)
self.encoder = OneHotEncoder(sparse=False,n_values = 43).fit(self.y_train)
y_train_encoded = self.encoder.transform(self.y_train)
y_val_encoded = self.encoder.transform(self.y_val)
y_test_encoded = self.encoder.transform(self.y_test)
history = model.fit(self.X_train, y_train_encoded, nb_epoch=30, batch_size=32, verbose=2,
validation_data=(self.X_val, y_val_encoded))
# STOP: Do not change the tests below. Your implementation should pass these tests.
#assert(history.history['val_acc'][0] > 0.9), "The validation accuracy is: %.3f" % history.history['val_acc'][0]
_, train_acc = model.evaluate(self.X_train, y_train_encoded, verbose=0)
_, val_acc = model.evaluate(self.X_val, y_val_encoded, verbose=0)
_, test_acc = model.evaluate(self.X_test, y_test_encoded, verbose=0)
print('train{:.3f}, val{:.3f}: test{:.3f}'.format(train_acc, val_acc, test_acc))
return
def run(self):
self.load_data()
self.normalize_data()
# self.two_layer_net()
# self.two_layer_net_split()
self.cnn_net()
return
if __name__ == "__main__":
obj= TrafficeSign()
obj.run()
|
[
"jianzhirong@gmail.com"
] |
jianzhirong@gmail.com
|
92ea114b1907807cc47d45d2b77ee51981cafab8
|
887f2e664c6d92f17e784f57022333a2fb859d06
|
/analysis/plotMove.py
|
252a91a4c6be6dc9ba8b647cac05970a426f3080
|
[] |
no_license
|
ctorney/dolphinUnion
|
1968e258c6045060b2c921bd723d0ef0daea0147
|
9d7212d172a8a48a36fc4870fcdb04d66130bb76
|
refs/heads/master
| 2021-01-19T04:40:57.286526
| 2017-08-17T20:44:58
| 2017-08-17T20:44:58
| 46,424,670
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,332
|
py
|
import numpy as np
import pandas as pd
import os, re
import math
import time
from scipy import interpolate
from scipy import ndimage
import matplotlib.pyplot as plt
import matplotlib.animation as ani
HD = os.getenv('HOME')
FILELIST = HD + '/workspace/dolphinUnion/tracking/solo/fileList.csv'
DATADIR = HD + '/Dropbox/dolphin_union/2015_footage/Solo/processedTracks/'
df = pd.read_csv(FILELIST)
for index, row in df.iterrows():
noext, ext = os.path.splitext(row.filename)
posfilename = DATADIR + '/TRACKS_' + str(index) + '_' + noext + '.csv'
gridfilename = DATADIR + '/GRID_' + str(index) + '_' + noext + '.npy'
gridPosfilename = DATADIR + '/GRIDPOS_' + str(index) + '_' + noext + '.npy'
posDF = pd.read_csv(posfilename)
posDF = posDF[posDF['frame']%60==0]
# posDF['x']=posDF['x']-min(posDF['x'])
# posDF['y']=posDF['y']-min(posDF['y'])
# xrange = max(posDF['x'])
# yrange = max(posDF['y'])
# nx = math.ceil(xrange/32)
# ny = math.ceil(yrange/32)
# grid = np.zeros((nx,ny,2))
# gridPos = np.zeros((nx,ny,2))
# xh = np.cos(posDF['heading'].values)
# yh = np.sin(posDF['heading'].values)
# xdirs = posDF['dx'].values
# ydirs = posDF['dy'].values
# xp = posDF['x'].values
# yp = posDF['y'].values
# kappa = 32.0*32.0
# for i in range(nx):
# for j in range(ny):
# gx = i * 32
# gy = j * 32
# dists = (((posDF['x'].values - gx)**2 + (posDF['y'].values - gy)**2))
# weights = np.exp(-dists/kappa)
# gridPos[i,j,0]=gx
# gridPos[i,j,1]=gy
# xav = np.sum(weights*xdirs)/np.sum(weights)
# yav = np.sum(weights*ydirs)/np.sum(weights)
# grid[i,j,0]=xav/math.sqrt(xav**2+yav**2)
# grid[i,j,1]=yav/math.sqrt(xav**2+yav**2)
grid = np.load(gridfilename)
gridPos = np.load(gridPosfilename)
#plt.quiver(xp,yp,xh,yh,angles='xy', scale_units='xy', color='r', scale=1.0/32.0)
#plt.quiver(gridPos[:,:,0],gridPos[:,:,1],grid[:,:,0],grid[:,:,1],angles='xy', scale_units='xy', scale=1.0/32.0)
winLen = 30
w = np.kaiser(winLen,1)
w = w/w.sum()
maxRange = 0
flen = len(posDF.groupby('frame'))
Xcentroids = np.zeros((flen))
Ycentroids = np.zeros((flen))
fc=0
for fnum, frame in posDF.groupby('frame'):
dist = max(frame['x'].values)-min(frame['x'].values)
if dist>maxRange:
maxRange=dist
dist = max(frame['y'].values)-min(frame['y'].values)
if dist>maxRange:
maxRange=dist
Xcentroids[fc] = np.average(frame['x'].values)
Ycentroids[fc] = np.average(frame['y'].values)
fc=fc+1
Xcentroids = np.r_[np.ones((winLen))*Xcentroids[0],Xcentroids,np.ones((winLen))*Xcentroids[-1]]
Xcentroids = np.convolve(w/w.sum(),Xcentroids,mode='same')[(winLen):-(winLen)]
Ycentroids = np.r_[np.ones((winLen))*Ycentroids[0],Ycentroids,np.ones((winLen))*Ycentroids[-1]]
Ycentroids = np.convolve(w/w.sum(),Ycentroids,mode='same')[(winLen):-(winLen)]
sz = math.ceil(maxRange/32)*16
fig = plt.figure()#figsize=(10, 10), dpi=5)
totalFrames =500
fc = 0
#with writer.saving(fig, "move.mp4", totalFrames):# len(posDF.groupby('frame'))):
for fnum, frame in posDF.groupby('frame'):
fc = fc + 1
if fc>totalFrames:
break
#frame = frame[frame.c_id==0]
xp = frame['x'].values
yp = frame['y'].values
xh = 0.1*frame['dx'].values
yh = 0.1*frame['dy'].values
xc = Xcentroids[fc]
yc = Ycentroids[fc]
plt.clf()
plt.quiver(gridPos[:,:,0],gridPos[:,:,1],grid[:,:,0],grid[:,:,1],angles='xy', scale_units='xy', scale=1.0/32.0, headwidth=1)
l, = plt.plot(xp,yp, 'ro')
plt.quiver(xp,yp,xh,yh,angles='xy', scale_units='xy', color='r', scale=1.0/32.0, headwidth=1.5)
#plt.axis([0,4000, 2000,-2000])
plt.axis('equal')
l.axes.get_xaxis().set_visible(False)
l.axes.get_yaxis().set_visible(False)
l.set_data(xp, yp)
l.axes.set_xlim(xc-sz,xc+sz)
l.axes.set_ylim(yc-sz,yc+sz)
plt.savefig('frames/fig'+'{0:05d}'.format(fc)+'.png')
#writer.grab_frame()
break
|
[
"colin.j.torney@gmail.com"
] |
colin.j.torney@gmail.com
|
7db1a2988c552372fb5395ea469d95dd7642b33f
|
f561a219c57bd75790d3155acac6f54299a88b08
|
/splash_screen/migrations/0001_initial.py
|
595c80d3d3c899474e567d2f95d683c19e6bc3ae
|
[] |
no_license
|
ujjwalagrawal17/OfferCartServer
|
1e81cf2dc17f19fa896062c2a084e6b232a8929e
|
b3cd1c5f8eecc167b6f4baebed3c4471140d905f
|
refs/heads/master
| 2020-12-30T15:31:04.380084
| 2017-05-24T18:26:20
| 2017-05-24T18:26:20
| 91,155,405
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-06 17:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='FcmData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fcm', models.CharField(blank=True, max_length=512, null=True)),
('created', models.DateTimeField(auto_now=True)),
('modified', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='VersionData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.SmallIntegerField(default=0)),
('compulsory_update', models.SmallIntegerField(default=0)),
('version_type', models.CharField(blank=True, max_length=120, null=True)),
('created', models.DateTimeField(auto_now=True)),
('modified', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"ujjwal.iitism@gmail.com"
] |
ujjwal.iitism@gmail.com
|
57ce4e23c369d0ac1c8990a08dd6f14bffa13f86
|
ef3a7391b0a5c5d8e276355e97cbe4de621d500c
|
/venv/Lib/site-packages/spacy/tests/lang/en/test_exceptions.py
|
6285a94089db310ac5481689b6030d62f9ea8679
|
[
"Apache-2.0"
] |
permissive
|
countBMB/BenjiRepo
|
143f6da5d198ea6f06404b4559e1f4528b71b3eb
|
79d882263baaf2a11654ca67d2e5593074d36dfa
|
refs/heads/master
| 2022-12-11T07:37:04.807143
| 2019-12-25T11:26:29
| 2019-12-25T11:26:29
| 230,090,428
| 1
| 1
|
Apache-2.0
| 2022-12-08T03:21:09
| 2019-12-25T11:05:59
|
Python
|
UTF-8
|
Python
| false
| false
| 4,097
|
py
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
def test_en_tokenizer_handles_basic_contraction(en_tokenizer):
text = "don't giggle"
tokens = en_tokenizer(text)
assert len(tokens) == 3
assert tokens[1].text == "n't"
text = "i said don't!"
tokens = en_tokenizer(text)
assert len(tokens) == 5
assert tokens[4].text == "!"
@pytest.mark.parametrize("text", ["`ain't", """"isn't""", "can't!"])
def test_en_tokenizer_handles_basic_contraction_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize(
"text_poss,text", [("Robin's", "Robin"), ("Alexis's", "Alexis")]
)
def test_en_tokenizer_handles_poss_contraction(en_tokenizer, text_poss, text):
tokens = en_tokenizer(text_poss)
assert len(tokens) == 2
assert tokens[0].text == text
assert tokens[1].text == "'s"
@pytest.mark.parametrize("text", ["schools'", "Alexis'"])
def test_en_tokenizer_splits_trailing_apos(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == text.split("'")[0]
assert tokens[1].text == "'"
@pytest.mark.parametrize("text", ["'em", "nothin'", "ol'"])
def test_en_tokenizer_doesnt_split_apos_exc(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].text == text
@pytest.mark.parametrize("text", ["we'll", "You'll", "there'll"])
def test_en_tokenizer_handles_ll_contraction(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == text.split("'")[0]
assert tokens[1].text == "'ll"
assert tokens[1].lemma_ == "will"
@pytest.mark.parametrize(
"text_lower,text_title", [("can't", "Can't"), ("ain't", "Ain't")]
)
def test_en_tokenizer_handles_capitalization(en_tokenizer, text_lower, text_title):
tokens_lower = en_tokenizer(text_lower)
tokens_title = en_tokenizer(text_title)
assert tokens_title[0].text == tokens_lower[0].text.title()
assert tokens_lower[0].text == tokens_title[0].text.lower()
assert tokens_lower[1].text == tokens_title[1].text
@pytest.mark.parametrize("pron", ["I", "You", "He", "She", "It", "We", "They"])
@pytest.mark.parametrize("contraction", ["'ll", "'d"])
def test_en_tokenizer_keeps_title_case(en_tokenizer, pron, contraction):
tokens = en_tokenizer(pron + contraction)
assert tokens[0].text == pron
assert tokens[1].text == contraction
@pytest.mark.parametrize("exc", ["Ill", "ill", "Hell", "hell", "Well", "well"])
def test_en_tokenizer_excludes_ambiguous(en_tokenizer, exc):
tokens = en_tokenizer(exc)
assert len(tokens) == 1
@pytest.mark.parametrize(
"wo_punct,w_punct", [("We've", "`We've"), ("couldn't", "couldn't)")]
)
def test_en_tokenizer_splits_defined_punct(en_tokenizer, wo_punct, w_punct):
tokens = en_tokenizer(wo_punct)
assert len(tokens) == 2
tokens = en_tokenizer(w_punct)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["e.g.", "p.m.", "Jan.", "Dec.", "Inc."])
def test_en_tokenizer_handles_abbr(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 1
def test_en_tokenizer_handles_exc_in_text(en_tokenizer):
text = "It's mediocre i.e. bad."
tokens = en_tokenizer(text)
assert len(tokens) == 6
assert tokens[3].text == "i.e."
@pytest.mark.parametrize("text", ["1am", "12a.m.", "11p.m.", "4pm"])
def test_en_tokenizer_handles_times(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[1].lemma_ in ["a.m.", "p.m."]
@pytest.mark.parametrize(
"text,norms", [("I'm", ["i", "am"]), ("shan't", ["shall", "not"])]
)
def test_en_tokenizer_norm_exceptions(en_tokenizer, text, norms):
tokens = en_tokenizer(text)
assert [token.norm_ for token in tokens] == norms
@pytest.mark.parametrize(
"text,norm", [("radicalised", "radicalized"), ("cuz", "because")]
)
def test_en_lex_attrs_norm_exceptions(en_tokenizer, text, norm):
tokens = en_tokenizer(text)
assert tokens[0].norm_ == norm
|
[
"bengmen92@gmail.com"
] |
bengmen92@gmail.com
|
78e8a604cecf27fe811b0c948ad111c099ce963d
|
e54e1a63bffbe913f5e5018ace56cfa3eab1a72b
|
/practice/Leetcode/1253_reconstruct_a_2_row_binary_matrix.py
|
2b71d9f4f15e8a18ca410c4daa4699f6e1846cec
|
[] |
no_license
|
rmodi6/scripts
|
5e27a46ce8970cbf601f132a53164c273f1812ea
|
7cc47eecac00e6bd0b3ec74d7eed8ec3e0e77a84
|
refs/heads/master
| 2022-02-14T20:41:28.765751
| 2022-01-20T06:59:40
| 2022-01-20T06:59:40
| 168,207,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
# https://leetcode.com/contest/weekly-contest-162/problems/reconstruct-a-2-row-binary-matrix/
import numpy as np
from typing import List

class Solution:
def reconstructMatrix(self, upper: int, lower: int, colsum: List[int]) -> List[List[int]]:
if upper + lower != sum(colsum):
return []
ans = np.zeros((2, len(colsum)), dtype='int32')
for i, n in enumerate(colsum):
if n == 2:
if upper > 0 and lower > 0:
ans[0][i], ans[1][i] = 1, 1
upper -= 1
lower -= 1
else:
return []
for i, n in enumerate(colsum):
if n == 1:
if upper > 0:
ans[0][i] = 1
upper -= 1
elif lower > 0:
ans[1][i] = 1
lower -= 1
else:
return []
return ans.tolist()
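# A quick sanity check (hypothetical input, not part of the original file):
if __name__ == "__main__":
    print(Solution().reconstructMatrix(2, 1, [1, 1, 1]))  # -> [[1, 1, 0], [0, 0, 1]]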
|
[
"modi.ruchit6@gmail.com"
] |
modi.ruchit6@gmail.com
|
0212f9e5951c9f222ca5a846a070bf81530f2a1c
|
47c175daf97051e1f5c37b161f16abbd5f5a506e
|
/modules/forward_attention.py
|
1572b09366af7b47ef3fe8cd017f1bbae7507555
|
[
"BSD-3-Clause"
] |
permissive
|
nii-yamagishilab/self-attention-tacotron
|
947d1d2eb8bc25f70331fbc401bf44c93ef92673
|
0ebd96114feab5a499964402a8ab7e402f0083b4
|
refs/heads/master
| 2021-07-11T06:13:18.202669
| 2020-06-19T03:04:42
| 2020-06-19T03:04:42
| 156,176,608
| 116
| 35
|
BSD-3-Clause
| 2020-06-19T03:04:43
| 2018-11-05T07:21:46
|
Python
|
UTF-8
|
Python
| false
| false
| 6,383
|
py
|
# ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: Yusuke Yasuda (yasuda@nii.ac.jp)
# All rights reserved.
# ==============================================================================
""" """
import tensorflow as tf
from tensorflow.contrib.seq2seq import BahdanauAttention
from collections import namedtuple
def _location_sensitive_score(W_query, W_fill, W_keys):
dtype = W_query.dtype
num_units = W_keys.shape[-1].value or tf.shape(W_keys)[-1]
v_a = tf.get_variable("attention_variable",
shape=[num_units],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer())
b_a = tf.get_variable("attention_bias",
shape=[num_units],
dtype=dtype,
initializer=tf.zeros_initializer())
return tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fill + b_a), axis=[2])
def _calculate_context(alignments, values):
'''
This is a duplication of tensorflow.contrib.seq2seq.attention_wrapper._compute_attention.
ToDo: Avoid the redundant computation. This requires abstraction of AttentionWrapper itself.
:param alignments: [batch_size, 1, memory_time]
:param values: [batch_size, memory_time, memory_size]
:return:
'''
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = tf.expand_dims(alignments, 1)
context = tf.matmul(expanded_alignments, values) # [batch_size, 1, memory_size]
context = tf.squeeze(context, [1]) # [batch_size, memory_size]
return context
class ForwardAttentionState(namedtuple("ForwardAttentionState", ["alignments", "alpha", "u"])):
pass
class ForwardAttention(BahdanauAttention):
def __init__(self,
num_units,
memory,
memory_sequence_length,
attention_kernel,
attention_filters,
use_transition_agent=False,
cumulative_weights=True,
name="ForwardAttention"):
super(ForwardAttention, self).__init__(
num_units=num_units,
memory=memory,
memory_sequence_length=memory_sequence_length,
probability_fn=None,
name=name)
self._use_transition_agent = use_transition_agent
self._cumulative_weights = cumulative_weights
self.location_convolution = tf.layers.Conv1D(filters=attention_filters,
kernel_size=attention_kernel,
padding="SAME",
use_bias=True,
bias_initializer=tf.zeros_initializer(),
name="location_features_convolution")
self.location_layer = tf.layers.Dense(units=num_units,
use_bias=False,
dtype=memory.dtype,
name="location_features_layer")
if use_transition_agent:
# ToDo: support speed control bias
self.transition_factor_projection = tf.layers.Dense(units=1,
use_bias=True,
dtype=memory.dtype,
activation=tf.nn.sigmoid,
name="transition_factor_projection")
def __call__(self, query, state):
previous_alignments, prev_alpha, prev_u = state
with tf.variable_scope(None, "location_sensitive_attention", [query]):
# processed_query shape [batch_size, query_depth] -> [batch_size, attention_dim]
processed_query = self.query_layer(query) if self.query_layer else query
# -> [batch_size, 1, attention_dim]
expanded_processed_query = tf.expand_dims(processed_query, 1)
# [batch_size, max_time] -> [batch_size, max_time, 1]
expanded_alignments = tf.expand_dims(previous_alignments, axis=2)
# location features [batch_size, max_time, filters]
f = self.location_convolution(expanded_alignments)
processed_location_features = self.location_layer(f)
energy = _location_sensitive_score(expanded_processed_query, processed_location_features, self.keys)
alignments = self._probability_fn(energy, state)
# forward attention
prev_alpha_n_minus_1 = tf.pad(prev_alpha[:, :-1], paddings=[[0, 0], [1, 0]])
alpha = ((1 - prev_u) * prev_alpha + prev_u * prev_alpha_n_minus_1 + 1e-7) * alignments
alpha_normalized = alpha / tf.reduce_sum(alpha, axis=1, keep_dims=True)
if self._use_transition_agent:
context = _calculate_context(alpha_normalized, self.values)
transition_factor_input = tf.concat([context, processed_query], axis=-1)
transition_factor = self.transition_factor_projection(transition_factor_input)
else:
transition_factor = prev_u
if self._cumulative_weights:
next_state = ForwardAttentionState(alignments + previous_alignments, alpha_normalized, transition_factor)
else:
next_state = ForwardAttentionState(alignments, alpha_normalized, transition_factor)
return alpha_normalized, next_state
@property
def state_size(self):
return ForwardAttentionState(self._alignments_size, self._alignments_size, 1)
def initial_state(self, batch_size, dtype):
initial_alignments = self.initial_alignments(batch_size, dtype)
# alpha_0 = 1, alpha_n = 0 where n = 2, 3, ..., N
initial_alpha = tf.concat([
tf.ones([batch_size, 1], dtype=dtype),
tf.zeros_like(initial_alignments, dtype=dtype)[:, 1:]], axis=1)
# transition factor
initial_u = 0.5 * tf.ones([batch_size, 1], dtype=dtype)
return ForwardAttentionState(initial_alignments, initial_alpha, initial_u)
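# --- Hedged illustration (added; not part of the original file) ---
# A minimal NumPy sketch of the forward-attention recursion implemented in
# ForwardAttention.__call__ above, assuming the current-step alignments `a`
# are already softmax-normalized. All names below are illustrative.
import numpy as np
def forward_attention_step(a, prev_alpha, prev_u, eps=1e-7):
    """a, prev_alpha: [batch, memory_time]; prev_u: [batch, 1]."""
    # alpha_{n-1} term: shift prev_alpha right by one memory position
    shifted = np.pad(prev_alpha[:, :-1], ((0, 0), (1, 0)))
    alpha = ((1 - prev_u) * prev_alpha + prev_u * shifted + eps) * a
    return alpha / alpha.sum(axis=1, keepdims=True)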
|
[
"yusuke.007.yasud@gmail.com"
] |
yusuke.007.yasud@gmail.com
|
c208f338b0b8e717f7788e70ab415ccb06596ec2
|
be6ce691a3667edf152859f16804e06aaa486a03
|
/solution1/deprecated.py
|
f70d197db4f26a45a6e6cf1f3aaa93a6efa255a6
|
[] |
no_license
|
mik-laj/airflow-deprecation-sample
|
d9b7d068013884177fec833e234914c6a1ec8be3
|
ae1f93ac6ab85cec4c57dcb62f956fec73d88bbe
|
refs/heads/master
| 2020-04-23T00:13:41.579998
| 2019-07-30T13:17:29
| 2019-07-30T13:17:50
| 170,771,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
import warnings
from solution1.new import *
warnings.warn("solution1.deprecated has moved to solution1.new. Import of "
"solution.new will become unsupported in version 2",
DeprecationWarning, 2)
|
[
"kamil.bregula@polidea.com"
] |
kamil.bregula@polidea.com
|
283845b8c4a81738b39c332e062e558f4a1fa42f
|
e03f502312775b01b41ea7c6f5cb3dfbafdb8509
|
/aboutus/api/serializers.py
|
189187d979c6f742e946a41c169302bc8c45fb14
|
[] |
no_license
|
Grechanin/Misteckiy-DjangoRest-React-Redux
|
e223e89310362b8c21e30c8c669d4e170d232db6
|
f05eb50a6aec72432716672294df81c3dc939ddd
|
refs/heads/master
| 2020-04-13T10:58:17.931584
| 2019-02-18T09:21:54
| 2019-02-18T09:21:54
| 163,159,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
from aboutus.models import AboutUs
from rest_framework import serializers
class AboutUsSerializer(serializers.ModelSerializer):
class Meta:
model = AboutUs
fields = (
'tab_title',
'title',
'short_description',
'description',
)
|
[
"grechanin@gmail.com"
] |
grechanin@gmail.com
|
84943acbf7b7b989ac08e4c3d173d53799243119
|
2dd560dc468af0af4ca44cb4cd37a0b807357063
|
/Leetcode/1170. Compare Strings by Frequency of the Smallest Character/solution2.py
|
f55713f52de924d420434c926569d1d9fb130de7
|
[
"MIT"
] |
permissive
|
hi0t/Outtalent
|
460fe4a73788437ba6ce9ef1501291035c8ff1e8
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
refs/heads/master
| 2023-02-26T21:16:56.741589
| 2021-02-05T13:36:50
| 2021-02-05T13:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
from bisect import bisect
class Solution:
def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:
f = sorted([w.count(min(w)) for w in words])
return [len(f) - bisect(f, q.count(min(q))) for q in queries]
|
[
"info@crazysquirrel.ru"
] |
info@crazysquirrel.ru
|
932f0f3ca464a0e327e0dcff6fe1f74ce0621071
|
e10a6d844a286db26ef56469e31dc8488a8c6f0e
|
/routing_transformer/routing_tf_api.py
|
62feaeaa11136632e25caf46ffb158383e6714e4
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
Jimmy-INL/google-research
|
54ad5551f97977f01297abddbfc8a99a7900b791
|
5573d9c5822f4e866b6692769963ae819cb3f10d
|
refs/heads/master
| 2023-04-07T19:43:54.483068
| 2023-03-24T16:27:28
| 2023-03-24T16:32:17
| 282,682,170
| 1
| 0
|
Apache-2.0
| 2020-07-26T15:50:32
| 2020-07-26T15:50:31
| null |
UTF-8
|
Python
| false
| false
| 7,727
|
py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pdb
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.get_logger().setLevel('ERROR')
from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import hparams_lib
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import problem
from routing_transformer.problems import pg19
from tensorflow.compat.v1 import estimator as tf_estimator
from tqdm import tqdm
from routing_transformer.sparse_transformer import SparseTransformer
import numpy as np
import random
from scipy.special import log_softmax
VOCAB_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-data/vocab.pg19_length8k.32768.subwords"
HPARAMS_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/hparams.json"
CKPT_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/ckpt-3530000"
MAX_SEQUENCE_LENGTH = 8192
class SparseTransformerWrapper(object):
def __init__(self, max_seq_length=None):
# Load hyperparameters
self.max_seq_length = max_seq_length or MAX_SEQUENCE_LENGTH
# Needed since RT uses blocks of size 256
assert self.max_seq_length % 256 == 0
hparams = hparams_lib.create_hparams_from_json(HPARAMS_PATH)
hparams.use_tpu = False
hparams = zero_dropout(hparams)
# Build TF1 graph of model
sptf_model = SparseTransformer(hparams, tf_estimator.ModeKeys.EVAL)
self.input_nodes = {
"targets": tf.placeholder(tf.int32, [None, self.max_seq_length])
}
self.output_nodes = sptf_model.body(self.input_nodes)
# Map the checkpoint variables to the graph
init_from_checkpoint(CKPT_PATH, variable_prefix="sparse_transformer/body")
# create a session object, and actually initialize the graph
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.encoder = text_encoder.SubwordTextEncoder(VOCAB_PATH)
def forward(self, sentences, encode_sentences=True, relevant_subsequences=None):
encoded_sents = []
encoded_seqs_no_pad = []
if encode_sentences:
for sent in sentences:
encoded = []
for line in sent.split("\n"):
new_tokens = self.encoder.encode(line.strip())
if len(encoded) + len(new_tokens) >= self.max_seq_length:
break
encoded.extend(new_tokens)
encoded.append(text_encoder.EOS_ID)
encoded_seqs_no_pad.append(encoded)
# pad shorter sequences to the full length
encoded = encoded + [text_encoder.PAD_ID for _ in range(self.max_seq_length - len(encoded))]
assert len(encoded) == self.max_seq_length
encoded_sents.append(encoded)
else:
# assume sentences are encoded, pad/truncate them
for sent in sentences:
sent = sent[:self.max_seq_length]
encoded_seqs_no_pad.append(sent)
sent = sent + [text_encoder.PAD_ID for _ in range(self.max_seq_length - len(sent))]
encoded_sents.append(sent)
feed_dict = {
self.input_nodes["targets"]: np.array(encoded_sents)
}
outputs = self.sess.run(self.output_nodes, feed_dict=feed_dict)
return_outputs = {
"logits": np.squeeze(outputs[0], axis=(2, 3)),
"loss": outputs[1]["training"],
"encoded_seqs_no_pad": encoded_seqs_no_pad
}
if relevant_subsequences is not None:
for i, rss in enumerate(relevant_subsequences):
encoded_subseq = self.encoder.encode(rss)
positions = find_sub_list(encoded_subseq, encoded_sents[i])
misaligned_prefix_length = 0
while positions is None:
misaligned_prefix_length += 1
encoded_subseq = encoded_subseq[1:]
positions = find_sub_list(encoded_subseq, encoded_sents[i])
start, end = positions[-1]
relevant_logits = return_outputs["logits"][i][start:end]
log_probs = log_softmax(relevant_logits, axis=1)
gold_log_probs = [lp[index] for index, lp in zip(encoded_subseq, log_probs)]
return_outputs["subseq_log_loss"] = -1 * np.mean(gold_log_probs)
return_outputs["misaligned_prefix_length"] = misaligned_prefix_length
return return_outputs
def close(self):
self.sess.close()
def find_sub_list(sl, l):
"""Find sub-string, so as to be able to compute ppl of a sub-string."""
sll=len(sl)
matches = []
for ind in (i for i,e in enumerate(l) if e == sl[0]):
if l[ind:ind + sll] == sl:
matches.append(
(ind, ind + sll)
)
if matches:
return matches
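# Hedged usage note (added; not part of the original file): matches are
# half-open (start, end) index pairs over the token list, e.g.
#   find_sub_list([2, 3], [1, 2, 3, 2, 3]) -> [(1, 3), (3, 5)]
# and the function implicitly returns None when there is no match.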
def zero_dropout(hparams):
hparams.input_dropout = 0.0
hparams.dropout = 0.0
hparams.relu_dropout = 0.0
hparams.attention_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
return hparams
def log_variables(name, var_names):
tf.logging.info("%s (%d total): %s", name, len(var_names),
random.sample(var_names, min(len(var_names), 5)))
def init_from_checkpoint(checkpoint_path,
checkpoint_prefix=None,
variable_prefix=None,
target_variables=None):
"""Initializes all of the variables using `init_checkpoint."""
tf.logging.info("Loading variables from %s", checkpoint_path)
checkpoint_variables = {
name: name for name, _ in tf.train.list_variables(checkpoint_path) if "Adafactor" not in name
}
if target_variables is None:
target_variables = tf.trainable_variables()
target_variables = {var.name.split(":")[0]: var for var in target_variables}
if checkpoint_prefix is not None:
checkpoint_variables = {
checkpoint_prefix + "/" + name: varname
for name, varname in checkpoint_variables.items()
}
if variable_prefix is not None:
target_variables = {
variable_prefix + "/" + name: var
for name, var in target_variables.items()
}
checkpoint_var_names = set(checkpoint_variables.keys())
target_var_names = set(target_variables.keys())
intersected_var_names = target_var_names & checkpoint_var_names
assignment_map = {
checkpoint_variables[name]: target_variables[name]
for name in intersected_var_names
}
tf.train.init_from_checkpoint(checkpoint_path, assignment_map)
log_variables("Loaded variables", intersected_var_names)
log_variables("Uninitialized variables", target_var_names - checkpoint_var_names)
log_variables("Unused variables", checkpoint_var_names - target_var_names)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
64e7542df83df9bd0d6edf9f81dd3c5add9aef71
|
0800aac473cbb94f3ac263c202979498c326cf18
|
/법인세_총설.py
|
a437c75324c85c0332211d27ad24fe8df470b893
|
[] |
no_license
|
DanielHennyKwon/TAX_LIM_JEONG
|
8f12e072c044cd17646f196c17b51d1e0cae179e
|
a263b4e90f0ac78500382047bf7ae72380213ca8
|
refs/heads/master
| 2023-06-16T10:50:55.111407
| 2021-07-11T02:59:50
| 2021-07-11T02:59:50
| 384,847,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,432
|
py
|
# -*- coding: utf-8 -*-
# 2018-12-24 권달현
import 결산의확정, 신고납부절차, 기한후신고, 수정신고, 경정청구, 법인의분류, 세금의종류, 실질과세, 소액주주, 대주주, 중소기업, 이월과세, 과세이연, 세무조정, 소득처분, 법인세비용, 세액계산_구조,세무조정_흐름도
_={
"Finalization of accounts":결산의확정.결산의확정,
"Corporate tax return filing and payment procedure":신고납부절차.법인세,
"Return filed after the due date":기한후신고.법인세,
"Amended return":수정신고._,
"Claim for correction":경정청구._,
"Classification of corporations under the Corporate Tax Act":법인의분류.법인세,
"Types of corporate tax":세금의종류.법인세,
"Substance-over-form taxation":실질과세.법인세,
"Minority shareholders":소액주주.법인세,
"Major shareholders":대주주.법인세,
"Small and medium-sized enterprises":중소기업._,
"Carryover taxation":이월과세.법인세,
"Tax deferral":과세이연.법인세,
"Tax adjustment flowchart":세무조정_흐름도.법인세,
"Tax adjustments":세무조정.법인세,
"Disposition of income":소득처분.법인세,
"Structure of computing each business year's income, tax base and tax amount":세액계산_구조.법인세,
"Corporate income tax expense":법인세비용.법인세,
}
#___________________________________________________
제목='Corporate Tax: Overview'
tax=_
import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,parent=None,title=제목)
self.SetSize(420,320*2)
self.mainPanel=wx.Panel(self)
self.expandButton=wx.Button(self.mainPanel,label='펼침')
self.tree=wx.TreeCtrl(self.mainPanel)
root=self.tree.AddRoot(제목)
for i in tax:
ii=self.tree.AppendItem(root,i)
for j in tax[i]:
jj=self.tree.AppendItem(ii,j)
for k in tax[i][j]:
kk=self.tree.AppendItem(jj,k)
for m in tax[i][j][k]:
mm=self.tree.AppendItem(kk,m)
for n in tax[i][j][k][m]:
nn=self.tree.AppendItem(mm,n)
for p in tax[i][j][k][m][n]:
pp=self.tree.AppendItem(nn,p)
for q in tax[i][j][k][m][n][p]:
qq=self.tree.AppendItem(pp,q)
for r in tax[i][j][k][m][n][p][q]:
rr=self.tree.AppendItem(qq,r)
self.staticText =wx.TextCtrl(self.mainPanel,style=wx.TE_MULTILINE)
self.vtBoxSizer=wx.BoxSizer(wx.VERTICAL)
self.vtBoxSizer.Add(self.expandButton,0,wx.EXPAND|wx.ALL,5)
self.vtBoxSizer.Add(self.tree ,5,wx.EXPAND|wx.ALL,5)
self.vtBoxSizer.Add(self.staticText ,0,wx.EXPAND|wx.ALL,5)
self.mainPanel.SetSizer(self.vtBoxSizer)
self.Bind(wx.EVT_BUTTON ,self.OnExpandButton,self.expandButton)
self.Bind(wx.EVT_TREE_SEL_CHANGED,self.OnNodeSelected,self.tree)
def OnExpandButton(self,e):
self.tree.ExpandAll()
def OnNodeSelected(self,e):
selected=self.tree.GetSelection()
self.staticText.SetLabel(self.tree.GetItemText(selected))
self.mainPanel.Layout()
if __name__=='__main__':
app=wx.App()
frame=MyFrame()
frame.Show()
app.MainLoop()
#___________________________________________________
|
[
"cpahouse@naver.com"
] |
cpahouse@naver.com
|
0e2e19efd181694bd82a31e6ea9bd4fd1ccb7faf
|
248d20fa6c37afc1501b47398451bf15dc8f0165
|
/ryosuke/chapter04/knock38.py
|
5e72a2bf383a6cd35a7e272d4ed196b6769cd017
|
[] |
no_license
|
tmu-nlp/100knock2016
|
20e9efd4698f59918aa850ba40163906f13dcb7f
|
d5f7a76286cb95bb374ff98bc0c9db3c796d113d
|
refs/heads/master
| 2021-01-21T02:20:57.121371
| 2016-08-03T01:49:38
| 2016-08-03T01:49:38
| 55,942,482
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
from knock30 import get_sentences
from collections import Counter
import matplotlib.pyplot as plt
vocab = Counter()
for sentence in get_sentences():
vocab += Counter(m['surface'] for m in sentence)
names, freqs = zip(*vocab.most_common())
plt.hist(freqs, bins=len(set(freqs)))
plt.show()
|
[
"tmcit.miyazaki@gmail.com"
] |
tmcit.miyazaki@gmail.com
|
66f614fc294e9d8c94babbbce4963368e0136402
|
35b460a5e72e3cb40681861c38dc6d5df1ae9b92
|
/CodeFights/Arcade/Intro/islandOfKnowledge/minesweeper.py
|
7a778a54e8fe37740efbc970e191ddc6ef1ca2ae
|
[] |
no_license
|
robgoyal/CodingChallenges
|
9c5f3457a213cf54193a78058f74fcf085ef25bc
|
0aa99d1aa7b566a754471501945de26644558d7c
|
refs/heads/master
| 2021-06-23T09:09:17.085873
| 2019-03-04T04:04:59
| 2019-03-04T04:04:59
| 94,391,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,653
|
py
|
# Name: minesweeper.py
# Author: Robin Goyal
# Last-Modified: July 12, 2017
# Purpose: Given an array of true and false values, with true indicating a mine,
#          return an array of the same shape giving the number of surrounding
#          mines at each position
# Note: Could have optimized the solution, but this is a pure brute-force implementation
def minesweeper(matrix):
grid = []
for row in range(len(matrix)):
gridRow = []
for col in range(len(matrix[0])):
count = 0
# Top Row
if (row == 0):
if (col == 0): # Top-Left corner
count = [matrix[row][col+1], matrix[row+1][col], matrix[row+1][col+1]].count(True)
elif (col == len(matrix[0]) - 1): # Top-Right corner
count = [matrix[row][col-1], matrix[row+1][col], matrix[row+1][col-1]].count(True)
else: # Middle Columns in top Row
count = [matrix[row][col-1], matrix[row][col+1]].count(True) \
+ matrix[row+1][col-1:col+2].count(True)
# Bottom Row
elif (row == len(matrix) -1):
if (col == 0): # Bottom-Left corner
count = [matrix[row][col+1], matrix[row-1][col], matrix[row-1][col+1]].count(True)
elif (col == len(matrix[0]) - 1): # Bottom-Right corner
count = [matrix[row][col-1], matrix[row-1][col], matrix[row-1][col-1]].count(True)
else: # Middle Columns in bottom Row
count = [matrix[row][col-1], matrix[row][col+1]].count(True) \
+ matrix[row-1][col-1:col+2].count(True)
# Middle Rows
else:
if (col == 0): # Left most column
count = matrix[row-1][col:col+2].count(True) + [matrix[row][col+1]].count(True) \
+ matrix[row+1][col:col+2].count(True)
elif (col == len(matrix[0]) -1): # Right most column
count = matrix[row-1][col-1:col+1].count(True) + [matrix[row][col-1]].count(True) \
+ matrix[row+1][col-1:col+1].count(True)
else: # Middle columns
count = matrix[row-1][col-1:col+2].count(True) + matrix[row+1][col-1:col+2].count(True) + \
[matrix[row][col-1], matrix[row][col+1]].count(True)
gridRow.append(count)
        grid.append(gridRow)
return grid
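# --- Hedged alternative (added; not part of the original file) ---
# The same counts can be computed without the per-edge case analysis above
# by clamping the 3x3 neighbourhood slice at the grid borders:
def minesweeper_compact(matrix):
    rows, cols = len(matrix), len(matrix[0])
    return [[sum(matrix[r][c]
                 for r in range(max(0, i - 1), min(rows, i + 2))
                 for c in range(max(0, j - 1), min(cols, j + 2))
                 if (r, c) != (i, j))
             for j in range(cols)]
            for i in range(rows)]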
|
[
"goyal.rob@gmail.com"
] |
goyal.rob@gmail.com
|
f9e1ca44905679e39f7b725bab3e049bd3cf44d3
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/number-of-senior-citizens.py
|
50b65c0c4bd9f9324ebc57219dbfd33cea759e81
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 229
|
py
|
# Time: O(n)
# Space: O(1)
# string
class Solution(object):
def countSeniors(self, details):
"""
:type details: List[str]
:rtype: int
"""
return sum(x[-4:-2] > "60" for x in details)
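# Hedged note (added; not part of the original file): each entry of
# `details` is a 15-character string whose 12th-13th characters encode the
# age, so the lexicographic test x[-4:-2] > "60" counts passengers strictly
# older than 60, e.g.
#   Solution().countSeniors(["7868190130M7522", "5303914400F9211"]) -> 2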
|
[
"noreply@github.com"
] |
kamyu104.noreply@github.com
|
cee24ad2b9015a0358c23faf46c7db3e63048385
|
b40a661aa78c10ea8413b349f1efe288149f4ab0
|
/App/migrations/0004_address.py
|
20a5f2bea93a0a8e0e15352f1439fbf6e1dd1c5b
|
[] |
no_license
|
0helloword/DjangoSum
|
daed4ab9488c5d53518623eb5d35c3a32a826129
|
72b528415edd2a76a7a19da708d4046de2a014ac
|
refs/heads/master
| 2022-11-25T15:15:30.843401
| 2020-08-02T03:18:07
| 2020-08-02T03:18:07
| 275,606,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-06-27 13:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('App', '0003_cart'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('a_add', models.CharField(max_length=128)),
('a_customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='App.Customer')),
],
),
]
|
[
"532720298@qq.com"
] |
532720298@qq.com
|
3aefc1186a88845c16d658de39ccb722a760a83f
|
e922f5dac332fbf4de910ade55f07cb75d900d1b
|
/templates/influxdb/actions.py
|
22fc5f7c27e2e8b057d5a9a71db43c9800bbaa34
|
[
"Apache-2.0"
] |
permissive
|
hossnys/0-orchestrator
|
441970f0bd784b72c40f6da4fa44ca2c70b9ea8c
|
cce7cc1e1f957e0eb691b863502fa6c3f4620d52
|
refs/heads/master
| 2021-01-01T18:46:27.123614
| 2017-07-26T13:59:30
| 2017-07-26T13:59:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,070
|
py
|
def get_container(service, force=True):
containers = service.producers.get('container')
if not containers:
if force:
raise RuntimeError('Service didn\'t consume any containers')
else:
return
return containers[0]
def init(job):
from zeroos.orchestrator.configuration import get_configuration
service = job.service
container_actor = service.aysrepo.actorGet('container')
config = get_configuration(service.aysrepo)
args = {
'node': service.model.data.node,
'flist': config.get(
'influxdb-flist', 'https://hub.gig.tech/gig-official-apps/influxdb.flist'),
'hostNetworking': True
}
cont_service = container_actor.serviceCreate(instance='{}_influxdb'.format(service.name), args=args)
service.consume(cont_service)
def install(job):
j.tools.async.wrappers.sync(job.service.executeAction('start', context=job.context))
def start(job):
from zeroos.orchestrator.sal.Container import Container
from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB
service = job.service
container = get_container(service)
j.tools.async.wrappers.sync(container.executeAction('start', context=job.context))
container_ays = Container.from_ays(container, job.context['token'])
influx = InfluxDB(
container_ays, service.parent.model.data.redisAddr, service.model.data.port)
influx.start()
service.model.data.status = 'running'
influx.create_databases(service.model.data.databases)
service.saveAll()
def stop(job):
from zeroos.orchestrator.sal.Container import Container
from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB
service = job.service
container = get_container(service)
container_ays = Container.from_ays(container, job.context['token'])
if container_ays.is_running():
influx = InfluxDB(
container_ays, service.parent.model.data.redisAddr, service.model.data.port)
influx.stop()
j.tools.async.wrappers.sync(container.executeAction('stop', context=job.context))
service.model.data.status = 'halted'
service.saveAll()
def uninstall(job):
service = job.service
container = get_container(service, False)
if container:
j.tools.async.wrappers.sync(service.executeAction('stop', context=job.context))
j.tools.async.wrappers.sync(container.delete())
j.tools.async.wrappers.sync(service.delete())
def processChange(job):
from zeroos.orchestrator.sal.Container import Container
from zeroos.orchestrator.sal.influxdb.influxdb import InfluxDB
from zeroos.orchestrator.configuration import get_jwt_token_from_job
service = job.service
args = job.model.args
if args.pop('changeCategory') != 'dataschema' or service.model.actionsState['install'] in ['new', 'scheduled']:
return
container_service = get_container(service)
container = Container.from_ays(container_service, get_jwt_token_from_job(job))
influx = InfluxDB(
container, service.parent.model.data.redisAddr, service.model.data.port)
if args.get('port'):
if container.is_running() and influx.is_running()[0]:
influx.stop()
service.model.data.status = 'halted'
influx.port = args['port']
influx.start()
service.model.data.status = 'running'
service.model.data.port = args['port']
if args.get('databases'):
if container.is_running() and influx.is_running()[0]:
create_dbs = set(args['databases']) - set(service.model.data.databases)
drop_dbs = set(service.model.data.databases) - set(args['databases'])
influx.create_databases(create_dbs)
influx.drop_databases(drop_dbs)
service.model.data.databases = args['databases']
service.saveAll()
def init_actions_(service, args):
return {
'init': [],
'install': ['init'],
'monitor': ['start'],
'delete': ['uninstall'],
'uninstall': [],
}
|
[
"deboeck.jo@gmail.com"
] |
deboeck.jo@gmail.com
|
21439bcac6cdd546eeab5d2c26363fe72b79eb43
|
ea549f5974db822d0733e5417d313997de9ca2bb
|
/craig_list_site/migrations/0001_initial.py
|
57e111b3bffaa4b9a0892c8fca78c81caa2d727c
|
[] |
no_license
|
Bibin22/craiglist
|
fe5a641cf4b8c03557c1775605a5e8b4da9b43de
|
853b377f4951ee3ac9072bc22d486f520e18b1bc
|
refs/heads/master
| 2023-02-05T11:32:24.911491
| 2021-01-02T05:30:34
| 2021-01-02T05:30:34
| 326,116,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
# Generated by Django 3.1.4 on 2020-12-24 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Search',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('search', models.CharField(max_length=500)),
('created', models.DateTimeField(auto_now=True)),
],
),
]
|
[
"bibinjoy82@gmail.com"
] |
bibinjoy82@gmail.com
|
e33bf7188bb39a15eab44ec863cb21e1daa47b3e
|
acf15961c47fb947a407a4318214110b9597d9e6
|
/env/bin/jupyter-kernel
|
667c26308122816ab476f0645e256bfc37e040c0
|
[] |
no_license
|
qu4ku/dshub-website
|
43e378352246357db83da9b9b0acd760aebbc83a
|
792d94e41fa666093eda2b5511bbcab27e0bb287
|
refs/heads/master
| 2021-06-02T19:27:39.708138
| 2020-05-19T09:18:42
| 2020-05-19T09:18:42
| 135,715,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
#!/Users/kamilwroniewicz/_code/_github/180601-datahub-website/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"qu4ku@hotmail.com"
] |
qu4ku@hotmail.com
|
|
a3fac0df2496aea555fb951e1641e2c5d9b07391
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_233/ch45_2020_03_09_13_17_35_961786.py
|
7cf83bf5b09f45069fdb4e7c49f9a6d644c64307
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
lista = []
while True:
num = int(input())
if num <= 0: break
lista.append(num)
lista_inv = [0] * len(lista)
for i in range(len(lista)):
    lista_inv[-(i + 1)] = lista[i]
print(lista_inv)
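# Hedged example (added; not part of the original file): entering
# 1, 2, 3 and then 0 prints [3, 2, 1].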
|
[
"you@example.com"
] |
you@example.com
|
ddc161b7e46516dd3785e6dba80385cf69326f1e
|
f3f01d98f2f924b7f2ce9c682b63ef68a0b943d7
|
/Type_conversion.py
|
67e8274a805561624b385bea780d5a3d1ffc4e07
|
[] |
no_license
|
nihalgaurav/pythonprep
|
0d935244f4c20b2ba660a1bc192352654d4a9366
|
d3023e1b58d9d5333e909f71d9c3fa7c54c420f5
|
refs/heads/master
| 2023-03-27T06:09:38.757433
| 2021-03-16T05:22:07
| 2021-03-16T05:22:07
| 344,804,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 694
|
py
|
number = 17
width = len(str(bin(number)[2:])) + 2
print("INT".rjust(width) + "OCT".rjust(width) + "HEX".rjust(width) + "BIN".rjust(width))
for x in range(1, number+1):
print(str(int(x)).rjust(width, " ") + str(oct(x))[2:].rjust(width, " ") + str(hex(x))[2:].upper().rjust(width, " ")
+ str(bin(x)[2:]).rjust(width, " "))
num = 5
n = 97 + num
for i in range(num):
p = ''
for j in range(i):
p = p + "-" + chr(n-i+j)
print(p[::-1].rjust(num*2-2, "-") + chr(n-i-1) + p.ljust(num*2-2, "-"))
for i in range(num-2,-1, -1):
p = ''
for j in range(i):
p = p + "-" + chr(n-i+j)
print(p[::-1].rjust(num*2-2, "-") + chr(n-i-1) + p.ljust(num*2-2, "-"))
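# Hedged note (added; not part of the original file): the first block prints
# a right-justified decimal/octal/hex/binary conversion table (in the style
# of HackerRank's "String Formatting"), and the second draws an alphabet
# rangoli of size `num` centred on the letter chr(96 + num).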
|
[
"nihalgaurav85@gmail.com"
] |
nihalgaurav85@gmail.com
|
2eb9a26bdde17a586ad5280059024d4004382a91
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/ayQTiQAcFJhtauhe3_17.py
|
1dca03f7cb44ef5e8ee9f421dc82cdd88d7fd01c
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
"""
Given a list of integers, determine whether the sum of its elements is even or
odd.
The output should be a string (`"odd"` or `"even"`).
If the input list is empty, consider it as a list with a zero (`[0]`).
### Examples
even_or_odd([0]) ➞ "even"
even_or_odd([1]) ➞ "odd"
even_or_odd([]) ➞ "even"
even_or_odd([0, 1, 5]) ➞ "even"
### Notes
N/A
"""
def even_or_odd(lst):
summ=int(sum(lst))
if summ % 2 == 0:
return "even"
if summ % 2 == 1:
return "odd"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
12a9b9befcf7af332c3ea172149268070aea9c7c
|
deb740e5086386a68d155b2482f9a9ec2095012c
|
/jdcloud_sdk/services/live/apis/DescribeLivePublishStreamNumRequest.py
|
212277a2947473efb924dd9775e2df6ca9c01142
|
[
"Apache-2.0"
] |
permissive
|
aluode99/jdcloud-sdk-python
|
843afdd2855a55ecd7cd90fe255df213a8f56e28
|
3da9ae9c0f08e2c20a73dde04f6453d3eb9db16a
|
refs/heads/master
| 2020-05-26T09:26:24.307434
| 2019-05-29T02:35:23
| 2019-05-29T02:35:23
| 188,186,313
| 0
| 0
| null | 2019-05-23T07:46:01
| 2019-05-23T07:46:00
| null |
UTF-8
|
Python
| false
| false
| 2,692
|
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeLivePublishStreamNumRequest(JDCloudRequest):
"""
    Query the number of live publish streams
"""
def __init__(self, parameters, header=None, version="v1"):
super(DescribeLivePublishStreamNumRequest, self).__init__(
'/describeLivePublishStreamNum', 'GET', header, version)
self.parameters = parameters
class DescribeLivePublishStreamNumParameters(object):
def __init__(self, startTime, ):
"""
        :param startTime: start time
- UTC time
format: yyyy-MM-dd'T'HH:mm:ss'Z'
example: 2018-10-21T10:00:00Z
"""
self.domainName = None
self.appName = None
self.protocolType = None
self.period = None
self.startTime = startTime
self.endTime = None
def setDomainName(self, domainName):
"""
        :param domainName: (Optional) playback domain name
"""
self.domainName = domainName
def setAppName(self, appName):
"""
        :param appName: (Optional) application name
"""
self.appName = appName
def setProtocolType(self, protocolType):
"""
        :param protocolType: (Optional) stream protocol type(s) to query, one or more of "rtmp,hdl,hls", comma-separated
"""
self.protocolType = protocolType
def setPeriod(self, period):
"""
        :param period: (Optional) query period; currently one of "oneMin,fiveMin,halfHour,hour,twoHour,sixHour,day,followTime", meaning 1 min, 5 min, half an hour, 1 hour, 2 hours, 6 hours, 1 day, or follow-time respectively. Defaults to empty, which means fiveMin. When followTime is passed, the period is endTime - startTime and only a single data point is returned
"""
self.period = period
def setEndTime(self, endTime):
"""
        :param endTime: (Optional) end time:
- UTC time
format: yyyy-MM-dd'T'HH:mm:ss'Z'
example: 2018-10-21T10:00:00Z
- if empty, defaults to the current time
"""
self.endTime = endTime
|
[
"tancong@jd.com"
] |
tancong@jd.com
|
579a8846030030a1b4f846da2163172703055c1e
|
3592ef6ceb0e7654dc68fa9879b8c6fe31bcf6d1
|
/reveries/tools/modeldiffer/lib.py
|
aa794920283f2358703cbb6ef0aad11ced9d157f
|
[
"MIT"
] |
permissive
|
all-in-one-of/reveries-config
|
a83a8208680d857a155e0a05297bde111d8c6845
|
b47a5a6ce05376dffcb893e0823fecbcf1d08e67
|
refs/heads/master
| 2021-01-04T07:44:45.383431
| 2020-02-13T09:00:51
| 2020-02-13T09:00:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
import logging
from avalon import io
main_logger = logging.getLogger("modeldiffer")
def profile_from_database(version_id):
"""
"""
representation = io.find_one({"type": "representation",
"name": "mayaBinary",
"parent": version_id})
if representation is None:
main_logger.critical("Representation not found. This is a bug.")
return
model_profile = representation["data"].get("modelProfile")
if model_profile is None:
main_logger.critical("'data.modelProfile' not found."
"This is a bug.")
return
profile = dict()
for id, meshes_data in model_profile.items():
for data in meshes_data:
name = data.pop("hierarchy")
# No need to compare normals
data.pop("normals")
data["avalonId"] = id
profile[name] = data
return profile
profile_from_host = NotImplemented
select_from_host = NotImplemented
def is_supported_loader(name):
return name in ("ModelLoader",) # "RigLoader")
def is_supported_subset(name):
return any(name.startswith(family)
for family in ("model",)) # "rig"))
|
[
"david962041@gmail.com"
] |
david962041@gmail.com
|
66c48db3d472e9cbef6459a534d94dd8fe60f1ce
|
94f156b362fbce8f89c8e15cd7687f8af267ef08
|
/week3/main/models.py
|
267cfd064ac83b9e6a1feed9dae6e559d5dabd77
|
[] |
no_license
|
DastanB/AdvancedDjango
|
6eee5477cd5a00423972c9cc3d2b5f1e4a501841
|
2b5d4c22b278c6d0e08ab7e84161163fe42e9a3f
|
refs/heads/master
| 2020-07-17T19:21:16.271964
| 2019-12-03T21:58:51
| 2019-12-03T21:58:51
| 206,081,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
from django.db import models
from users.models import MainUser
from main.constants import PROJECT_STATUSES, PROJECT_IN_PROCESS, PROJECT_FROZEN, PROJECT_DONE, BLOCK_STATUSES, TASKS_DONE, TASKS_FROZEN, TASKS_IN_PROCESS
import datetime
# Create your models here.
class Project(models.Model):
name = models.CharField(max_length=255)
description = models.CharField(max_length=1000)
    status = models.PositiveSmallIntegerField(choices=PROJECT_STATUSES, default=PROJECT_IN_PROCESS)
creator = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='projects')
def is_owner(self, request):
return self.creator.id == request.user.id
def __str__(self):
return self.name
class Block(models.Model):
name = models.CharField(max_length=255)
    type_of = models.PositiveSmallIntegerField(choices=BLOCK_STATUSES, default=TASKS_IN_PROCESS)
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='blocks')
def __str__(self):
return self.name
class Task(models.Model):
name = models.CharField(max_length=255)
description = models.CharField(max_length=1000)
priority = models.IntegerField()
creator = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='created_tasks')
executor = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='tasks', null=True)
block = models.ForeignKey(Block, on_delete=models.CASCADE, related_name='tasks')
order = models.IntegerField()
def is_owner(self, request):
return self.creator.id == request.user.id
def __str__(self):
return self.name
class TaskDocument(models.Model):
document = models.FileField()
creator = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='docs')
task = models.ForeignKey(Task, on_delete=models.CASCADE, related_name='docs')
def is_owner(self, request):
return self.creator.id == request.user.id
class TaskComment(models.Model):
body = models.CharField(max_length=10000)
task = models.ForeignKey(Task, on_delete=models.CASCADE, related_name='comments')
creator = models.ForeignKey(MainUser, on_delete=models.CASCADE, related_name='comments')
created_at = models.DateTimeField(default=datetime.datetime.now)
def is_owner(self, request):
return self.creator.id == request.user.id
def __str__(self):
return self.body
|
[
"dastan211298@gmail.com"
] |
dastan211298@gmail.com
|
039edd18fd3e878624c2de8607511b5b9ad8a545
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4170/codes/1594_1800.py
|
4bfac1cb471a1d30c906e35552843d6922186bbd
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
a = int(input("Insira o valor da variavel a: "))
b = int(input("Insira o valor da variavel b: "))
c = int(input("Insira o valor da variavel c: "))
x = ((a**2) + (b**2) + (c**2)) / (a + b + c)
print(round(x,7))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
6d816df5012606bc69d35c03b4aac39b3a25c6dd
|
0ec4defa6f83ec044b9e1235cc45964a8145b4d1
|
/venv/lib/python3.6/site-packages/pybrain3/rl/experiments/continuous.py
|
72df9483cfa96feab6da58c6c9be10525203864b
|
[] |
no_license
|
nnarziev/MyWeek_Server
|
e6f6c10ce813cf3dc3aa644958c31a4d01567b4d
|
7c51e79224ba48cd1a230536c27f3bd8cec73a21
|
refs/heads/master
| 2021-08-19T13:46:56.450003
| 2017-11-25T16:48:07
| 2017-11-25T16:48:07
| 112,080,782
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from .experiment import Experiment
class ContinuousExperiment(Experiment):
""" The extension of Experiment to handle continuous tasks. """
def doInteractionsAndLearn(self, number = 1):
""" Execute a number of steps while learning continuously.
no reset is performed, such that consecutive calls to
this function can be made.
"""
for _ in range(number):
self._oneInteraction()
self.agent.learn()
return self.stepid
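# Hedged usage sketch (added; not part of PyBrain):
#   experiment = ContinuousExperiment(task, agent)
#   experiment.doInteractionsAndLearn(100)  # act and learn for 100 steps
# where `task` and `agent` are any compatible PyBrain task / learning agent.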
|
[
"qobiljon.toshnazarov@gmail.com"
] |
qobiljon.toshnazarov@gmail.com
|
47a1085793c09d8ff86cf8e73980e0bcd9595eeb
|
43461f999228079c9bfee03f0e4043f08426051f
|
/python爬虫开发与项目实战笔记/通用爬虫/day10/code/SNBook/items.py
|
cc4533585eccbe86d3f6186bcea51a5c1d717dbc
|
[] |
no_license
|
MapleStoryBoy/spider
|
f9af844ae9812fe21141060213ac2677e719ac73
|
b014d81d52805f9317e85b66024d047e73d59053
|
refs/heads/master
| 2020-05-21T18:27:50.585790
| 2019-07-12T10:11:58
| 2019-07-12T10:11:58
| 186,132,575
| 6
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class SnbookItem(scrapy.Item):
# define the fields for your item here like:
parent_type = scrapy.Field()
parent_href = scrapy.Field()
pagecount = scrapy.Field()
son_type = scrapy.Field()
son_href = scrapy.Field()
belong_son_tyoe = scrapy.Field()
book_href = scrapy.Field()
book_name = scrapy.Field()
book_img = scrapy.Field()
book_author = scrapy.Field()
book_descrip = scrapy.Field()
|
[
"MapleStoryBoy@163.com"
] |
MapleStoryBoy@163.com
|
2224c4722a23ff2f4f9c86984146a37d9ca3749e
|
e76ea38dbe5774fccaf14e1a0090d9275cdaee08
|
/src/media/cast/rtp_receiver/rtp_parser/rtp_parser.gyp
|
ade15eebff42e9f6af9baf7ca1709eba30e3b3e3
|
[
"BSD-3-Clause"
] |
permissive
|
eurogiciel-oss/Tizen_Crosswalk
|
efc424807a5434df1d5c9e8ed51364974643707d
|
a68aed6e29bd157c95564e7af2e3a26191813e51
|
refs/heads/master
| 2021-01-18T19:19:04.527505
| 2014-02-06T13:43:21
| 2014-02-06T13:43:21
| 16,070,101
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 569
|
gyp
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'cast_rtp_parser',
'type': 'static_library',
'include_dirs': [
'<(DEPTH)/',
'<(DEPTH)/third_party/',
],
'sources': [
'rtp_parser.cc',
'rtp_parser.h',
], # source
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/base/base.gyp:test_support_base',
],
},
],
}
|
[
"ronan@fridu.net"
] |
ronan@fridu.net
|
e244afe21842d52ced891cd2c82f5a5dc61e1701
|
658e2e3cb8a4d5343a125f7deed19c9ebf06fa68
|
/course_DE/Udacity-Data-Engineering-master/Data Pipeline with Airflow/Production Data Pipelines - Exercise 1.py
|
2189c509168783ee7e6770e7df5d77f68ffca7c2
|
[] |
no_license
|
yennanliu/analysis
|
3f0018809cdc2403f4fbfe4b245df1ad73fa08a5
|
643ad3fed41961cddd006fadceb0e927f1db1f23
|
refs/heads/master
| 2021-01-23T21:48:58.572269
| 2020-10-13T22:47:12
| 2020-10-13T22:47:12
| 57,648,676
| 11
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,591
|
py
|
#Instructions
#In this exercise, we’ll consolidate repeated code into Operator Plugins
#1 - Move the data quality check logic into a custom operator
#2 - Replace the data quality check PythonOperators with our new custom operator
#3 - Consolidate both the S3 to RedShift functions into a custom operator
#4 - Replace the S3 to RedShift PythonOperators with our new custom operator
#5 - Execute the DAG
import datetime
import logging
from airflow import DAG
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators import (
HasRowsOperator,
PostgresOperator,
PythonOperator,
S3ToRedshiftOperator
)
import sql_statements
#
# TODO: Replace the data quality checks with the HasRowsOperator
#
dag = DAG(
"lesson3.exercise1",
start_date=datetime.datetime(2018, 1, 1, 0, 0, 0, 0),
end_date=datetime.datetime(2018, 12, 1, 0, 0, 0, 0),
schedule_interval="@monthly",
max_active_runs=1
)
create_trips_table = PostgresOperator(
task_id="create_trips_table",
dag=dag,
postgres_conn_id="redshift",
sql=sql_statements.CREATE_TRIPS_TABLE_SQL
)
copy_trips_task = S3ToRedshiftOperator(
task_id="load_trips_from_s3_to_redshift",
dag=dag,
table="trips",
redshift_conn_id="redshift",
aws_credentials_id="aws_credentials",
s3_bucket="udac-data-pipelines",
s3_key="divvy/partitioned/{execution_date.year}/{execution_date.month}/divvy_trips.csv"
)
#
# TODO: Replace this data quality check with the HasRowsOperator
#
check_trips = HasRowsOperator(
task_id='check_trips_data',
dag=dag,
redshift_conn_id="redshift",
table="trips"
)
create_stations_table = PostgresOperator(
task_id="create_stations_table",
dag=dag,
postgres_conn_id="redshift",
sql=sql_statements.CREATE_STATIONS_TABLE_SQL,
)
copy_stations_task = S3ToRedshiftOperator(
task_id="load_stations_from_s3_to_redshift",
dag=dag,
redshift_conn_id="redshift",
aws_credentials_id="aws_credentials",
s3_bucket="udac-data-pipelines",
s3_key="divvy/unpartitioned/divvy_stations_2017.csv",
table="stations"
)
#
# TODO: Replace this data quality check with the HasRowsOperator
#
check_stations = HasRowsOperator(
task_id='check_stations_data',
dag=dag,
redshift_conn_id="redshift",
table="stations"
)
create_trips_table >> copy_trips_task
create_stations_table >> copy_stations_task
copy_stations_task >> check_stations
copy_trips_task >> check_trips
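#
# Hedged sketch (added; not part of the exercise solution): a minimal
# HasRowsOperator plugin like the one imported above could look as follows;
# the actual course plugin may differ.
#
# from airflow.models import BaseOperator
#
# class HasRowsOperator(BaseOperator):
#     def __init__(self, redshift_conn_id="", table="", *args, **kwargs):
#         super(HasRowsOperator, self).__init__(*args, **kwargs)
#         self.redshift_conn_id = redshift_conn_id
#         self.table = table
#
#     def execute(self, context):
#         hook = PostgresHook(self.redshift_conn_id)
#         records = hook.get_records("SELECT COUNT(*) FROM {}".format(self.table))
#         if not records or not records[0] or records[0][0] < 1:
#             raise ValueError("Data quality check failed: {} is empty".format(self.table))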
|
[
"f339339@gmail.com"
] |
f339339@gmail.com
|
0fe346359edc276de2c737c0eb967f27d570aafe
|
6ac77834909c485686638d27c0bf41e6d1765cf7
|
/src/mapping/writer/mysql_hbase_hawq_writer.py
|
79911c5ab6d7c61ed2a50ecbcdabf8ecb5943d18
|
[] |
no_license
|
YangXinNewlife/gears
|
4144e451861efb0f3ae1d738eb5fcd6cec46a833
|
486b1ce5a7b8d8682bb1394be8f5dd6ae0fca837
|
refs/heads/master
| 2021-01-20T01:41:30.074696
| 2017-05-26T08:17:45
| 2017-05-26T08:17:45
| 89,316,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'yx'
from src.mapping.writer.writer import Writer
class MysqlHBaseHawqWriter(Writer):
def __init__(self):
pass
def convert_data_type(self, data_type):
pass
|
[
"yangxin@zetyun.com"
] |
yangxin@zetyun.com
|
e9f8df1e669df7bb971e196bef4e8f0b517d633e
|
ca17bd80ac1d02c711423ac4093330172002a513
|
/goodyhandy/FirstMissingPositive.py
|
9988bcba209286d3584cc6e41ed5e95b6469f9f4
|
[] |
no_license
|
Omega094/lc_practice
|
64046dea8bbdaee99d767b70002a2b5b56313112
|
e61776bcfd5d93c663b247d71e00f1b298683714
|
refs/heads/master
| 2020-03-12T13:45:13.988645
| 2018-04-23T06:28:32
| 2018-04-23T06:28:32
| 130,649,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
class Solution(object):
def firstMissingPositive(self, A):
"""
:type nums: List[int]
:rtype: int
"""
length = len(A)
for i, num in enumerate(A):
if A[i] != i + 1:
while A[i] != i + 1:
if A[i] <= 0 or A[i] > length or A[A[i] -1] == A[i]: break
t = A[A[i] - 1] ; A[A[i] - 1] = A[i] ; A[i] = t
for i, num in enumerate(A):
if num != i + 1:
return i + 1
return length + 1
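# Hedged usage note (added; not part of the original file): the cyclic-sort
# pass above moves each value v into slot v - 1 whenever 1 <= v <= len(A),
# so the first index that disagrees is the answer, e.g.
#   Solution().firstMissingPositive([3, 4, -1, 1]) -> 2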
|
[
"zhao_j1@denison.edu"
] |
zhao_j1@denison.edu
|
7bf8d2a366551d6774730e60de1d62b78af16d52
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_108/125.py
|
c96c54037ce532c493eb7d9d77e0a2a5ad1f93b3
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,787
|
py
|
#!/usr/bin/env python
import bisect
import sys
from collections import defaultdict
def main(args):
finname = '%s.in' % args[1]
foutname = '%s.out' % args[1]
with open(finname, 'r') as fin, open(foutname, 'w') as fout:
T = int(fin.readline().strip())
for i in xrange(1, T+1):
num_vines = int(fin.readline().strip())
vinestats = []
for j in xrange(num_vines):
d, l = [int(_) for _ in fin.readline().strip().split()]
vinestats.append((d, l))
D = int(fin.readline().strip())
memo = dict()
def ok(start_vine, swing_length):
if (start_vine, swing_length) in memo:
return memo[(start_vine, swing_length)]
vine_d, vine_l = vinestats[start_vine]
if vine_l < swing_length:
swing_length = vine_l
if vine_d + swing_length >= D:
memo[(start_vine, swing_length)] = True
return True
last_vine = bisect.bisect(vinestats, (vine_d+swing_length+1, 0), start_vine)
i = start_vine+1
result = False
while i < last_vine:
if ok(i, vinestats[i][0]-vine_d):
memo[(start_vine, swing_length)] = True
return True
i+=1
memo[(start_vine, swing_length)] = False
return False
result = 'YES' if ok(0, vinestats[0][0]) else 'NO'
result_str = 'Case #%s: %s\n' % (i, result)
# print result_str,
fout.write(result_str)
if __name__ == '__main__':
status = main(sys.argv)
sys.exit(status)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
016d2f4b0007f8a40384dcd7a57e8d67f5a5f01f
|
7708c2526947a86d064fc8b07a579baa332c5575
|
/Database/build_db_datasets.py
|
b0b7c3d3ff443564267cc2ad0962d02df56a6c71
|
[] |
no_license
|
shunsunsun/Cell_BLAST-notebooks
|
d622aea190015e8b76207866889dddbd4dd333a8
|
9baebb4311eaf71670f4852238db7b91157e71b1
|
refs/heads/master
| 2022-01-19T05:05:30.269257
| 2019-04-21T13:30:42
| 2019-04-21T13:30:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,024
|
py
|
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
import mysql.connector
from utils import nan_safe
def generate_datasets_meta():
dataset_dict = {
item: [
file for file in os.listdir(item)
if file.endswith(".pdf") and file != "peek.pdf"
] for item in os.listdir(".") if item not in (
"__pycache__", ".ipynb_checkpoints"
) and os.path.isdir(item)
}
used_columns = (
"dataset_name", "organism", "organ", "platform",
"cell_number", "publication", "pmid", "remark"
)
single = pd.read_csv(
"../../Datasets/ACA_datasets.csv",
comment="#", skip_blank_lines=True
).loc[:, used_columns]
additional = pd.read_csv(
"../../Datasets/additional_datasets.csv",
comment="#", skip_blank_lines=True
).loc[:, used_columns]
single = pd.concat([single, additional], axis=0, ignore_index=True)
aligned = pd.read_csv(
"../../Datasets/aligned_datasets.csv",
comment="#", skip_blank_lines=True
).loc[:, used_columns]
for idx, row in aligned.iterrows():
aligned.loc[idx, "cell_number"] = single.loc[np.in1d(
single["dataset_name"], row["remark"].split(", ")
), "cell_number"].sum()
combined = pd.concat([single, aligned], axis=0, ignore_index=True)
combined["display"] = np.in1d(
combined["dataset_name"], list(dataset_dict.keys()))
# combined = combined.loc[np.in1d(
# combined["dataset_name"], list(dataset_dict.keys())
# ), :]
# combined["cell_number"] = combined["cell_number"].astype(np.int)
combined["self-projection coverage"] = np.nan
combined["self-projection accuracy"] = np.nan
for idx, row in combined.iterrows():
spf_path = os.path.join(row["dataset_name"], "self_projection.txt")
if not os.path.exists(spf_path):
if row["dataset_name"] in dataset_dict:
print("Missing: " + spf_path)
else:
with open(spf_path, "r") as spf:
lines = spf.readlines()
k1, v1 = lines[0].split()
k2, v2 = lines[1].split()
assert k1 == "coverage" and k2 == "accuracy"
v1, v2 = float(v1.strip()), float(v2.strip())
combined.loc[idx, "self-projection coverage"] = v1
combined.loc[idx, "self-projection accuracy"] = v2
combined["visualization"] = [
(", ".join(dataset_dict[item]) if item in dataset_dict else np.nan)
for item in combined["dataset_name"]
]
# combined.to_csv("./datasets_meta.csv", index=False)
# combined.to_json("./datasets_meta.json", orient="records", double_precision=3)
return combined
def create_table(cnx, cursor):
cursor.execute("DROP TABLE IF EXISTS `datasets`;")
cursor.execute(
"CREATE TABLE `datasets` ("
" `dataset_name` CHAR(50) NOT NULL UNIQUE,"
" `organism` char(50) NOT NULL,"
" `organ` char(100) NOT NULL,"
" `platform` char(50),"
" `cell_number` INT CHECK(`cell_number` > 0),"
" `publication` VARCHAR(300),"
" `pmid` CHAR(8),"
" `remark` VARCHAR(200),"
" `self-projection coverage` FLOAT CHECK(`self-projection coverage` BETWEEN 0 AND 1),"
" `self-projection accuracy` FLOAT CHECK(`self-projection accuracy` BETWEEN 0 AND 1),"
" `visualization` VARCHAR(200),"
" `display` BOOL NOT NULL,"
" PRIMARY KEY USING HASH(`dataset_name`)"
");"
)
def insert_data(cnx, cursor, data):
insert_sql = (
"INSERT INTO `datasets` ("
" `dataset_name`, `organism`, `organ`, `platform`,"
" `cell_number`, `publication`, `pmid`, `remark`,"
" `self-projection coverage`, `self-projection accuracy`,"
" `visualization`, `display`"
") VALUES ("
" %s, %s, %s, %s,"
" %s, %s, %s, %s,"
" %s, %s, %s, %s"
");"
)
for idx, row in data.iterrows():
cursor.execute(insert_sql, (
nan_safe(row["dataset_name"]), nan_safe(row["organism"]),
nan_safe(row["organ"]), nan_safe(row["platform"]),
nan_safe(row["cell_number"], int), nan_safe(row["publication"]),
nan_safe(row["pmid"], lambda x: str(int(x))), nan_safe(row["remark"]),
nan_safe(row["self-projection coverage"], lambda x: float(np.round(x, 3))),
nan_safe(row["self-projection accuracy"], lambda x: float(np.round(x, 3))),
nan_safe(row["visualization"]), nan_safe(row["display"])
))
def main():
cnx = mysql.connector.connect(
user=input("Please enter username: "), password=input("Please enter password: "),
host="127.0.0.1", database="aca"
)
cursor = cnx.cursor()
create_table(cnx, cursor)
insert_data(cnx, cursor, generate_datasets_meta())
cnx.commit()
cursor.close()
cnx.close()
if __name__ == "__main__":
main()
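# Hedged note (added; not part of the original file): `nan_safe` comes from
# a local `utils` module not shown here; a plausible implementation converts
# NaN to None (MySQL NULL) and otherwise applies the optional cast:
# def nan_safe(value, cast=None):
#     if isinstance(value, float) and np.isnan(value):
#         return None
#     return cast(value) if cast is not None else value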
|
[
"caozj@mail.cbi.pku.edu.cn"
] |
caozj@mail.cbi.pku.edu.cn
|
a7db53021d314e8a8940afd0b9d509d6c3431464
|
eb64b799ff1d7ef3a244bf8e6f9f4e9118d5cfcd
|
/homeassistant/components/wilight/light.py
|
3236b3b3851a234fc1d369afef91f7753338940f
|
[
"Apache-2.0"
] |
permissive
|
JeffLIrion/home-assistant
|
53966b81b5d5816679f12fc761f79e8777c738d6
|
8f4ec89be6c2505d8a59eee44de335abe308ac9f
|
refs/heads/dev
| 2023-08-22T09:42:02.399277
| 2022-02-16T01:26:13
| 2022-02-16T01:26:13
| 136,679,169
| 5
| 2
|
Apache-2.0
| 2023-09-13T06:59:25
| 2018-06-09T00:58:35
|
Python
|
UTF-8
|
Python
| false
| false
| 5,995
|
py
|
"""Support for WiLight lights."""
from pywilight.const import (
ITEM_LIGHT,
LIGHT_COLOR,
LIGHT_DIMMER,
LIGHT_ON_OFF,
SUPPORT_NONE,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN, WiLightDevice
def entities_from_discovered_wilight(hass, api_device):
"""Parse configuration and add WiLight light entities."""
entities = []
for item in api_device.items:
if item["type"] != ITEM_LIGHT:
continue
index = item["index"]
item_name = item["name"]
if item["sub_type"] == LIGHT_ON_OFF:
entity = WiLightLightOnOff(api_device, index, item_name)
elif item["sub_type"] == LIGHT_DIMMER:
entity = WiLightLightDimmer(api_device, index, item_name)
elif item["sub_type"] == LIGHT_COLOR:
entity = WiLightLightColor(api_device, index, item_name)
else:
continue
entities.append(entity)
return entities
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up WiLight lights from a config entry."""
parent = hass.data[DOMAIN][entry.entry_id]
# Handle a discovered WiLight device.
entities = entities_from_discovered_wilight(hass, parent.api)
async_add_entities(entities)
class WiLightLightOnOff(WiLightDevice, LightEntity):
"""Representation of a WiLights light on-off."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_NONE
@property
def is_on(self):
"""Return true if device is on."""
return self._status.get("on")
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._client.turn_on(self._index)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._index)
class WiLightLightDimmer(WiLightDevice, LightEntity):
"""Representation of a WiLights light dimmer."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int(self._status.get("brightness", 0))
@property
def is_on(self):
"""Return true if device is on."""
return self._status.get("on")
async def async_turn_on(self, **kwargs):
"""Turn the device on,set brightness if needed."""
# Dimmer switches use a range of [0, 255] to control
# brightness. Level 255 might mean to set it to previous value
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
await self._client.set_brightness(self._index, brightness)
else:
await self._client.turn_on(self._index)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._index)
def wilight_to_hass_hue(value):
"""Convert wilight hue 1..255 to hass 0..360 scale."""
return min(360, round((value * 360) / 255, 3))
def hass_to_wilight_hue(value):
"""Convert hass hue 0..360 to wilight 1..255 scale."""
return min(255, round((value * 255) / 360))
def wilight_to_hass_saturation(value):
"""Convert wilight saturation 1..255 to hass 0..100 scale."""
return min(100, round((value * 100) / 255, 3))
def hass_to_wilight_saturation(value):
"""Convert hass saturation 0..100 to wilight 1..255 scale."""
return min(255, round((value * 255) / 100))
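# Hedged round-trip note (added; not part of the original module): the two
# hue helpers invert each other up to rounding, e.g.
#   wilight_to_hass_hue(128) == 180.706
#   hass_to_wilight_hue(180.706) == 128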
class WiLightLightColor(WiLightDevice, LightEntity):
"""Representation of a WiLights light rgb."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int(self._status.get("brightness", 0))
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return [
wilight_to_hass_hue(int(self._status.get("hue", 0))),
wilight_to_hass_saturation(int(self._status.get("saturation", 0))),
]
@property
def is_on(self):
"""Return true if device is on."""
return self._status.get("on")
async def async_turn_on(self, **kwargs):
"""Turn the device on,set brightness if needed."""
# Brightness use a range of [0, 255] to control
# Hue use a range of [0, 360] to control
# Saturation use a range of [0, 100] to control
if ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
hue = hass_to_wilight_hue(kwargs[ATTR_HS_COLOR][0])
saturation = hass_to_wilight_saturation(kwargs[ATTR_HS_COLOR][1])
await self._client.set_hsb_color(self._index, hue, saturation, brightness)
elif ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR not in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
await self._client.set_brightness(self._index, brightness)
elif ATTR_BRIGHTNESS not in kwargs and ATTR_HS_COLOR in kwargs:
hue = hass_to_wilight_hue(kwargs[ATTR_HS_COLOR][0])
saturation = hass_to_wilight_saturation(kwargs[ATTR_HS_COLOR][1])
await self._client.set_hs_color(self._index, hue, saturation)
else:
await self._client.turn_on(self._index)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._index)
|
[
"noreply@github.com"
] |
JeffLIrion.noreply@github.com
|
1bb80b25bf87d695dd5433efee4ab2a9b1aa572c
|
483508a4e002bcd734b8729459d3e5d5e02aae70
|
/number_frequency.py
|
27b6776f20516181ec134ca21ebb9c493c09bc5c
|
[] |
no_license
|
jdavid54/benford_law
|
9d54cd539130bc3665080ca801d1bb4db96a18a9
|
3ff9d8358f59fef60f401c290ceb94701613e1b2
|
refs/heads/main
| 2023-07-18T03:56:18.685081
| 2021-08-25T10:44:37
| 2021-08-25T10:44:37
| 399,751,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,069
|
py
|
import numpy as np
import random
import matplotlib.pyplot as plt
# benford's law
# value
l1 = 10000
# size
l2 = 100
freq=[0]*10
x = np.arange(1,10)
'''
a = np.random.randint(1,l1,(1,l2))
print(a)
for i in np.array(*a):
n = int(str(i)[0])
#print(n)
freq[n] = freq[n]+1
print(freq)
plt.bar(x,freq[1:])
#plt.show()
for i in range(100):
n = int(str(a[0][np.random.randint(0,l2)])[0])
#print(n)
freq[n] = freq[n]+1
print(freq)
plt.bar(x,freq[1:])
#plt.show()
'''
# Benford's law (theoretical first-digit frequencies)
log_array=[]
for k in x:
print((1+1/k, np.log10(1+1/k)))
log_array.append(np.log10(1+1/k))
#print('sum',sum(log_array)) # sum=1
#plt.bar(x, np.log10(1+1/x)*100)
#plt.title('Loi Benford')
#plt.show()
# https://fr.wikipedia.org/wiki/Loi_de_Benford
# By contrast, in a list of 100 numbers obtained as products of two or
# more numbers drawn at random between 1 and 10,000, the frequencies of
# the digits 1 to 9 in first position roughly follow Benford's law.
val = 10000
numbers=[]
m = 5
kmin = 2
kmax = 5
klist = []
benford=[np.log10(1+1/x) for x in range(1,10)]
print(benford)
benford_cumsum = np.cumsum(benford)
print(benford_cumsum)
# get m*100 numbers, each a product of k random numbers between 1 and val=10000
for i in range(m*100):
p = 1
k = random.randint(kmin,kmax)
if k not in klist:
klist.append(k)
for i in range(k):
p *= np.random.randint(1,val)
p0 = int(str(p)[0])
numbers.append((k,p0,p))
freq[p0] = freq[p0]+1
freq=[f/m for f in freq]
freq_cumul = np.cumsum(freq)
print(freq[1:])
print(klist)
print(numbers)
plt.bar(x-0.2,np.log10(1+1/x)*100,0.4, label='Benford\'s law')
plt.bar(x+0.2,freq[1:],0.4, label='Product of k random numbers')
plt.title(', '.join([str(round(s,1)) for s in freq[1:]]))
plt.legend()
plt.show()
plt.bar(x-0.2, benford_cumsum*100,0.4, label='Benford\'s cumul sum')
plt.bar(x+0.2,freq_cumul[1:],0.4, label='Product of k random numbers frequence cumul sum')
#plt.bar(x,freq_cumul[1:])
plt.title('Cumulative frequencies')
plt.legend()
plt.show()
|
[
"noreply@github.com"
] |
jdavid54.noreply@github.com
|
a44f361047b27f3505d603357681d2fca47f37b6
|
bad686ba27539a3d3286418cc3ebf2aa80ae4958
|
/src/pong/full-game.py
|
383a097d39786a83f75f9eefa942508b67aa3626
|
[] |
no_license
|
AaryaBatchu/micropython
|
f0a31b579b3a998586f26b92036875c93588eca7
|
aef7d33937352e9ab6f9615bfc5bf9aa1a9bee57
|
refs/heads/main
| 2023-08-19T13:33:15.006432
| 2021-10-23T19:06:26
| 2021-10-23T19:06:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,452
|
py
|
# Pong game on Raspberry Pi Pico with a OLED and two Potentimeters
import machine
from machine import Pin, PWM
import ssd1306
from utime import sleep
import random # random direction for new ball
sda=machine.Pin(0)
scl=machine.Pin(1)
pot_pin = machine.ADC(26)
WIDTH = 128
HEIGHT = 64
i2c=machine.I2C(0,sda=sda, scl=scl)
oled = ssd1306.SSD1306_I2C(WIDTH, HEIGHT, i2c)
# connect the center taps of the potentiometers to ADC0 and ADC1
pot_pin_1 = machine.ADC(27)
pot_pin_2 = machine.ADC(26) # make them the same for testing
# lower right corner with USB connector on top
SPEAKER_PIN = 16
# create a Pulse Width Modulation Object on this pin
speaker = PWM(Pin(SPEAKER_PIN))
# global variables
# constants use UPPERCASE names
HALF_WIDTH = int(WIDTH / 2)
HALF_HEIGHT = int(HEIGHT / 2)
BALL_SIZE = 3 # 3x3 pixels
PAD_WIDTH = 2
PAD_HEIGHT = 8
HALF_PAD_WIDTH = int(PAD_WIDTH / 2)
HALF_PAD_HEIGHT = int(PAD_HEIGHT / 2)
POT_MIN = 3000
POT_MAX = 65534
MAX_ADC_VALUE = 65534 # just below the 16-bit ADC maximum of 2^16 - 1 = 65535
# dynamic global variables use lowercase
paddle1_vel = 0
paddle2_vel = 0
l_score = 0
r_score = 0
# continuous update of the paddle and ball
# play_startup_sound()
# start with the ball in the center
ball_x = int(WIDTH / 2)
ball_y = int(HEIGHT / 2)
# set the initial direction to down and to the right
ball_x_dir = 1
ball_y_dir = 1
def play_startup_sound():
speaker.duty_u16(1000)
speaker.freq(600)
sleep(.25)
speaker.freq(800)
sleep(.25)
speaker.freq(1200)
sleep(.25)
speaker.duty_u16(0)
def play_bounce_sound():
speaker.duty_u16(1000)
speaker.freq(900)
sleep(.25)
speaker.duty_u16(0)
def play_score_sound():
speaker.duty_u16(1000)
speaker.freq(600)
sleep(.25)
speaker.freq(800)
sleep(.25)
speaker.duty_u16(0)
# note that OLEDs are prone to screen burn-in - don't leave this on too long!
def border(WIDTH, HEIGHT):
oled.rect(0, 0, WIDTH, HEIGHT, 1)
# Takes an input number vale and a range between high-and-low and returns it scaled to the new range
# This is similar to the Arduino map() function
def valmap(value, istart, istop, ostart, ostop):
return int(ostart + (ostop - ostart) * ((value - istart) / (istop - istart)))
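# e.g. valmap(32767, 0, 65534, 0, 63) -> 31: a mid-scale ADC reading
# maps to roughly the middle of the output range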
# draw a vertical bar
def draw_paddle(paddle_no, paddle_center):
if paddle_no == 1:
x = 0
else:
x = WIDTH - 2
y = paddle_center - HALF_PAD_HEIGHT
oled.fill_rect(x, y, PAD_WIDTH, PAD_HEIGHT, 1) # fill with 1s
def draw_ball():
oled.fill_rect(ball_x, ball_y, BALL_SIZE, BALL_SIZE, 1) # square balls for now
# The main event loop
while True:
oled.fill(0) # clear screen
oled.vline(int(WIDTH / 2), 0, HEIGHT, 1)
# border(WIDTH, HEIGHT)
# read both the pot values
pot_val_1 = pot_pin_1.read_u16()
pot_val_2 = pot_pin_2.read_u16()
# print(pot_val_1)
    # scale the values: the ADC input ranges up to 2^16 - 1, and we map it
    # to HALF_PAD_HEIGHT .. HEIGHT - HALF_PAD_HEIGHT - 2 (ideally about 5 to 58)
pot_val_1 = valmap(pot_val_1, POT_MIN, POT_MAX, HALF_PAD_HEIGHT, HEIGHT - HALF_PAD_HEIGHT - 2)
pot_val_2 = valmap(pot_val_2, POT_MIN, POT_MAX, HALF_PAD_HEIGHT, HEIGHT - HALF_PAD_HEIGHT - 2)
# print(pot_val, pot_scaled)
draw_paddle(1, pot_val_1 + HALF_PAD_HEIGHT)
draw_paddle(2, pot_val_2 + HALF_PAD_HEIGHT)
draw_ball()
#update ball position with the current directions
ball_x = ball_x + ball_x_dir
ball_y = ball_y + ball_y_dir
# update the ball direction if we are at the top or bottom edge
if ball_y < 0:
ball_y_dir = 1
#play_bounce_sound()
if ball_y > HEIGHT - 3:
ball_y_dir = -1
#play_bounce_sound()
# if it hits the paddle bounce else score
if ball_x < 1:
top_paddle = pot_val_1 - HALF_PAD_HEIGHT
bottom_paddle = pot_val_1 + HALF_PAD_HEIGHT
if ball_y > top_paddle and ball_y < bottom_paddle:
# we have a hit
ball_x_dir = 1
ball_x = 2
play_bounce_sound()
print('paddle hit on left edge', pot_val_1, top_paddle, bottom_paddle)
else:
# we have a score for the right player
play_score_sound()
r_score += 1
ball_x = int(WIDTH / 2)
ball_y = int(HEIGHT / 2)
ball_x_dir = random.randint(-1, 2)
if ball_x_dir == 0:
ball_x_dir = 1
ball_y_dir = random.randint(-1, 2)
print('score on left edge', pot_val_1, top_paddle, bottom_paddle)
sleep(.25)
if ball_x > WIDTH - 3:
ball_x = WIDTH - 4
top_paddle = pot_val_2 - HALF_PAD_HEIGHT
bottom_paddle = pot_val_2 + HALF_PAD_HEIGHT
if ball_y > top_paddle and ball_y < bottom_paddle:
ball_x_dir = -1
print('bounce on right paddle', pot_val_1, top_paddle, bottom_paddle)
else:
l_score += 1
play_score_sound()
ball_x = int(WIDTH / 2)
ball_y = int(HEIGHT / 2)
ball_x_dir = random.randint(-1, 2)
if ball_x_dir == 0:
ball_x_dir = 1
ball_y_dir = random.randint(-1, 2)
play_bounce_sound()
print('score on right edge', pot_val_1, top_paddle, bottom_paddle)
sleep(.25)
oled.text(str(l_score), HALF_WIDTH - 20, 5, 1)
oled.text(str(r_score), HALF_WIDTH + 5, 5, 1)
oled.show()
|
[
"dan.mccreary@gmail.com"
] |
dan.mccreary@gmail.com
|
79a4bb8bec0d2d35bfcfb2c239be6aee46b0fd66
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_373/ch4_2020_04_12_18_58_48_907546.py
|
cde9dac5e0e2b7c03893f3ea611cee967836abd9
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
def classifica_idade(idade):
    if idade <= 11:
        print('child')
    elif 12 <= idade <= 17:
        print('teenager')
    else:
        print('adult')
classifica_idade(int(input('age: ')))
|
[
"you@example.com"
] |
you@example.com
|
6405b2626aba482937b14dfeafe8be7ddfd5657d
|
6392354e74cce4a303a544c53e13d0a7b87978ee
|
/m4/socket_correlation/company_review/lock_test.py
|
154a5366cb5434bb78837c326d9e8b9c99355720
|
[] |
no_license
|
music51555/wxPythonCode
|
dc35e42e55d11850d7714a413da3dde51ccdd37e
|
f77b71ed67d926fbafd1cfec89de8987d9832016
|
refs/heads/master
| 2020-04-11T20:20:38.136446
| 2019-04-01T09:17:34
| 2019-04-01T09:17:34
| 162,067,449
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
import time
from threading import Thread,RLock
mutexA = mutexB = RLock()
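# NOTE: both names point to the SAME reentrant lock, so the classic AB/BA
# deadlock that two separate Lock objects would cause in f1/f2 cannot happen:
# a thread holding the RLock may re-acquire it, and other threads simply wait.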
class MyThread(Thread):
def __init__(self,name):
super(MyThread,self).__init__()
self.name = name
def run(self):
self.f1()
self.f2()
    def f1(self):
        mutexA.acquire()
        print('%s acquired lock A' % self.name)
        mutexB.acquire()
        print('%s acquired lock B' % self.name)
        mutexA.release()
        print('%s released lock A' % self.name)
        mutexB.release()
        print('%s released lock B' % self.name)
    def f2(self):
        mutexB.acquire()
        print('%s acquired lock B' % self.name)
        time.sleep(0.1)
        mutexA.acquire()
        print('%s acquired lock A' % self.name)
        mutexB.release()
        print('%s released lock B' % self.name)
        mutexA.release()
        print('%s released lock A' % self.name)
if __name__ == '__main__':
for i in range(3):
        m = MyThread('thread-%s' % i)
m.start()
|
[
"music51555@163.com"
] |
music51555@163.com
|
ebf5338c9d16d52fb1f01ccc605998b512d9edf6
|
c6ff2a4484c371efd97ce610832cd9772dd406e0
|
/app10_udemy/app10_udemy/wsgi.py
|
bb40d92d717d10e2eaaa247e3e39c58b6fc183fe
|
[] |
no_license
|
inderdevkumar/Upload-and-display
|
66bbb808be27d47f3ff8d57e663b58b71f62ef71
|
668beb97392f12d4b545937c18f2723919264987
|
refs/heads/master
| 2022-10-10T01:19:02.044549
| 2020-06-09T12:56:22
| 2020-06-09T12:56:22
| 271,003,802
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for app10_udemy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app10_udemy.settings')
application = get_wsgi_application()
|
[
"id0102yadav@gmail.com"
] |
id0102yadav@gmail.com
|
be38d3ebadb2460af99adbcd9d5e38e954fef300
|
89220198e6869bf13ff99f1d07b5aa0f49f23b2a
|
/modules/tools/perception/empty_prediction.py
|
33dd04981b1164fe3cb0fdef209d432d879386cd
|
[
"Apache-2.0"
] |
permissive
|
maohaihua/apollo
|
2cd073a0844a9028756582e6db4c6b66fd4f8a0a
|
a30d7a6c65a58ca82681df81211176f98eeffde2
|
refs/heads/master
| 2020-04-30T20:55:14.018814
| 2019-03-22T00:29:56
| 2019-03-22T04:58:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,225
|
py
|
#!/usr/bin/env python
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
this module creates a cyber node and publishes empty (fake)
prediction messages on a channel at a fixed rate
"""
import argparse
import time
from cyber_py import cyber
from modules.prediction.proto.prediction_obstacle_pb2 import PredictionObstacles
def prediction_publisher(prediction_channel, rate):
"""publisher"""
cyber.init()
node = cyber.Node("prediction")
writer = node.create_writer(prediction_channel, PredictionObstacles)
sleep_time = 1.0 / rate
seq_num = 1
while not cyber.is_shutdown():
prediction = PredictionObstacles()
prediction.header.sequence_num = seq_num
prediction.header.timestamp_sec = time.time()
prediction.header.module_name = "prediction"
print(str(prediction))
writer.write(prediction)
seq_num += 1
time.sleep(sleep_time)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="create empty prediction message",
prog="replay_prediction.py")
parser.add_argument("-c", "--channel", action="store", type=str, default="/apollo/prediction",
help="set the prediction channel")
parser.add_argument("-r", "--rate", action="store", type=int, default=10,
help="set the prediction channel publish time duration")
args = parser.parse_args()
prediction_publisher(args.channel, args.rate)
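# example invocation (illustrative):
#   python empty_prediction.py --channel /apollo/prediction --rate 10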
|
[
"xiaoxiangquan@gmail.com"
] |
xiaoxiangquan@gmail.com
|
97ed4b7b177f9bfd4dd65cf0fe4e612cec5f5ca7
|
c68580258e9fbe64bbf232e781d75584691de4c4
|
/tests/django_settings.py
|
2af62bb352df44c8386b6fd77435541a4214c8d9
|
[
"MIT"
] |
permissive
|
KyleAMathews/graphene
|
7e092e6e7d9575c1f736d834a2913a63bc753006
|
5738b69271fd245339f35640d375d6bc13092358
|
refs/heads/master
| 2023-08-31T21:12:22.927712
| 2015-11-30T18:08:12
| 2015-11-30T18:08:12
| 47,149,828
| 2
| 0
| null | 2015-11-30T22:24:27
| 2015-11-30T22:24:27
| null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
SECRET_KEY = 1
INSTALLED_APPS = [
'examples.starwars_django',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tests/django.sqlite',
}
}
|
[
"me@syrusakbary.com"
] |
me@syrusakbary.com
|
07dd881dfa838563a5ef9d22778cd9993402dd4c
|
22ebcc842dbc933bfa8fdad89b8b8ef48ecc91c7
|
/load/load_aes_hd.py
|
f70ad4aa349abe0942c49ece8386f2b88e6237e6
|
[] |
no_license
|
klikooo/thesis-src
|
192651c18f243c59cfa588e7052dc1a96ab0a146
|
64f2ee824afdc2d3fd0f98c6d9fcfda597b9ad9f
|
refs/heads/master
| 2020-04-16T18:16:20.638147
| 2019-08-20T14:59:52
| 2019-08-20T14:59:52
| 161,623,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,364
|
py
|
from decimal import Decimal
import torch
import numpy as np
import matplotlib.pyplot as plt
from models.load_model import load_model
from test import test_with_key_guess
import util
import pdb
path = '/media/rico/Data/TU/thesis'
#####################################################################################
# Parameters
use_hw = False
n_classes = 9 if use_hw else 256
spread_factor = 1
runs = [x for x in range(5)]
train_size = 20000
epochs = 140
batch_size = 100
lr = 0.00075
sub_key_index = 2
attack_size = 100
rank_step = 1
type_network = 'HW' if use_hw else 'ID'
unmask = False if sub_key_index < 2 else True
# network_names = ['SpreadV2', 'SpreadNet']
network_names = ['ConvNetKernel']
kernel_sizes = [3, 5, 7, 9, 11, 13, 15]
# network_names = ['ConvNet', 'ConvNetDK']
plt_titles = ['$Spread_{V2}$', '$Spread_{PH}$', '$Dense_{RT}$', '$MLP_{best}$']
only_accuracy = False
data_set = util.DataSet.RANDOM_DELAY
raw_traces = True
validation_size = 1000
#####################################################################################
data_set_name = str(data_set)
if len(plt_titles) != len(network_names):
plt_titles = network_names
device = torch.device("cuda")
# Load Data
loader = util.load_data_set(data_set)
print('Loading data set')
total_x_attack, total_y_attack, plain = loader({'use_hw': use_hw,
'traces_path': '/media/rico/Data/TU/thesis/data',
'raw_traces': raw_traces,
'start': train_size + validation_size,
'size': attack_size,
'domain_knowledge': True})
print('Loading key guesses')
key_guesses = util.load_csv('/media/rico/Data/TU/thesis/data/{}/Value/key_guesses_ALL_transposed.csv'.format(
data_set_name),
delimiter=' ',
                            dtype=int,
start=train_size + validation_size,
size=attack_size)
real_key = util.load_csv('/media/rico/Data/TU/thesis/data/{}/secret_key.csv'.format(data_set_name), dtype=int)
x_attack = total_x_attack
y_attack = total_y_attack
def get_ranks(x_attack, y_attack, key_guesses, runs, train_size,
epochs, lr, sub_key_index, attack_size, rank_step, unmask, network_name, kernel_size_string=""):
ranks_x = []
ranks_y = []
for run in runs:
model_path = '/media/rico/Data/TU/thesis/runs2/' \
'{}/subkey_{}/{}_SF{}_E{}_BZ{}_LR{}/train{}/model_r{}_{}{}.pt'.format(
data_set_name,
sub_key_index,
type_network,
spread_factor,
epochs,
batch_size,
'%.2E' % Decimal(lr),
train_size,
run,
network_name,
kernel_size_string)
print('path={}'.format(model_path))
# Load the model
model = load_model(network_name=network_name, model_path=model_path)
model.eval()
print("Using {}".format(model))
model.to(device)
# Number of times we test a single model + shuffle the test traces
num_exps = 100
x, y = [], []
for exp_i in range(num_exps):
permutation = np.random.permutation(x_attack.shape[0])
# permutation = np.arange(0, x_attack.shape[0])
x_attack_shuffled = util.shuffle_permutation(permutation, np.array(x_attack))
y_attack_shuffled = util.shuffle_permutation(permutation, np.array(y_attack))
key_guesses_shuffled = util.shuffle_permutation(permutation, key_guesses)
# Check if we need domain knowledge
dk_plain = None
if network_name in util.req_dk:
dk_plain = plain
dk_plain = util.shuffle_permutation(permutation, dk_plain)
x_exp, y_exp = test_with_key_guess(x_attack_shuffled, y_attack_shuffled, key_guesses_shuffled, model,
attack_size=attack_size,
real_key=real_key,
use_hw=use_hw,
plain=dk_plain)
x = x_exp
y.append(y_exp)
# Take the mean of the different experiments
y = np.mean(y, axis=0)
# Add the ranks
ranks_x.append(x)
ranks_y.append(y)
return ranks_x, ranks_y
# Test the networks that were specified
ranks_x = []
ranks_y = []
rank_mean_y = []
name_models = []
for network_name in network_names:
if network_name in util.req_kernel_size:
for kernel_size in kernel_sizes:
kernel_string = "_k{}".format(kernel_size)
x, y = get_ranks(x_attack, y_attack, key_guesses, runs, train_size, epochs, lr, sub_key_index,
attack_size, rank_step, unmask, network_name, kernel_string)
mean_y = np.mean(y, axis=0)
ranks_x.append(x)
ranks_y.append(y)
rank_mean_y.append(mean_y)
name_models.append("{} K{}".format(network_name, kernel_size))
else:
x, y = get_ranks(x_attack, y_attack, key_guesses, runs, train_size, epochs, lr, sub_key_index,
attack_size, rank_step, unmask, network_name)
mean_y = np.mean(y, axis=0)
ranks_x.append(x)
ranks_y.append(y)
rank_mean_y.append(mean_y)
name_models.append(network_name)
for i in range(len(rank_mean_y)):
plt.title('Performance of {}'.format(name_models[i]))
plt.xlabel('number of traces')
plt.ylabel('rank')
plt.grid(True)
# Plot the results
for x, y in zip(ranks_x[i], ranks_y[i]):
plt.plot(x, y)
figure = plt.gcf()
plt.figure()
figure.savefig('/home/rico/Pictures/{}.png'.format(name_models[i]), dpi=100)
# plt.title('Comparison of networks')
plt.xlabel('Number of traces')
plt.ylabel('Mean rank')
plt.grid(True)
for i in range(len(rank_mean_y)):
plt.plot(ranks_x[i][0], rank_mean_y[i], label=name_models[i])
plt.legend()
# plt.figure()
figure = plt.gcf()
figure.savefig('/home/rico/Pictures/{}.png'.format('mean'), dpi=100)
plt.show()
|
[
"rico12978@hotmail.com"
] |
rico12978@hotmail.com
|
e664beb018a1c9ae9d3a87597696086278f40c0e
|
dbe012dbedc967332ae58414473185055136d189
|
/maskrcnn_benchmark/data/transforms/transforms.py
|
283a9d3055d8b0ed951c6e0ec938684bcaf74ce3
|
[
"MIT"
] |
permissive
|
kevincao91/maskrcnn
|
87561a023939a71d624252dd44f4c882b2dfa2a6
|
a55f6ab82219329e353a20dd53c3f25f4375f537
|
refs/heads/master
| 2020-09-24T18:41:36.565752
| 2020-05-07T05:45:39
| 2020-05-07T05:45:39
| 225,819,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,511
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = random.choice(self.min_size)
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
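    # worked example (illustrative): (w, h) = (1000, 500), min_size = (800,),
    # max_size = 1333 -> 800 * 1000 / 500 = 1600 > 1333, so size becomes
    # round(1333 * 500 / 1000) = 666 and get_size returns (oh, ow) = (666, 1332)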
def __call__(self, image, target=None):
size = self.get_size(image.size)
#print('get size:', size)
image = F.resize(image, size)
if target is None:
return image
target = target.resize(image.size)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
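            # mirror the image and the target together; transpose(0) flips the
            # target boxes/masks left-right (an assumption about the target API,
            # based on the constant used here)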
image = F.hflip(image)
target = target.transpose(0)
return image, target
class RandomVerticalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.vflip(image)
target = target.transpose(1)
return image, target
class ColorJitter(object):
def __init__(self,
brightness=None,
contrast=None,
saturation=None,
hue=None,
):
self.color_jitter = torchvision.transforms.ColorJitter(
brightness=brightness,
contrast=contrast,
saturation=saturation,
hue=hue,)
def __call__(self, image, target):
image = self.color_jitter(image)
return image, target
class ToTensor(object):
def __call__(self, image, target):
return F.to_tensor(image), target
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, target=None):
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image
return image, target
|
[
"kevin_cao_91@163.com"
] |
kevin_cao_91@163.com
|
59bebd47be55198c6ec48813b99966195120cdd5
|
3b3b9bbc39c50a270e96b4394024f1753e35aaec
|
/ncbly/spiders/spider.py
|
30b8ffc51883646ef9f6e34a1fd77a8c78d021b7
|
[] |
no_license
|
hristo-grudev/ncbly
|
f94e2fdc8d556fba416d556cac5649b7f492c7c5
|
6b33ceb9b287ed0047f4676b3c036dc0b7c8e08a
|
refs/heads/main
| 2023-04-11T02:09:07.152764
| 2021-04-15T06:30:42
| 2021-04-15T06:30:42
| 358,152,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
import scrapy
from scrapy import FormRequest
from scrapy.loader import ItemLoader
from ..items import NcblyItem
from itemloaders.processors import TakeFirst
class NcblySpider(scrapy.Spider):
name = 'ncbly'
start_urls = ['https://www.ncb.ly/en/media-center/news/']
def parse(self, response):
post_links = response.xpath('//h4/a/@href').getall()
yield from response.follow_all(post_links, self.parse_post)
next_page = response.xpath('//a[text()="Next"]/@href').getall()
if next_page:
yield FormRequest.from_response(response, formdata={
'__EVENTTARGET': 'ctl00$cph_body$pgrCustomRepeater$ctl02$ctl00'}, callback=self.parse)
def parse_post(self, response):
title = response.xpath('//h1[@class="new-mc-big-title"]/text()').get()
description = response.xpath('//div[@class="col col_8_of_12 mc-body"]//text()[normalize-space()]').getall()
description = [p.strip() for p in description if '{' not in p]
description = ' '.join(description).strip()
date = response.xpath('//div[@class="new-mc-big-date"]/text()').get()
item = ItemLoader(item=NcblyItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('description', description)
item.add_value('date', date)
return item.load_item()
|
[
"hr.grudev@gmail.com"
] |
hr.grudev@gmail.com
|
24e05aac27a7eee6799ab5ec26fcb11af42151c3
|
147389cf28e0c92f0b2ef8000b0562a71d766880
|
/pp.py
|
764a1b7b187b101da8e094613efc9e68f4b889cc
|
[] |
no_license
|
dohyekim/hello
|
3821ca97079818c9938df33fc6d8d6ea9ca763a5
|
84f5704fe6cb6e5b63fb7903e311b650d65a394a
|
refs/heads/master
| 2022-02-25T11:12:31.077335
| 2019-09-22T06:23:00
| 2019-09-22T06:23:00
| 156,352,382
| 1
| 0
| null | 2022-02-12T11:48:53
| 2018-11-06T08:39:18
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 707
|
py
|
import requests
from bs4 import BeautifulSoup
import json
url = "https://www.melon.com/chart/index.htm"
headers = {
'Referer': 'https://www.melon.com/',
'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
html = requests.get(url, headers = headers).text
soup = BeautifulSoup(html, 'html.parser')
parameter = []
rank_50 = soup.select("table > tbody #lst50")
rank_100 = soup.select("table > tbody #lst100")
for i in rank_50:
a = i.attrs['data-song-no']
parameter.append(a)
for j in rank_100:
b = j.attrs['data-song-no']
parameter.append(b)
print(parameter)
param_ = ",".join(parameter)
print(param_)
|
[
"dhleen5@hanmail.net"
] |
dhleen5@hanmail.net
|
dc5410da4cfff303d0f5fbc6d93fce02dc7cad1f
|
79ed3f72555aad8548634f523f775f34cfe166e7
|
/catch/datasets/guaroa.py
|
5acd22fecfb7fdabe5e8b72c0eb5fa30d32a8df1
|
[
"MIT"
] |
permissive
|
John-Bioinfo/catch
|
a2ab188ed598767e7759f74227f24af2b284b379
|
fe63b86bc41396c1da0b449ac440c6ae9e52b2c5
|
refs/heads/master
| 2020-03-18T09:29:10.315733
| 2018-04-17T18:36:47
| 2018-04-17T18:36:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
"""Dataset with 'Guaroa orthobunyavirus' sequences.
A dataset with 25 'Guaroa orthobunyavirus' sequences. The virus is
segmented and has 3 segments. Based on their strain and/or isolate,
these sequences were able to be grouped into 16 genomes. Many genomes
may have fewer than 3 segments.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
from os.path import dirname
from os.path import join
from os import listdir
import sys
from catch.datasets import GenomesDatasetMultiChrom
__author__ = 'Hayden Metsky <hayden@mit.edu>'
chrs = ["segment_" + seg for seg in ['L', 'M', 'S']]
def seq_header_to_chr(header):
import re
c = re.compile(r'\[segment (L|M|S)\]')
m = c.search(header)
if not m:
raise ValueError("Unknown segment in header %s" % header)
seg = m.group(1)
valid_segs = ['L', 'M', 'S']
if seg not in valid_segs:
raise ValueError("Unknown segment %s" % seg)
return "segment_" + seg
ds = GenomesDatasetMultiChrom(__name__, __file__, __spec__,
chrs, seq_header_to_chr)
for f in listdir(join(dirname(__file__), "data/guaroa/")):
ds.add_fasta_path("data/guaroa/" + f, relative=True)
sys.modules[__name__] = ds
|
[
"hmetsky@gmail.com"
] |
hmetsky@gmail.com
|
bf5657467f3bc562c237bba7906c9b1146e9b92a
|
18d7876af265ec974aa5ecf9142093d845b59020
|
/module/Favourite.py
|
d373b337f2caccf5bd99c89382bc95c872888048
|
[] |
no_license
|
xiaojieluo/savemylink
|
b6b2b806b8a760369860e2ec83fd85dece9bfd9d
|
7eb64a4742516486bebe8498374b94552c682cfe
|
refs/heads/master
| 2021-06-13T16:49:13.795891
| 2017-03-23T03:48:39
| 2017-03-23T03:48:39
| 77,986,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,113
|
py
|
#!/usr/bin/env python
# coding=utf-8
from module.DB import db
from module.define import *
class Favourite(object):
db = db
link_list = []
value_dict = dict()
favourite_public = FAVOURITE_PUBLIC
favourite_count = FAVOURITE_COUNT
def __init__(self, fid = 0):
if not isinstance(fid, int):
raise TypeError('Bad operand type')
        self.db = db
        self.fid = fid
        # per-instance buffers; without these, the mutable class attributes
        # link_list/value_dict would be shared across all instances
        self.link_list = []
        self.value_dict = dict()
self.favourite_info = FAVOURITE_INFO.format(fid=self.fid)
self.favourite_count = FAVOURITE_COUNT
self.favourite = FAVOURITE.format(fid=self.fid)
self.favourite_public = FAVOURITE_PUBLIC
@classmethod
def create(cls,info):
#info = dict(
# name='name',
# created_at = 'created_at'
#)
favourite_count = FAVOURITE_COUNT
fid = cls.db.r.incr(cls.favourite_count)
favourite_info = FAVOURITE_INFO.format(fid=fid)
cls.db.r.hmset(favourite_info, info)
if info['public']:
cls.db.r.sadd(cls.favourite_public, fid)
# only return fid
# if you want add fid to account_favourite table
# you need run down code
# user = Account(id)
# account_favourite = ACCOUNT_FAVOURITE.format(uid=uid)
# cls.db.r.sadd(account_favourite, fid)
return fid
@classmethod
def public(cls):
"""
        Return all public favourites.
        """
        # pagination could be added here
pub = cls.db.smembers(cls.favourite_public)
result = []
if pub:
for k in pub:
result.append(Favourite(k))
return result
else:
return []
@property
def isPublic(self):
        public = self.db.r.sismember(self.favourite_public, self.fid)
return public
@property
def name(self):
#favourite_info = FAVOURITE_INFO.format(fid=self.fid)
result = self.db.r.hget(self.favourite_info, 'name')
return result
@property
def author(self):
user_id = int(self.db.hget(self.favourite_info, 'author'))
# print(self.db.r.hgetall(self.favourite_info))
# print(type(user_id))
if user_id:
from lib.Account import Account
return Account(user_id)
@name.setter
def name(self, value):
self.value_dict['name'] = value
@property
def created_at(self):
#favourite_info = FAVOURITE_INFO.format(fid=self.fid)
return self.db.r.hget(self.favourite_info, 'created_at')
@created_at.setter
def created_at(self, value):
self.value_dict['created_at'] = value
# add linkid to favourite , if not run save , the data is in buffer
    def addlink(self, lid):
        if isinstance(lid, list):
            for k in lid:
                if k not in self.link_list:
                    self.link_list.append(k)
        else:
            lid = int(lid)
            if lid not in self.link_list:
                self.link_list.append(lid)
        return True
#print(self.link_list)
def save(self):
# save Favourite information
if len(self.value_dict) > 0:
self.db.r.hmset(self.favourite_info, self.value_dict)
# save link id into the favourite
if len(self.link_list) > 0:
for k in self.link_list:
self.db.r.sadd(self.favourite, k)
#del self.link_list[:]
self.link_list = []
self.value_dict = {}
return True
def links(self):
# get all links in favourites,
# return Link Class
#"""
favourite_links = FAVOURITE.format(fid=self.fid)
tmp = self.db.smembers(favourite_links)
print(tmp)
# only return link id
# new class in Handler's
return tmp
#print(tmp)
#if len(tmp) > 0:
# result = []
# from lib.Link import Link
# for k in tmp:
# result.append(Link(k))
# return result
#else:
# return None
|
[
"xiaojieluoff@gmail.com"
] |
xiaojieluoff@gmail.com
|
788d74a0541595cac3c54e408e4d3d2a423cdc26
|
4b157ab5270ba430a6d7c1594ea41ceea89b7ab2
|
/dataview/items/management/commands/dbimport.py
|
202892882682d5ee3d0dae54641cf1e52de4ef88
|
[
"MIT"
] |
permissive
|
estin/pomp-craigslist-example
|
6a06e0671b189b45d7f688583c052b3e23efd010
|
c019686776ff2235f92ece9cea19874631a561b9
|
refs/heads/master
| 2021-01-10T12:07:30.035957
| 2017-11-21T19:55:40
| 2017-11-21T19:55:40
| 52,002,919
| 38
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
import logging
from django.db import transaction
from django.core.management.base import BaseCommand
from django.core.exceptions import ValidationError
from dataview.items.models import CraigsListItem
from craigslist.pipeline import KafkaPipeline
from craigslist.utils import get_statsd_client, METRIC_ITEMS_IMPORTED_KEY
log = logging.getLogger('dataview.dbimport')
class Command(BaseCommand):
help = 'import data from kafka to db'
def handle(self, *args, **options):
try:
self._handle(*args, **options)
except Exception:
log.exception("Exception")
def _handle(self, *args, **options):
statsd = get_statsd_client(sync=True)
def _items_factory(items):
for item in items:
instance = CraigsListItem(**dict(
# convert dict byte keys to string keys and use it as
# keywords
(k.decode(), v) for k, v in item.items()
))
# validate data before insert
try:
instance.full_clean()
except ValidationError as e:
log.debug('Invalid data(%s): %s', e, dict(item))
else:
yield instance
@transaction.atomic()
def do_bulk_insert(items):
cleaned_items = list(_items_factory(items))
if cleaned_items:
CraigsListItem.objects.bulk_create(cleaned_items)
return cleaned_items
log.debug(
'Start import data from kafka',
)
for items in KafkaPipeline.dump_data(
timeout=500, poll_timeout=5000, enable_auto_commit=True):
if items:
imported = do_bulk_insert(items)
log.debug(
'Successfully imported %s from %s',
len(imported), len(items),
)
statsd.incr(METRIC_ITEMS_IMPORTED_KEY, value=len(imported))
|
[
"tatarkin.evg@gmail.com"
] |
tatarkin.evg@gmail.com
|
842b53f556e40e7ee2ce73b314af3c48d09ff59a
|
44b87d9faad99d542914c35410ba7d354d5ba9cd
|
/1/examples/srearch_a_letter.py
|
db0f0ae9e3b4cbc2f8eb95912e5afe20241d5f02
|
[] |
no_license
|
append-knowledge/pythondjango
|
586292d1c7d0ddace3630f0d77ca53f442667e54
|
0e5dab580e8cc48e9940fb93a71bcd36e8e6a84e
|
refs/heads/master
| 2023-06-24T07:24:53.374998
| 2021-07-13T05:55:25
| 2021-07-13T05:55:25
| 385,247,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
x=input("enter the word ")
y=input("enter the letter you want to find ")
flag=0
for i in x:
if i in y:
flag=1
if flag==1:
print("entered word found ")
else:
print("not found")
|
[
"lijojose95@gmail.com"
] |
lijojose95@gmail.com
|
26c4e08d795fe5047e6277af93086c7796f3774d
|
f152d89efeebc5c00c54cf7819f539aec920aa2d
|
/reviewboard/webapi/decorators.py
|
02674e04c176a61adb67121de06093f144b15995
|
[
"MIT"
] |
permissive
|
yang/reviewboard
|
c1c0cee37133004c2857ed6daac136697baa92dd
|
b893e0f28bc5d561124aaf09bc8b0e164f42c7d5
|
refs/heads/master
| 2021-01-18T11:04:37.694088
| 2010-11-27T00:09:27
| 2010-11-30T00:48:14
| 1,115,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,188
|
py
|
from django.http import HttpRequest
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.decorators import simple_decorator
from djblets.webapi.core import WebAPIResponse, WebAPIResponseError
from djblets.webapi.decorators import webapi_login_required, \
webapi_response_errors
from djblets.webapi.encoders import BasicAPIEncoder
from djblets.webapi.errors import NOT_LOGGED_IN
@webapi_response_errors(NOT_LOGGED_IN)
@simple_decorator
def webapi_check_login_required(view_func):
"""
A decorator that checks whether login is required on this installation
and, if so, checks if the user is logged in. If login is required and
the user is not logged in, they'll get a NOT_LOGGED_IN error.
"""
def _check(*args, **kwargs):
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get("auth_require_sitewide_login"):
return webapi_login_required(view_func)(*args, **kwargs)
else:
return view_func(*args, **kwargs)
view_func.checks_login_required = True
return _check
def webapi_deprecated(deprecated_in, force_error_http_status=None,
default_api_format=None, encoders=[]):
"""Marks an API handler as deprecated.
``deprecated_in`` specifies the version that first deprecates this call.
``force_error_http_status`` forces errors to use the specified HTTP
status code.
``default_api_format`` specifies the default api format (json or xml)
if one isn't provided.
"""
def _dec(view_func):
def _view(*args, **kwargs):
if default_api_format:
request = args[0]
assert isinstance(request, HttpRequest)
method_args = getattr(request, request.method, None)
if method_args and 'api_format' not in method_args:
method_args = method_args.copy()
method_args['api_format'] = default_api_format
setattr(request, request.method, method_args)
response = view_func(*args, **kwargs)
if isinstance(response, WebAPIResponse):
response.encoders = encoders
if isinstance(response, WebAPIResponseError):
response.api_data['deprecated'] = {
'in_version': deprecated_in,
}
if (force_error_http_status and
isinstance(response, WebAPIResponseError)):
response.status_code = force_error_http_status
return response
return _view
return _dec
_deprecated_api_encoders = []
def webapi_deprecated_in_1_5(view_func):
from reviewboard.webapi.encoder import DeprecatedReviewBoardAPIEncoder
global _deprecated_api_encoders
if not _deprecated_api_encoders:
_deprecated_api_encoders = [
DeprecatedReviewBoardAPIEncoder(),
BasicAPIEncoder(),
]
return webapi_deprecated(
deprecated_in='1.5',
force_error_http_status=200,
default_api_format='json',
encoders=_deprecated_api_encoders)(view_func)
|
[
"chipx86@chipx86.com"
] |
chipx86@chipx86.com
|
f238d04a62268f719a0026d5246ae6552ad08c38
|
bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6
|
/AtCoder/arc/025a.py
|
eca2c9978b657cb946922fb4f5f37b40b06e0566
|
[] |
no_license
|
y-oksaku/Competitive-Programming
|
3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db
|
a3ff52f538329bed034d3008e051f30442aaadae
|
refs/heads/master
| 2021-06-11T16:14:12.635947
| 2021-05-04T08:18:35
| 2021-05-04T08:18:35
| 188,639,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
D = list(map(int, input().split()))
L = list(map(int, input().split()))
ans = 0
for d, l in zip(D, L):
ans += max(d, l)
print(ans)
|
[
"y.oksaku@stu.kanazawa-u.ac.jp"
] |
y.oksaku@stu.kanazawa-u.ac.jp
|
d130c48d189ce6a8f79f9a900a9f651c67482890
|
37fd103f6b0de68512e3cb6098d0abb9220f5a7d
|
/Python from scratch/027_inclass_reg_ip.py
|
65e07f0162c6062580b2d1a4687b6a61fbc22782
|
[] |
no_license
|
FlyingMedusa/PythonELTIT
|
720d48089738b7e629cad888f0032df3a4ccea2c
|
36ab01fc9d42337e3c76c59c383d7b1a6142f9b9
|
refs/heads/master
| 2020-09-11T18:17:17.825390
| 2020-04-21T16:38:03
| 2020-04-21T16:38:03
| 222,150,066
| 0
| 0
| null | 2020-04-21T16:38:04
| 2019-11-16T19:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 471
|
py
|
import re
words = ["eloelo320", "blah@", "192.168.0.1", "asd.asd.20"]
pattern = "^\w+$" # or (longer): "^([A-Z]|[a-z]|(\d))*$"
id_pattern = "^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$"
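# expected matches (illustrative): only "eloelo320" matches pattern (word
# chars only); only "192.168.0.1" matches ip_pattern ("asd.asd.20" contains
# letters and has only three dot-separated groups)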
for word in words:
match = re.search(pattern, word)
if match:
print("matched")
else:
print("not matched")
print("*"*80)
for word in words:
    match = re.search(ip_pattern, word)
if match:
print("matched")
else:
print("not matched")
|
[
"sleboda.m98@gmail.com"
] |
sleboda.m98@gmail.com
|
acb51fce28782b1e64bb7fd83ce39d45260ae110
|
175d6cff12514da71aafef6b9ff48dd56a87db2d
|
/alveus/widgets/customized_menu.py
|
76d6dbee02fa885e376a161e4dfb7dd9543930bf
|
[
"MIT"
] |
permissive
|
FrederikLehn/alveus
|
d309eea98bd36f06709c55a18f0855f38b5420a9
|
71a858d0cdd8a4bbd06a28eb35fa7a8a7bd4814b
|
refs/heads/main
| 2023-06-26T02:29:59.236579
| 2021-07-30T11:07:17
| 2021-07-30T11:07:17
| 391,029,935
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,342
|
py
|
import wx
from wx.lib.agw.flatmenu import FMRendererMgr, FMRenderer, FlatMenu, FlatMenuItem
from wx.lib.agw.flatmenu import FMRendererXP, FMRendererMSOffice2007, FMRendererVista
from wx.lib.agw.artmanager import ArtManager, DCSaver
import _icons as ico
class CustomFMRendererMgr(FMRendererMgr):
def __init__(self):
super().__init__()
#if hasattr(self, '_alreadyInitialized'):
# return
#self._alreadyInitialized = True
#self._currentTheme = StyleDefault
self._currentTheme = 0
self._renderers = []
self._renderers.append(CustomFMRenderer())
#self._renderers.append(FMRendererXP())
#self._renderers.append(FMRendererMSOffice2007())
#self._renderers.append(FMRendererVista())
class CustomFMRenderer(FMRendererVista):
def __init__(self):
super().__init__()
# self.menuBarFaceColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE)
#
# self.buttonBorderColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
# self.buttonFaceColour = ArtManager.Get().LightColour(self.buttonBorderColour, 75)
# self.buttonFocusBorderColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
# self.buttonFocusFaceColour = ArtManager.Get().LightColour(self.buttonFocusBorderColour, 75)
# self.buttonPressedBorderColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
# self.buttonPressedFaceColour = ArtManager.Get().LightColour(self.buttonPressedBorderColour, 60)
#
# self.menuFocusBorderColour = wx.RED #wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
# self.menuFocusFaceColour = ArtManager.Get().LightColour(self.buttonFocusBorderColour, 75)
# self.menuPressedBorderColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
# self.menuPressedFaceColour = ArtManager.Get().LightColour(self.buttonPressedBorderColour, 60)
#
# self.menuBarFocusBorderColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
# self.menuBarFocusFaceColour = ArtManager.Get().LightColour(self.buttonFocusBorderColour, 75)
# self.menuBarPressedBorderColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
# self.menuBarPressedFaceColour = ArtManager.Get().LightColour(self.buttonPressedBorderColour, 60)
def DrawButtonColour(self, dc, rect, state, colour):
"""
Draws a button using the Vista theme.
:param `dc`: an instance of :class:`DC`;
:param `rect`: the an instance of :class:`Rect`, representing the button client rectangle;
:param integer `state`: the button state;
:param `colour`: a valid :class:`Colour` instance.
"""
artMgr = ArtManager.Get()
# Keep old pen and brush
dcsaver = DCSaver(dc)
# same colours as used on ribbon
outer = wx.Colour(242, 201, 88)
inner = wx.WHITE
top = wx.Colour(255, 227, 125)
bottom = wx.Colour(253, 243, 204)
bdrRect = wx.Rect(*rect)
filRect = wx.Rect(*rect)
filRect.Deflate(1, 1)
r1, g1, b1 = int(top.Red()), int(top.Green()), int(top.Blue())
r2, g2, b2 = int(bottom.Red()), int(bottom.Green()), int(bottom.Blue())
dc.GradientFillLinear(filRect, top, bottom, wx.SOUTH)
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.SetPen(wx.Pen(outer))
dc.DrawRoundedRectangle(bdrRect, 3)
bdrRect.Deflate(1, 1)
dc.SetPen(wx.Pen(inner))
dc.DrawRoundedRectangle(bdrRect, 2)
class CustomMenu(FlatMenu):
def __init__(self, parent=None):
super().__init__(parent=parent)
self._rendererMgr = CustomFMRendererMgr()
def CustomPopup(self):
if self.GetMenuItems():
pos = wx.GetMousePosition()
self.Popup(wx.Point(pos.x, pos.y), self.GetParent())
# common item implementations for ease of use ----------------------------------------------------------------------
def AppendCollapseItem(self, method, bind_to=None):
return self.AppendGenericItem('Collapse all', method, bitmap=ico.collapse_16x16.GetBitmap(), bind_to=bind_to)
def AppendCopyItem(self, method, bind_to=None):
return self.AppendGenericItem('Copy', method, bitmap=ico.copy_16x16.GetBitmap(), bind_to=bind_to)
def AppendCutItem(self, method, bind_to=None):
return self.AppendGenericItem('Cut', method, bitmap=ico.cut_16x16.GetBitmap(), bind_to=bind_to)
def AppendDeleteItem(self, method, bind_to=None):
return self.AppendGenericItem('Delete', method, bitmap=ico.delete_16x16.GetBitmap(), bind_to=bind_to)
def AppendExpandItem(self, method, bind_to=None):
return self.AppendGenericItem('Expand all', method, bitmap=ico.expand_16x16.GetBitmap(), bind_to=bind_to)
def AppendExportExcel(self, method, bind_to=None):
return self.AppendGenericItem('Export to Excel', method, bitmap=ico.export_spreadsheet_16x16.GetBitmap(), bind_to=bind_to)
def AppendGenericItem(self, text, method, bitmap=wx.NullBitmap, bind_to=None):
if bind_to is None:
bind_to = self.GetParent()
item = CustomMenuItem(self, wx.ID_ANY, text, normalBmp=bitmap)
self.AppendItem(item)
bind_to.Bind(wx.EVT_MENU, method, item)
return item
def AppendOpenItem(self, method, bind_to=None):
return self.AppendGenericItem('Open', method, bitmap=ico.settings_page_16x16.GetBitmap(), bind_to=bind_to)
def AppendPasteItem(self, method, bind_to=None):
return self.AppendGenericItem('Paste', method, bitmap=ico.paste_16x16.GetBitmap(), bind_to=bind_to)
class CustomMenuItem(FlatMenuItem):
def __init__(self, parent, id=wx.ID_SEPARATOR, label="", helpString="", kind=wx.ITEM_NORMAL, subMenu=None,
normalBmp=wx.NullBitmap, disabledBmp=wx.NullBitmap, hotBmp=wx.NullBitmap):
super().__init__(parent, id=id, label=label, helpString=helpString, kind=kind, subMenu=subMenu,
normalBmp=normalBmp, disabledBmp=disabledBmp, hotBmp=hotBmp)
def SetBitmap(self, bmp):
self._normalBmp = bmp
|
[
"noreply@github.com"
] |
FrederikLehn.noreply@github.com
|
2fdec4c0f0f3dab907001d6f75807c4de79d3ff9
|
6f1cadc49bc86ea49fd32c64397bfecfd9666f19
|
/C2/pulsar/implant/migrations/0002_auto_20150827_1851.py
|
ca655a540d156e556deace4637d8d630fee4b98d
|
[
"BSD-3-Clause"
] |
permissive
|
killvxk/Pulsar-1
|
f073c2273e9d4040acc3842963b018d920e78aa4
|
d290c524674eabb0444ac8c0b1ee65ea1ad44f1f
|
refs/heads/master
| 2020-06-24T22:38:25.551118
| 2019-07-27T03:45:25
| 2019-07-27T03:45:25
| 199,111,787
| 0
| 0
| null | 2019-07-27T03:44:52
| 2019-07-27T03:44:51
| null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('implant', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='implant',
name='uuid',
field=models.CharField(max_length=36),
),
]
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
fa8beb3f3c45d810e244afa3a207660de72aae1e
|
c829a8654d4adcba7944f1aa48c2643c2a2a2803
|
/sony_utils/split.py
|
64caf82d8c17086980dca4436a62a0b48901e234
|
[] |
no_license
|
muma378/Utils
|
d85390f84226b63474c815285acb6ce351ac0c22
|
a6ae14f86de360bdabd9fa7f39cd8b05bbd505fb
|
refs/heads/master
| 2020-05-21T13:35:51.908847
| 2017-02-05T06:11:45
| 2017-02-05T06:11:45
| 48,424,512
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
import os
import sys
import subprocess
import datetime
CMD_TEMPLATE = "cut.exe {src_wav} {dst_wav} {start} {end}"
NAME = "emotion_F_"
DECODING = 'gb2312' if os.name=='nt' else 'utf-8'
# split the wav as the information provided by several columns
def split_by_cols(cols_file, src_wav, dst_dir='.', name_prefix=NAME):
with open(cols_file, 'r') as f:
counter = 0
for timeline in f:
start, end, text = map(lambda x: x.strip(), timeline.split("\t"))
to_sec = lambda x: str(float(x.split(":")[0])*60 + float(x.split(":")[1]))
start, end = to_sec(start), to_sec(end)
counter += 1
dst_file = os.path.join(dst_dir, unicode(name_prefix+str(counter))).encode(DECODING)
# to generate the wave
dst_wav = dst_file + '.wav'
cmd = CMD_TEMPLATE.format(**locals())
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
subprocess.check_call(cmd, shell=True)
# to generate the text
with open(dst_file+".txt", "w") as t:
t.write(text)
if __name__ == '__main__':
split_by_cols(sys.argv[1], sys.argv[2])
|
[
"muma.378@163.com"
] |
muma.378@163.com
|
d032794e6b78ff7d03d03deda884cfbf3e772619
|
caa175a933aca08a475c6277e22cdde1654aca7b
|
/acondbs/db/__init__.py
|
5656bd250d55b0a328a26007e1eeb74511f46e9f
|
[
"MIT"
] |
permissive
|
simonsobs/acondbs
|
01d68ae40866461b85a6c9fcabdfbea46ef5f920
|
d18c7b06474b0dacb1dcf1c6dbd1e743407645e2
|
refs/heads/main
| 2023-07-07T04:33:40.561273
| 2023-06-28T22:08:00
| 2023-06-28T22:08:00
| 239,022,783
| 0
| 1
|
MIT
| 2023-06-26T20:36:39
| 2020-02-07T21:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
"""SQLAlchemy and DB related
This package contains functions, classes, and other objects that are
related to SQLAlchemy and the DB except ORM model declarations.
"""
from pathlib import Path
from flask import Flask
from flask_migrate import Migrate
from .cmds import (
backup_db_command,
dump_db_command,
export_csv_command,
import_csv_command,
init_db_command,
)
from .sa import sa
migrate = Migrate()
_MIGRATIONS_DIR = str(Path(__file__).resolve().parent.parent / 'migrations')
def init_app(app: Flask) -> None:
"""Initialize the Flask application object
This function is called by `create_app()` of Flask
Parameters
----------
app : Flask
The Flask application object, an instance of `Flask`
"""
sa.init_app(app)
migrate.init_app(app, sa, directory=_MIGRATIONS_DIR)
app.cli.add_command(init_db_command)
app.cli.add_command(dump_db_command)
app.cli.add_command(import_csv_command)
app.cli.add_command(export_csv_command)
app.cli.add_command(backup_db_command)
|
[
"tai.sakuma@gmail.com"
] |
tai.sakuma@gmail.com
|
fd78af3570754694ae18160dcad79b077bc0eeb9
|
242086b8c6a39cbc7af3bd7f2fd9b78a66567024
|
/python/PP4E-Examples-1.4/Examples/PP4E/Dbase/TableBrowser/dbview.py
|
9975899912c220e9ca0a023de57601b57da0cc5b
|
[] |
no_license
|
chuzui/algorithm
|
7537d0aa051ac4cbe9f6a7ca9a3037204803a650
|
c3006b24c4896c1242d3ceab43ace995c94f10c8
|
refs/heads/master
| 2021-01-10T13:05:30.902020
| 2015-09-27T14:39:02
| 2015-09-27T14:39:02
| 8,404,397
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
##################################################################
# view any existing shelve directly; this is more general than a
# "formtable.py shelve 1 filename" cmdline--only works for Actor;
# pass in a filename (and mode) to use this to browse any shelve:
# formtable auto picks up class from the first instance fetched;
# run dbinit1 to (re)initialize dbase shelve with a template.
##################################################################
from sys import argv
from formtable import *
from formgui import FormGui
mode = 'class'
file = '../data/mydbase-' + mode
if len(argv) > 1: file = argv[1] # dbview.py file? mode??
if len(argv) > 2: mode = argv[2]
if mode == 'dict':
table = ShelveOfDictionary(file) # view dictionaries
else:
table = ShelveOfInstance(file) # view class objects
FormGui(table).mainloop()
table.close() # close needed for some dbm
|
[
"zui"
] |
zui
|
45d7bb9e577d90e6669bedad91fe02a0067a2061
|
41cd1bcff0166ed3aab28a183a2837adaa2d9a07
|
/allauth/account/decorators.py
|
eb906aad176d794c9e8a3407a9d1495c7ae1d76d
|
[
"MIT"
] |
permissive
|
thomaspurchas/django-allauth
|
694dde8615b90cd4768e7f9eda79fdcf6fe3cdb6
|
d7a8b9e13456180648450431057a206afa689373
|
refs/heads/master
| 2022-02-04T03:18:25.851391
| 2013-05-20T11:26:55
| 2013-05-20T11:26:55
| 7,754,028
| 1
| 0
|
MIT
| 2022-02-01T23:04:02
| 2013-01-22T14:44:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,627
|
py
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.shortcuts import render
from .models import EmailAddress
from .utils import send_email_confirmation
def verified_email_required(function=None,
login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"""
    Even when email verification is not mandatory during signup, there
    may be circumstances during which you really want to prevent
    unverified users from proceeding. This decorator ensures the user is
    authenticated and has a verified email address. If the former is
    not the case then the behavior is identical to that of the
    standard `login_required` decorator. If the latter does not hold,
    email verification mails are automatically re-sent and the user is
    presented with a page informing them that they need to verify their
    email address.
"""
def decorator(view_func):
@login_required(redirect_field_name=redirect_field_name,
login_url=login_url)
def _wrapped_view(request, *args, **kwargs):
if not EmailAddress.objects.filter(user=request.user,
verified=True).exists():
send_email_confirmation(request, request.user)
return render(request,
'account/verified_email_required.html')
return view_func(request, *args, **kwargs)
return _wrapped_view
if function:
return decorator(function)
return decorator
|
[
"raymond.penners@intenct.nl"
] |
raymond.penners@intenct.nl
|
3397fdf03555cbfe28cc3fed54c3f4f02c8e6c2b
|
091155389673325cfe8b0da3dc64c113f1ded707
|
/playground/segmentation/coco/solo/solo.res50.fpn.coco.800size.1x/config.py
|
66f251fa6baf96372bfaf789658e15cbd0595e82
|
[
"Apache-2.0"
] |
permissive
|
Megvii-BaseDetection/cvpods
|
7b7c808257b757d7f94d520ea03b370105fb05eb
|
2deea5dc659371318c8a570c644201d913a83027
|
refs/heads/master
| 2023-03-22T00:26:06.248877
| 2023-03-10T10:05:26
| 2023-03-10T10:05:26
| 318,124,806
| 659
| 91
|
Apache-2.0
| 2023-03-10T10:05:28
| 2020-12-03T08:26:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,606
|
py
|
import os.path as osp
from cvpods.configs.solo_config import SOLOConfig
_config_dict = dict(
MODEL=dict(
WEIGHTS="detectron2://ImageNetPretrained/MSRA/R-50.pkl",
),
DATASETS=dict(
TRAIN=("coco_2017_train",),
TEST=("coco_2017_val",),
),
SOLVER=dict(
LR_SCHEDULER=dict(
NAME="WarmupMultiStepLR",
MAX_ITER=90000,
STEPS=(60000, 80000),
WARMUP_FACTOR=1.0 / 1000,
WARMUP_ITERS=500,
WARMUP_METHOD="linear",
GAMMA=0.1,
),
OPTIMIZER=dict(
NAME="SGD",
BASE_LR=0.01,
WEIGHT_DECAY=0.0001,
MOMENTUM=0.9,
),
CHECKPOINT_PERIOD=5000,
IMS_PER_BATCH=16,
IMS_PER_DEVICE=2,
BATCH_SUBDIVISIONS=1,
),
INPUT=dict(
AUG=dict(
TRAIN_PIPELINES=[
("ResizeShortestEdge",
dict(short_edge_length=(800,), max_size=1333, sample_style="choice")),
("RandomFlip", dict()),
],
TEST_PIPELINES=[
("ResizeShortestEdge",
dict(short_edge_length=800, max_size=1333, sample_style="choice")),
],
)
),
OUTPUT_DIR=osp.join(
'/data/Outputs/model_logs/cvpods_playground',
osp.split(osp.realpath(__file__))[0].split("playground/")[-1]
),
)
class CustomSOLOConfig(SOLOConfig):
def __init__(self):
super(CustomSOLOConfig, self).__init__()
self._register_configuration(_config_dict)
config = CustomSOLOConfig()
|
[
"wangfeng02@megvii.com"
] |
wangfeng02@megvii.com
|
73199b52b898b470c3bb8e2c68de555ebab6a237
|
354ff630d5eed81ffe67be28dd82b990a733a1cd
|
/pysim/information/histogram.py
|
b5c2e51802a07e918c9766dcc879d029036a221c
|
[
"MIT"
] |
permissive
|
superpig99/pysim
|
22ba1521c0002f815f5d074114109461e0cc35fc
|
4cd5f0987d3cbdeba1c932ca845df1b0bd9d46bf
|
refs/heads/master
| 2023-05-15T05:30:01.272708
| 2020-04-02T14:25:35
| 2020-04-02T14:25:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
from typing import Union, Optional, Dict
import numpy as np
from scipy import stats
def hist_entropy(
X: np.ndarray,
bins: Union[str, int] = "auto",
correction: bool = True,
hist_kwargs: Optional[Dict] = {},
) -> float:
"""Calculates the entropy using the histogram of a univariate dataset.
Option to do a Miller Maddow correction.
Parameters
----------
X : np.ndarray, (n_samples)
the univariate input dataset
bins : {str, int}, default='auto'
the number of bins to use for the histogram estimation
correction : bool, default=True
implements the Miller-Maddow correction for the histogram
entropy estimation.
hist_kwargs: Optional[Dict], default={}
the histogram kwargs to be used when constructing the histogram
See documention for more details:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
Returns
-------
H_hist_entropy : float
the entropy for this univariate histogram
Example
-------
>> from scipy import stats
    >> from pysim.information import hist_entropy
    >> X = stats.gamma(a=10).rvs(1_000, random_state=123)
    >> hist_entropy(X)
array(2.52771628)
"""
# get histogram
hist_counts = np.histogram(X, bins=bins, **hist_kwargs)
# create random variable
hist_dist = stats.rv_histogram(hist_counts)
# calculate entropy
H = hist_dist.entropy()
# MLE Estimator with Miller-Maddow Correction
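    # the Miller-Madow term is (m - 1) / (2 * N), where m is the number of
    # non-empty bins (np.sum(hist_counts[0] > 0)) and N the sample count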
    if correction:
H += 0.5 * (np.sum(hist_counts[0] > 0) - 1) / hist_counts[0].sum()
return H
|
[
"emanjohnson91@gmail.com"
] |
emanjohnson91@gmail.com
|
6bd7da921ee4e5f2c38d8dd8832742960949e196
|
caac09a412ed9783e31e6254ba937d2ff1495dc8
|
/test/calculator_tests.py
|
974b8a61d0acd5e288c2f2b26d39039e3047ccc2
|
[
"MIT"
] |
permissive
|
ace-racer/lint-ut-circleci
|
c01095e9e41137a80499a03a81075ec86b4a9862
|
f1d6b43f97b5146c4a168636d8517a8d02a3b21e
|
refs/heads/master
| 2020-08-29T07:15:51.532944
| 2019-10-28T05:30:34
| 2019-10-28T05:30:34
| 217,963,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
import unittest
from calculator import Calculator
class CalculatorTests(unittest.TestCase):
def test_add(self):
calculator = Calculator()
self.assertEqual(calculator.add(10, 20), 30)
def test_subtract(self):
calculator = Calculator()
self.assertEqual(calculator.subtract(10, 20), -10)
def test_multiply(self):
calculator = Calculator()
        self.assertEqual(calculator.multiply(10, 20), 200)
def test_divide(self):
calculator = Calculator()
self.assertEqual(calculator.divide(10, 20), 0.5)
def suite():
"""
Test suite
:return: The test suite
"""
suite = unittest.TestSuite()
suite.addTests(
unittest.TestLoader().loadTestsFromTestCase(CalculatorTests)
)
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
[
"anuragchatterjee92@gmail.com"
] |
anuragchatterjee92@gmail.com
|
6a5cafcf6f8b670c1c3a830f0502074d89470102
|
0dfc473870552ac9384a8b24e96046728a42f6ed
|
/utest/model/test_control.py
|
1a17f6adb5d948f24ea2250696f2a05d093168e7
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
rmf/robotframework
|
fecb4821fd308d107ae94ee3077a2d968ad9163d
|
a26cd326d1a397edc56993c453380dcd9b49e407
|
refs/heads/master
| 2023-09-03T07:04:30.300003
| 2021-11-16T11:01:32
| 2021-11-16T11:01:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,125
|
py
|
import unittest
from robot.model import For, If, IfBranch, TestCase
from robot.utils.asserts import assert_equal
IF = If.IF
ELSE_IF = If.ELSE_IF
ELSE = If.ELSE
class TestFor(unittest.TestCase):
def test_string_reprs(self):
for for_, exp_str, exp_repr in [
(For(),
'FOR IN ',
"For(variables=(), flavor='IN', values=())"),
(For(('${x}',), 'IN RANGE', ('10',)),
'FOR ${x} IN RANGE 10',
"For(variables=('${x}',), flavor='IN RANGE', values=('10',))"),
(For(('${x}', '${y}'), 'IN ENUMERATE', ('a', 'b')),
'FOR ${x} ${y} IN ENUMERATE a b',
"For(variables=('${x}', '${y}'), flavor='IN ENUMERATE', values=('a', 'b'))"),
(For([u'${\xfc}'], 'IN', [u'f\xf6\xf6']),
u'FOR ${\xfc} IN f\xf6\xf6',
u"For(variables=[%r], flavor='IN', values=[%r])" % (u'${\xfc}', u'f\xf6\xf6'))
]:
assert_equal(str(for_), exp_str)
assert_equal(repr(for_), 'robot.model.' + exp_repr)
class TestIf(unittest.TestCase):
def test_type(self):
assert_equal(IfBranch().type, IF)
assert_equal(IfBranch(type=ELSE).type, ELSE)
assert_equal(IfBranch(type=ELSE_IF).type, ELSE_IF)
def test_type_with_nested_if(self):
branch = IfBranch()
branch.body.create_if()
assert_equal(branch.body[0].body.create_branch().type, IF)
assert_equal(branch.body[0].body.create_branch(ELSE_IF).type, ELSE_IF)
assert_equal(branch.body[0].body.create_branch(ELSE).type, ELSE)
def test_root_id(self):
assert_equal(If().id, None)
assert_equal(TestCase().body.create_if().id, None)
def test_branch_id_without_parent(self):
assert_equal(IfBranch().id, 'k1')
def test_branch_id_with_only_root(self):
root = If()
assert_equal(root.body.create_branch().id, 'k1')
assert_equal(root.body.create_branch().id, 'k2')
def test_branch_id_with_real_parent(self):
root = TestCase().body.create_if()
assert_equal(root.body.create_branch().id, 't1-k1')
assert_equal(root.body.create_branch().id, 't1-k2')
def test_string_reprs(self):
for if_, exp_str, exp_repr in [
(IfBranch(),
'IF None',
"IfBranch(type='IF', condition=None)"),
(IfBranch(condition='$x > 1'),
'IF $x > 1',
"IfBranch(type='IF', condition='$x > 1')"),
(IfBranch(ELSE_IF, condition='$x > 2'),
'ELSE IF $x > 2',
"IfBranch(type='ELSE IF', condition='$x > 2')"),
(IfBranch(ELSE),
'ELSE',
"IfBranch(type='ELSE', condition=None)"),
(IfBranch(condition=u'$x == "\xe4iti"'),
u'IF $x == "\xe4iti"',
u"IfBranch(type='IF', condition=%r)" % u'$x == "\xe4iti"'),
]:
assert_equal(str(if_), exp_str)
assert_equal(repr(if_), 'robot.model.' + exp_repr)
if __name__ == '__main__':
unittest.main()
|
[
"peke@iki.fi"
] |
peke@iki.fi
|
29c48053784a6f6b40a6b5ba0c848c3ad67b2000
|
420eecae12598477a4005026a250a94bb872ef81
|
/DAGMan/setup.py
|
6c6ab5acc87dbf616290a6a869c1212b3cdc414c
|
[] |
no_license
|
chadfreer/submit-examples
|
c65da1ebf7b6aee9b20a30a4d6b48a30bd02e1c1
|
cc416b30c7ff7f133e7d3cd69854886a99e3fc91
|
refs/heads/main
| 2023-07-08T12:34:36.267389
| 2021-08-18T13:56:04
| 2021-08-18T13:56:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
#!/usr/bin/env python
import os

cwd = os.getcwd()
condor_script = cwd+'/submit.condor'
retries = 2
njobs = 3
submit_script = cwd+'/scratch/dag.submit'
f_out = open(submit_script,'w')
for job_num in range(njobs):
outfile_name = 'outfile_'+str(job_num)+'A.txt'
outfile_loc = cwd+'/output/'
f_out.write("JOB\tjob" + str(job_num) +'\t' + condor_script+'\n')
f_out.write("VARS\tjob" + str(job_num) +'\t' + 'input_float = "'+str(job_num) +'"\n')
f_out.write("VARS\tjob" + str(job_num) +'\t' + 'outfile_loc = "'+str(outfile_loc) +'"\n')
f_out.write("VARS\tjob" + str(job_num) +'\t' + 'outfile_name = "'+str(outfile_name) +'"\n')
f_out.write("RETRY\tjob" + str(job_num) +'\t' + str(retries)+'\n')
f_out.close()
print('Output: ' + submit_script)
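# For njobs = 3 and retries = 2, the generated dag.submit contains lines like
# (one block per job; <cwd> stands for the working directory):
#   JOB   job0  <cwd>/submit.condor
#   VARS  job0  input_float = "0"
#   VARS  job0  outfile_loc = "<cwd>/output/"
#   VARS  job0  outfile_name = "outfile_0A.txt"
#   RETRY job0  2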
|
[
"paus@mit.edu"
] |
paus@mit.edu
|
7d8fb50e7ee7527432b24d8fb50d44b1c35dfd89
|
74482894c61156c13902044b4d39917df8ed9551
|
/test/test_address_coins_transaction_confirmed_data_item_mined_in_block.py
|
fe17c1e565968234e57b107948c613cf49feb8da
|
[
"MIT"
] |
permissive
|
xan187/Crypto_APIs_2.0_SDK_Python
|
bb8898556ba014cc7a4dd31b10e24bec23b74a19
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
refs/heads/main
| 2023-06-22T15:45:08.273635
| 2021-07-21T03:41:05
| 2021-07-21T03:41:05
| 387,982,780
| 1
| 0
|
NOASSERTION
| 2021-07-21T03:35:29
| 2021-07-21T03:35:29
| null |
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.address_coins_transaction_confirmed_data_item_mined_in_block import AddressCoinsTransactionConfirmedDataItemMinedInBlock
class TestAddressCoinsTransactionConfirmedDataItemMinedInBlock(unittest.TestCase):
"""AddressCoinsTransactionConfirmedDataItemMinedInBlock unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAddressCoinsTransactionConfirmedDataItemMinedInBlock(self):
"""Test AddressCoinsTransactionConfirmedDataItemMinedInBlock"""
# FIXME: construct object with mandatory attributes with example values
# model = AddressCoinsTransactionConfirmedDataItemMinedInBlock() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"kristiyan.ivanov@menasoftware.com"
] |
kristiyan.ivanov@menasoftware.com
|
aac20397a75eddaa76c1781124bc4879759427c2
|
b222a5b5a84ce5d4fa0ddb084cffd1619a84a17c
|
/sequence_equation/sequence_equation.py
|
7a5ca7223035ab0aec3fa4aea0a2b337cc528cbd
|
[] |
no_license
|
unabl4/HR
|
a51a5d461b3d126e1021646b9f210e099b8627b3
|
1aaf96734b8845c911d20a4955d3ffd64a2d16b9
|
refs/heads/master
| 2021-04-05T23:55:27.202440
| 2018-11-04T22:44:46
| 2018-11-04T22:44:46
| 125,117,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
#!/bin/python3
# https://www.hackerrank.com/challenges/permutation-equation/problem
# Complete the permutationEquation function below.
def permutationEquation(p):
m = {}
for a,b in enumerate(p):
m[b] = a+1
return [m[m[x+1]] for x in range(len(p))]
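# Worked example: for p = [2, 3, 1], m maps each value to its 1-based position,
# so m = {2: 1, 3: 2, 1: 3}. Then m[m[1]] = m[3] = 2, m[m[2]] = m[1] = 3,
# and m[m[3]] = m[2] = 1, giving [2, 3, 1].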
n = int(input())
p = list(map(int, input().rstrip().split()))
result = permutationEquation(p)
print('\n'.join(map(str, result)))
|
[
"unabl4@gmail.com"
] |
unabl4@gmail.com
|
b0654c2a2d79501a23167110aa08c91d2f74bc55
|
ff99c677aba11e27c252f773b52cd54f5de79279
|
/ctt-server/openapi_server/models/test_artifact.py
|
eb26e77d966f8b9e136f61f7fd8c85e4776ebb27
|
[
"Apache-2.0"
] |
permissive
|
radon-h2020/radon-ctt
|
b7eeb82f59e36e2a258d0a2ba9cd9483eb3dd247
|
97fcf5e800a0129d24e119b430d94f07ca248ba9
|
refs/heads/master
| 2023-01-04T23:44:49.611599
| 2021-09-15T15:34:41
| 2021-09-15T15:34:41
| 235,379,642
| 0
| 7
|
Apache-2.0
| 2022-12-27T15:56:38
| 2020-01-21T15:48:45
|
Python
|
UTF-8
|
Python
| false
| false
| 5,920
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class TestArtifact(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, uuid=None, project_uuid=None, sut_tosca_path=None, sut_inputs_path=None, ti_tosca_path=None, ti_inputs_path=None, commit_hash=None): # noqa: E501
"""TestArtifact - a model defined in OpenAPI
:param uuid: The uuid of this TestArtifact. # noqa: E501
:type uuid: str
:param project_uuid: The project_uuid of this TestArtifact. # noqa: E501
:type project_uuid: str
:param sut_tosca_path: The sut_tosca_path of this TestArtifact. # noqa: E501
:type sut_tosca_path: str
        :param sut_inputs_path: The sut_inputs_path of this TestArtifact.  # noqa: E501
        :type sut_inputs_path: str
        :param ti_tosca_path: The ti_tosca_path of this TestArtifact.  # noqa: E501
        :type ti_tosca_path: str
        :param ti_inputs_path: The ti_inputs_path of this TestArtifact.  # noqa: E501
        :type ti_inputs_path: str
:param commit_hash: The commit_hash of this TestArtifact. # noqa: E501
:type commit_hash: str
"""
self.openapi_types = {
'uuid': str,
'project_uuid': str,
'sut_tosca_path': str,
'sut_inputs_path': str,
'ti_tosca_path': str,
'ti_inputs_path': str,
'commit_hash': str
}
self.attribute_map = {
'uuid': 'uuid',
'project_uuid': 'project_uuid',
'sut_tosca_path': 'sut_tosca_path',
'sut_inputs_path': 'sut_inputs_path',
'ti_tosca_path': 'ti_tosca_path',
'ti_inputs_path': 'ti_inputs_path',
'commit_hash': 'commit_hash'
}
self._uuid = uuid
self._project_uuid = project_uuid
self._sut_tosca_path = sut_tosca_path
self._sut_inputs_path = sut_inputs_path
self._ti_tosca_path = ti_tosca_path
self._ti_inputs_path = ti_inputs_path
self._commit_hash = commit_hash
@classmethod
def from_dict(cls, dikt) -> 'TestArtifact':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The TestArtifact of this TestArtifact. # noqa: E501
:rtype: TestArtifact
"""
return util.deserialize_model(dikt, cls)
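    # Example usage (hypothetical values, for illustration only):
    #   artifact = TestArtifact.from_dict({"uuid": "abc-123", "project_uuid": "def-456"})
    #   artifact.uuid  # -> "abc-123"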
@property
def uuid(self):
"""Gets the uuid of this TestArtifact.
:return: The uuid of this TestArtifact.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this TestArtifact.
:param uuid: The uuid of this TestArtifact.
:type uuid: str
"""
self._uuid = uuid
@property
def project_uuid(self):
"""Gets the project_uuid of this TestArtifact.
:return: The project_uuid of this TestArtifact.
:rtype: str
"""
return self._project_uuid
@project_uuid.setter
def project_uuid(self, project_uuid):
"""Sets the project_uuid of this TestArtifact.
:param project_uuid: The project_uuid of this TestArtifact.
:type project_uuid: str
"""
self._project_uuid = project_uuid
@property
def sut_tosca_path(self):
"""Gets the sut_tosca_path of this TestArtifact.
:return: The sut_tosca_path of this TestArtifact.
:rtype: str
"""
return self._sut_tosca_path
@sut_tosca_path.setter
def sut_tosca_path(self, sut_tosca_path):
"""Sets the sut_tosca_path of this TestArtifact.
:param sut_tosca_path: The sut_tosca_path of this TestArtifact.
:type sut_tosca_path: str
"""
self._sut_tosca_path = sut_tosca_path
@property
def sut_inputs_path(self):
"""Gets the sut_inputs_path of this TestArtifact.
:return: The sut_inputs_path of this TestArtifact.
:rtype: str
"""
return self._sut_inputs_path
@sut_inputs_path.setter
def sut_inputs_path(self, sut_inputs_path):
"""Sets the sut_inputs_path of this TestArtifact.
        :param sut_inputs_path: The sut_inputs_path of this TestArtifact.
:type sut_inputs_path: str
"""
self._sut_inputs_path = sut_inputs_path
@property
def ti_tosca_path(self):
"""Gets the ti_tosca_path of this TestArtifact.
:return: The ti_tosca_path of this TestArtifact.
:rtype: str
"""
return self._ti_tosca_path
@ti_tosca_path.setter
def ti_tosca_path(self, ti_tosca_path):
"""Sets the ti_tosca_path of this TestArtifact.
:param ti_tosca_path: The ti_tosca_path of this TestArtifact.
:type ti_tosca_path: str
"""
self._ti_tosca_path = ti_tosca_path
@property
def ti_inputs_path(self):
"""Gets the ti_inputs_path of this TestArtifact.
:return: The ti_inputs_path of this TestArtifact.
:rtype: str
"""
return self._ti_inputs_path
@ti_inputs_path.setter
def ti_inputs_path(self, ti_inputs_path):
"""Sets the ti_inputs_path of this TestArtifact.
        :param ti_inputs_path: The ti_inputs_path of this TestArtifact.
:type ti_inputs_path: str
"""
self._ti_inputs_path = ti_inputs_path
@property
def commit_hash(self):
"""Gets the commit_hash of this TestArtifact.
:return: The commit_hash of this TestArtifact.
:rtype: str
"""
return self._commit_hash
@commit_hash.setter
def commit_hash(self, commit_hash):
"""Sets the commit_hash of this TestArtifact.
:param commit_hash: The commit_hash of this TestArtifact.
:type commit_hash: str
"""
self._commit_hash = commit_hash
|
[
"duellmann@iste.uni-stuttgart.de"
] |
duellmann@iste.uni-stuttgart.de
|
149ff803cf2e12675ab01b204bcf549300d50aea
|
0e1a0329e1b96405d3ba8426fd4f935aa4d8b04b
|
/base/tests/test_create_free_client.py
|
33cbc04a108ef50da4ffb8fda7a8f0709f6032c5
|
[] |
no_license
|
ugik/Blitz
|
6e3623a4a03309e33dcc0b312800e8cadc26d28c
|
740f65ecaab86567df31d6a0055867be193afc3d
|
refs/heads/master
| 2021-05-03T20:15:20.516014
| 2015-03-11T12:33:34
| 2015-03-11T12:33:34
| 25,015,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,535
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class TestCreateFreeClient(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.driver.set_window_size(1300, 1000)
self.base_url = "http://127.0.0.1:8000"
self.verificationErrors = []
self.accept_next_alert = True
def test_create_free_client(self):
driver = self.driver
driver.get(self.base_url + "/client-signup?signup_key=TEST2")
driver.find_element_by_name("password1").clear()
driver.find_element_by_name("password1").send_keys("asdf")
driver.find_element_by_name("password2").clear()
driver.find_element_by_name("password2").send_keys("asdf")
driver.find_element_by_xpath("//button").click()
driver.find_element_by_link_text(u"Set up your profile →").click()
driver.find_element_by_css_selector("label.radio").click()
driver.find_element_by_name("age").clear()
driver.find_element_by_name("age").send_keys("30")
driver.find_element_by_xpath("//form[@id='setupForm']/div[3]/label[2]").click()
# Warning: assertTextPresent may require manual changes
self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*$")
driver.find_element_by_name("weight").clear()
driver.find_element_by_name("weight").send_keys("100")
driver.find_element_by_name("height_feet").clear()
driver.find_element_by_name("height_feet").send_keys("1")
driver.find_element_by_name("height_inches").clear()
driver.find_element_by_name("height_inches").send_keys("80")
driver.find_element_by_css_selector("button.obtn.full-width").click()
driver.find_element_by_id("skip-headshot").click()
driver.find_element_by_link_text(u"Finish Signup →").click()
# Warning: assertTextPresent may require manual changes
self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*$")
driver.get(self.base_url + "/")
# driver.find_element_by_link_text("Log Workout").click()
# import pdb; pdb.set_trace()
# driver.find_element_by_xpath("//div[2]/input").clear()
# driver.find_element_by_xpath("//div[2]/input").send_keys("90")
# driver.find_element_by_xpath("//div[3]/div[2]/input").clear()
# driver.find_element_by_xpath("//div[3]/div[2]/input").send_keys("95")
# driver.find_element_by_xpath("//div[3]/div[3]/input").clear()
# driver.find_element_by_xpath("//div[3]/div[3]/input").send_keys("7")
# driver.find_element_by_xpath("//div[4]/div[2]/input").clear()
# driver.find_element_by_xpath("//div[4]/div[2]/input").send_keys("100")
# driver.find_element_by_xpath("//div[4]/div[3]/input").clear()
# driver.find_element_by_xpath("//div[4]/div[3]/input").send_keys("8")
# driver.find_element_by_css_selector("span.small").click()
# time.sleep(1)
# driver.find_element_by_link_text("Save These Sets").click()
# driver.find_element_by_css_selector("button.obtn.log-workout-submit").click()
# Warning: assertTextPresent may require manual changes
# self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*$")
driver.get(self.base_url + "/logout")
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
        except NoAlertPresentException: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
[
"georgek@gmail.com"
] |
georgek@gmail.com
|
29dc5b8d43fb5302a1441222a19a7d9099bcf929
|
8ce0fd5e5c5b858fa24e388f2114885160421c03
|
/python/netuse/net_use.py
|
1fd08a9bcb6cb202b36976078d0b840d73d473a4
|
[] |
no_license
|
kong-ling/scripts
|
266e9975ae0156d6fdddf43b8f1d7ee20469b388
|
3c41c49646358d46871c8fd8ebe1ba52bdea046c
|
refs/heads/master
| 2021-01-10T08:29:34.772634
| 2020-01-03T09:04:57
| 2020-01-03T09:04:57
| 43,275,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
import os
import sys
import subprocess
p = subprocess.Popen('net use',
stdout = subprocess.PIPE,
stdin = subprocess.PIPE)
print(type(p))
for drv in p.stdout.readlines():
    print(drv.decode(errors="replace").strip())  # stdout lines are bytes in Python 3
|
[
"kong.ling@outlook.com"
] |
kong.ling@outlook.com
|
bfc4a81a2576286e533d2b117dd711bc3d73d013
|
3c27b86f0165ab24e6b04d505e8471e032594f0b
|
/pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GLES2/EXT/shadow_samplers.py
|
119ce82880ccfe3b97741cc729ccd3611e990b3f
|
[
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LGPL-2.1-or-later",
"GPL-3.0-only",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"MIT"
] |
permissive
|
alexus37/AugmentedRealityChess
|
8b9ccdfffc8aee93a86a44b8ef53c034ec6a10d1
|
7f600ad153270feff12aa7aa86d7ed0a49ebc71c
|
refs/heads/master
| 2020-12-24T13:29:21.967833
| 2020-02-27T09:38:50
| 2020-02-27T09:38:50
| 31,264,034
| 1
| 1
|
MIT
| 2020-02-27T09:38:52
| 2015-02-24T14:36:34
|
Python
|
UTF-8
|
Python
| false
| false
| 774
|
py
|
'''OpenGL extension EXT.shadow_samplers
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.shadow_samplers to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/shadow_samplers.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.shadow_samplers import *
from OpenGL.raw.GLES2.EXT.shadow_samplers import _EXTENSION_NAME
def glInitShadowSamplersEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
[
"alexlelidis@gmx.de"
] |
alexlelidis@gmx.de
|
fa7e40b0bb754bc7b775b514c47ad6387e9aded8
|
1ecb394b10e9622a5a5d8845b44e4585f464d42e
|
/nncp-rpc/lib/logic/Ticket/jl.py
|
6eba7a4aba23fda0584343d0701709d8cb297dec
|
[] |
no_license
|
dragonflylxp/lottory
|
7ec28d196f58692d9d417aa5d6963c182afe260a
|
b04f115df325a58148dc19d7cdfc21b28892a6a1
|
refs/heads/master
| 2020-04-28T08:53:09.007092
| 2020-04-17T10:50:41
| 2020-04-17T10:50:41
| 175,145,951
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,125
|
py
|
#encoding=utf-8
import MySQLdb
import traceback
import define
from dbpool import db_pool
from util.tools import Log
from common.dbs import BaseModel, access
from baseticket import BaseTicket
logger = Log().getLog()
class JlTicket(BaseTicket):
def __init__(self):
super(JlTicket, self).__init__(47)
@access("w")
def save_tickets(self, params):
project = params.get("project")
tickets = params.get("tickets")
mid = params.get("mid", None)
uid = project.get("f_uid")
pid = project.get("f_pid")
lotid = project.get("f_lotid")
try:
if mid is not None:
                # Update message status
sql = "UPDATE t_msg_record SET f_msgstatus=%s WHERE f_mid=%s AND f_msgstatus=%s"
ret = self.cursor.execute(sql, (define.MSG_STATUS_DONE, mid, define.MSG_STATUS_NEW))
if ret < 1:
                    logger.warning("Tickets already saved! lotid=%s|pid=%s|mid=%s", lotid, pid, mid)
raise Exception("Tickets already saved!")
sql = """
INSERT INTO t_ticket_jl(
f_uid,
f_pid,
f_lotid,
f_wtype,
f_ggtype,
f_beishu,
f_zhushu,
f_allmoney,
f_fileorcode,
f_firstprocessid,
f_lastprocessid,
f_ticketstatus)
VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
"""
args = []
for tkt in tickets:
tpl = (uid, pid, lotid, tkt["wtype"], tkt["ggtype"], tkt["beishu"],tkt["zhushu"], tkt["allmoney"],
tkt["fileorcode"], tkt["firstprocessid"], tkt["lastprocessid"], define.TICKET_STATUS_SAVED)
args.append(tpl)
self.cursor.executemany(sql, args)
self.conn.commit()
except Exception as ex:
logger.error(traceback.format_exc())
self.conn.rollback()
raise
return pid
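# Expected shape of `params` (inferred from the reads above; field values are
# illustrative, not real data):
#   params = {
#       "mid": 123,
#       "project": {"f_uid": 1, "f_pid": 2, "f_lotid": 47},
#       "tickets": [{"wtype": ..., "ggtype": ..., "beishu": ..., "zhushu": ...,
#                    "allmoney": ..., "fileorcode": ...,
#                    "firstprocessid": ..., "lastprocessid": ...}],
#   }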
|
[
"noreply@github.com"
] |
dragonflylxp.noreply@github.com
|
54eef6b92d0dea189cce79be2163407619b9dcff
|
f85cc3fb482f1b71e7a749e1bcdbe90ba78fd059
|
/swap_every_two_linked_list.py
|
382cd105924360af622ba95190ca1d4012b07495
|
[] |
no_license
|
shan-mathi/InterviewBit
|
c94e091f728b9d18d55e86130756824a3637a744
|
6688e4ff54d56cf75297bb72ce67926b40e45127
|
refs/heads/main
| 2023-06-29T10:43:29.712472
| 2021-08-05T19:06:53
| 2021-08-05T19:06:53
| 364,321,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 728
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param A : head node of linked list
# @return the head node in the linked list
def swapPairs(self, A):
if A is None or A.next is None:
return A
temp = ListNode(-1)
temp.next = A
current = temp
while current.next is not None and current.next.next is not None:
first = current.next
second = current.next.next
first.next = second.next
current.next = second
current.next.next = first
current = current.next.next
return temp.next
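# Trace of the pointer rewiring: for 1 -> 2 -> 3 -> 4, the first pass yields
# 2 -> 1 -> 3 -> 4 and the second 2 -> 1 -> 4 -> 3; an odd trailing node
# (e.g. a fifth element) is left in place.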
|
[
"noreply@github.com"
] |
shan-mathi.noreply@github.com
|
b5fca79b1608f0797b4b9d9f43d800951d1a52d8
|
d4c024cc1330aa86582e0e3f25d5c0f76a9ccbe0
|
/align/predict.py
|
2f6ebbfb812250d45681908e7d093c5c0b37572c
|
[] |
no_license
|
jeehyun100/insta_crawling
|
464d5a90a614ed4aab1ca28566ad87cbda279447
|
39ada39513bc3655adc2e624c786cc6fd8473a7e
|
refs/heads/master
| 2021-09-07T15:41:57.013861
| 2018-02-25T09:32:36
| 2018-02-25T09:32:36
| 118,861,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,797
|
py
|
import os
import cv2
import numpy as np
import tensorflow as tf
from scipy import misc
import align.detect_face as detect_face
#from facenet_tf.src.common import facenet
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import datetime
import dlib
from imutils.face_utils import rect_to_bb
import face_recognition
import matplotlib.pyplot as plt
class face_detect_crawling(object):
    @staticmethod
    def get_boxes_frame(minsize, pnet, rnet, onet, threshold, factor, frame, detect_type, margin):
boxes = []
img_size = np.asarray(frame.shape)[0:2]
if len(img_size) == 0:
return frame, boxes
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
bounding_boxes, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet,
threshold, factor)
for bounding_box in bounding_boxes:
det = np.squeeze(bounding_box[0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
if detect_type == 'dlib':
bb[2] += bb[0]
bb[3] += bb[1]
elif detect_type == 'hog' or detect_type == 'cnn':
bb[1], bb[2], bb[3], bb[0] = bounding_box
if len(boxes) == 0:
boxes.append(bb)
else:
if boxes[0][2] - boxes[0][0] < bb[2] - bb[0]:
boxes[0] = bb
if len(boxes) > 0:
cropped = frame[boxes[0][1]:boxes[0][3], boxes[0][0]:boxes[0][2], :]
else:
cropped = None
return cropped, boxes
def main():
# Arguments #
_detecter = face_detect_crawling()
filename = '/home/dev/insta_crawling/data/2pmhouse/10_20180221064634.jpg'
image = cv2.imread(filename, flags=cv2.IMREAD_COLOR)
config = tf.ConfigProto(device_count={'GPU': 0})
with tf.Session(config=config) as sess:
pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
#frame, self.minsize, self.pnet, self.rnet, self.onet,self.threshold, self.factor
minsize = 20
threshold = [0.6, 0.7, 0.7]
factor = 0.709
margin = 90
#image_size = 300
#cropped_size = 30 # rotation use
detect_type = 'mtcnn' # dlib, mtcnn, hog, cnn
rotation = False
aligned, boxes = face_detect_crawling.get_boxes_frame(minsize, pnet, rnet,onet, threshold, factor, image, detect_type, margin)
        if aligned is not None:  # '!= None' is ambiguous for NumPy arrays
            cv2.imshow("Window", aligned)
            cv2.waitKey(0)  # required for the window to actually render
            print("success")
if __name__ == "__main__":
main()
|
[
"intwis100@naver.com"
] |
intwis100@naver.com
|
0723e9d3c2f3ed3348f8962f73db031393fd5949
|
c59738ddfb08134af01d75255c4469071a1e135e
|
/002_Python_data_analysis_from_entry_to_master/ch10_Numpy科学计算库/02_数组的操作_分片_索引_拼接/005_二维数组_水平_竖直分割split.py
|
54eb1c3acdf6b7d1c6d0d1128504a00f3cc4eed3
|
[] |
no_license
|
FelixZFB/Python_data_analysis
|
371a8460da79e8fdb30b10c02b662419b62a5998
|
62f018d88d8454afe65980efd8d771ac8691956a
|
refs/heads/master
| 2020-05-20T14:46:00.606684
| 2020-02-04T14:25:20
| 2020-02-04T14:25:20
| 185,629,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
# -*- coding:utf-8 -*-
import numpy as np
# Create a 4x4 two-dimensional array
a = np.arange(16).reshape(4, 4)
print(a)
print("*" * 50)
# Horizontal/vertical splitting is the inverse of concatenation.
# Vertical split: splits along rows.
# Horizontal split: splits along columns.
# Vertical split: specify the number of sub-arrays; it must divide the axis length evenly.
b = np.vsplit(a, 2)
print(b)
print("*" * 50)
# Horizontal split
c = np.hsplit(a, 2)
print(c)
print("*" * 50)
# split can also be called directly: axis=0 acts along rows, i.e. a vertical split
e = np.split(a, 2, axis=0)
print(e)
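# The horizontal counterpart: axis=1 acts along columns (equivalent to hsplit)
f = np.split(a, 2, axis=1)
print(f)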
|
[
"18200116656@qq.com"
] |
18200116656@qq.com
|
b0000f65f8955a9141b9c9455ff591324ae8ec6d
|
6b183b67944b169048a930e34608925fb9abdc3e
|
/xicam/plugins/imagemixinplugin.py
|
66f951c285c0e0940ac64a45eb590950abeb7fcb
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause-LBNL"
] |
permissive
|
ihumphrey/Xi-cam
|
cef31bba712ebf6d330402b9a7cc24d3d096e2b8
|
a033a97c4dac55221167d9c4e914c65e835f015a
|
refs/heads/master
| 2022-05-12T22:10:24.970713
| 2021-05-12T22:29:08
| 2021-05-12T22:29:08
| 190,625,609
| 0
| 0
|
NOASSERTION
| 2019-06-06T17:52:35
| 2019-06-06T17:52:34
| null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
"""
Nothing useful here!
Why?
Because with the PluginType Plugin, we need to register the SnifferPlugin as an entrypoint for the manager to
collect it. In this case, the only meaningful part is the name of the entrypoint, not what it points to. Of course,
it has to point to something, so...
"""
from .plugin import PluginType
class ImageMixinPlugin():
"""
    This is just to tell Xi-cam how to load these plugins; it's not intended to be instantiated or subclassed.
"""
needs_qt = True
|
[
"ronpandolfi@gmail.com"
] |
ronpandolfi@gmail.com
|
bc7c9459c0f70e88e0dde36873b792973860a896
|
1a24def8879972f21d846ffb3813632070e1cf12
|
/Chapter06/0602fib-func.py
|
79885098128099a23319588b0f10e18295f92798
|
[] |
no_license
|
mushahiroyuki/beginning-python
|
03bb78c8d3f678ce39662a44046a308c99f29916
|
4d761d165203dbbe3604173c404f70a3eb791fd8
|
refs/heads/master
| 2023-08-16T12:44:01.336731
| 2023-07-26T03:41:22
| 2023-07-26T03:41:22
| 238,684,870
| 5
| 4
| null | 2023-09-06T18:34:01
| 2020-02-06T12:33:26
|
Python
|
UTF-8
|
Python
| false
| false
| 659
|
py
|
#@@range_begin(list1) # <- Ignore this line; it marks a range quoted in the book text.
# File name: Chapter06/0602fib-func.py
def fibs(num):
result = [0, 1]
for i in range(num-2):
result.append(result[-2] + result[-1])
return result
#@@range_end(list1) # <- Ignore this line; it marks a range quoted in the book text.
# Run
#@@range_begin(list2) # <- Ignore this line; it marks a range quoted in the book text.
print(fibs(10))
print(fibs(15))
#@@range_end(list2) # <- Ignore this line; it marks a range quoted in the book text.
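# Expected output:
#   [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
#   [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377]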
|
[
"hmusha@gmail.com"
] |
hmusha@gmail.com
|
548eab73bdde0f861d5c66edaeff558f9c6362e0
|
475d1b83b77e2730b53722f0d8d11b070f97018a
|
/authapp/migrations/backup/0015_auto_20210226_2036.py
|
f7748376c6cb26aa40cc60e4db0e3f89b135edda
|
[
"MIT"
] |
permissive
|
Gwellir/my-region
|
b651284ee4d4ec7ec892bb78a7ce3444c833d035
|
baacb7f54a19c55854fd068d6e38b3048a03d13d
|
refs/heads/main
| 2023-04-20T17:31:33.040419
| 2021-05-17T13:35:38
| 2021-05-17T13:35:38
| 336,533,029
| 0
| 1
|
MIT
| 2021-05-17T13:35:39
| 2021-02-06T12:31:08
|
Python
|
UTF-8
|
Python
| false
| false
| 591
|
py
|
# Generated by Django 3.1.6 on 2021-02-26 17:36
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('authapp', '0014_auto_20210226_2033'),
]
operations = [
migrations.AlterField(
model_name='appuser',
name='activation_key_expiry',
field=models.DateTimeField(default=datetime.datetime(2021, 2, 27, 17, 36, 39, 366149, tzinfo=utc), verbose_name='Крайний срок текущей активации'),
),
]
|
[
"gwellir@gmail.com"
] |
gwellir@gmail.com
|
dccb5669c5b88153b3e54fa816eb2c14f67647eb
|
aa88548d729211428b3d5d7cfb9c3ba5881e168a
|
/resilient-sdk/tests/unit/test_cmds/test_dev.py
|
e757157a1b67107f6abd07b6898790070841f922
|
[
"MIT"
] |
permissive
|
svetterIO/resilient-python-api
|
784cb83aaff353e8aa6ce0000b241a693977b5b9
|
d89440ccee621cb4268ee8ebb350e47e7c9ee26b
|
refs/heads/master
| 2023-08-31T22:15:27.588822
| 2021-10-13T13:15:12
| 2021-10-13T13:15:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,733
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
import sys
import os
import shutil
import pytest
from resilient_sdk.util import package_file_helpers as package_helpers
from resilient_sdk.util.sdk_exception import SDKException
from resilient_sdk.cmds import base_cmd, CmdDev
from tests.shared_mock_data import mock_paths
def test_cmd_dev(fx_get_sub_parser, fx_cmd_line_args_dev_set_version):
cmd_dev = CmdDev(fx_get_sub_parser)
assert isinstance(cmd_dev, base_cmd.BaseCmd)
assert cmd_dev.CMD_NAME == "dev"
assert cmd_dev.CMD_HELP == "Unsupported functionality used to help develop an app"
assert cmd_dev.CMD_USAGE == """
$ resilient-sdk dev -p <path_to_package> --set-version 36.0.0"""
assert cmd_dev.CMD_DESCRIPTION == "WARNING: Use the functionality of 'dev' at your own risk"
args = cmd_dev.parser.parse_known_args()[0]
assert args.package == "fn_main_mock_integration"
def test_set_version_bad_version(fx_get_sub_parser, fx_cmd_line_args_dev_set_bad_version):
cmd_dev = CmdDev(fx_get_sub_parser)
args = cmd_dev.parser.parse_known_args()[0]
with pytest.raises(SDKException, match=r"is not a valid version"):
CmdDev._set_version(args)
def test_set_version(fx_copy_fn_main_mock_integration, fx_get_sub_parser, fx_cmd_line_args_dev_set_version):
mock_integration_name = fx_copy_fn_main_mock_integration[0]
path_fn_main_mock_integration = fx_copy_fn_main_mock_integration[1]
# Replace cmd line arg "fn_main_mock_integration" with path to temp dir location
sys.argv[sys.argv.index(mock_integration_name)] = path_fn_main_mock_integration
# Parse the setup.py file
path_setup_py_file = os.path.join(path_fn_main_mock_integration, package_helpers.BASE_NAME_SETUP_PY)
setup_py_attributes = package_helpers.parse_setup_py(path_setup_py_file, package_helpers.SUPPORTED_SETUP_PY_ATTRIBUTE_NAMES)
# Get customize.py ImportDefinition
path_customize_py = package_helpers.get_configuration_py_file_path("customize", setup_py_attributes)
customize_py_import_definition = package_helpers.get_import_definition_from_customize_py(path_customize_py)
# Get the old_version
old_version = customize_py_import_definition["server_version"]["version"]
assert old_version == "36.0.0"
# Run _set_version
cmd_dev = CmdDev(fx_get_sub_parser)
args = cmd_dev.parser.parse_known_args()[0]
cmd_dev._set_version(args)
# Get the new_version
customize_py_import_definition = package_helpers.get_import_definition_from_customize_py(path_customize_py)
new_version = customize_py_import_definition["server_version"]["version"]
assert new_version == "35.0.0"
|
[
"shane.curtin@ie.ibm.com"
] |
shane.curtin@ie.ibm.com
|
57ec71f8f366f169baa43555a895ff8842a42839
|
c3a3beda6fe3a9bbd5b240477f542a46dd92823a
|
/functions/TH/08_keyword_args.py
|
c76f034292ded2cad08e665202843d5d48be93cc
|
[] |
no_license
|
nfarnan/cs001X_examples
|
2e64955b705c8ac9c4319becf6344d36b9560e78
|
80b612249fa97ff685f345582f184d57f94bff8e
|
refs/heads/master
| 2020-12-11T14:06:00.890074
| 2020-04-14T19:41:02
| 2020-04-14T19:41:02
| 209,681,009
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
def kw_test(a=1, b=2):
print(a, b)
kw_test()
kw_test(5, 10)
kw_test(5)
kw_test(b=10)
# this will error
#kw_test(5, 10, 20)
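# Expected output of the four valid calls above:
#   1 2
#   5 10
#   5 2
#   1 10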
|
[
"nlf4@pitt.edu"
] |
nlf4@pitt.edu
|
a098ada26a3eadfefcb12e2b1491533b9979db93
|
49e72df481bec1501202d7411a55b765c33355ba
|
/luminar project/functional_programming/list_comprension.py
|
c1b8cb686c667cb27c47cdcc0d73f8fa7a8b1deb
|
[] |
no_license
|
JEENUMINI/pythonpgmsupdated
|
ae6d62bc58e1d44ba81a21637335140119c76869
|
4816ec24693034af36d4b76887d34c9a499f4cc8
|
refs/heads/main
| 2023-01-23T13:36:28.478938
| 2020-12-15T18:18:07
| 2020-12-15T18:18:07
| 321,749,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
lst=[1,2,3,4,5,6]
#squares
squares=[i*i for i in lst]
print(squares)
square2=[i**2 for i in lst]
print(square2)
# fetch the even numbers from the list
even=[i for i in lst if i%2==0]
print(even)
# conditional placement inside a list comprehension
# task=[i+1 if i>5 else i-1 for i in lst]
# print(task)
task=[i+1 if i>5 else (i-1 if i<5 else i) for i in lst]
print(task)
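# For lst = [1, 2, 3, 4, 5, 6] the conditional comprehension yields
# [0, 1, 2, 3, 5, 7]: i-1 below 5, i unchanged at 5, i+1 above 5.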
|
[
"mini13.1994@gmail.com"
] |
mini13.1994@gmail.com
|
c1ca9fea4aec41dcab2df0653fc3476363d164e9
|
ecf6fe6aa87b2c3f041acc30fab11b0cafe3dd46
|
/architecture_py/archi_v3_9.py
|
096d9099c7efed8b00206453651eecc348653e9d
|
[] |
no_license
|
antgratia/Memoire_code
|
73c7806c4576c2e73e00d9a84b1063a2c8f6b559
|
2cdc1339ea24896a6628238f6467edff80f98166
|
refs/heads/main
| 2023-06-20T16:19:07.041464
| 2021-07-13T11:53:48
| 2021-07-13T11:53:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,178
|
py
|
import numpy as np
import os
from keras import backend as K
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.models import Sequential, Model,load_model
from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D, GlobalAveragePooling2D, MaxPool2D, Concatenate, Dropout
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.utils import plot_model
import tensorflow as tf
import sys
import traceback
import csv
from time import time
type_archi = 'ALL'
epsilon = 0.001
dropout_rate = 0.5
axis = 3
compress_factor = 0.5
# load dataset
(train_x, train_y), (test_x, test_y) = keras.datasets.cifar10.load_data()
# normalize to range 0-1
train_x = train_x / 255.0
test_x = test_x / 255.0
val_x = train_x[:5000]
val_y = train_y[:5000]
# init training time
training_time = 0
# init result test/train
test_result_loss = ""
test_result_acc = ""
train_result_loss = ""
train_result_acc = ""
nb_layers = "not build"
def id_block(X, f, filters, activation):
X_shortcut = X
X = Conv2D(filters=filters, kernel_size=(1, 1), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Add()([X, X_shortcut])# SKIP Connection
X = Activation(activation)(X)
return X
def conv_block(X, f, filters, activation, s=2):
X_shortcut = X
X = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1), padding='same', kernel_initializer=glorot_uniform(seed=0))(X)
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X_shortcut = Conv2D(filters=filters, kernel_size=(1, 1), strides=(s, s), padding='valid', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
if epsilon != 0:
X_shortcut = BatchNormalization(epsilon = epsilon, axis=axis)(X_shortcut)
X = Add()([X, X_shortcut])
X = Activation(activation)(X)
return X
def denseBlock(X, f, nb_filter, nb_layer, padding, activation):
x_input = X
for _ in range(0,nb_layer):
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=nb_filter, kernel_size=(f, f), strides=(1, 1), padding=padding)(X)
if dropout_rate != 0:
X = Dropout(dropout_rate)(X)
X = Concatenate()([X, x_input])
return X
def transition_block(X, f, nb_filter, padding, activation, op, stride):
if epsilon != 0:
X = BatchNormalization(epsilon = epsilon, axis=axis)(X)
X = Activation(activation)(X)
X = Conv2D(filters=nb_filter, kernel_size=(f, f), strides=(1, 1), padding=padding)(X)
if dropout_rate != 0:
X = Dropout(dropout_rate)(X)
if (op == 'avg'):
X = AveragePooling2D(pool_size = f, strides=stride, padding=padding)(X)
else :
X = MaxPooling2D(pool_size=f, strides=stride, padding=padding)(X)
return X
try:
def getModel():
X_input = X = Input([32, 32, 3])
X = denseBlock(X, 4, 3, 2, 'same', 'tanh')
X = denseBlock(X, 4, 3, 2, 'same', 'tanh')
X = denseBlock(X, 4, 3, 2, 'same', 'tanh')
X = denseBlock(X, 4, 3, 2, 'same', 'tanh')
X = transition_block(X, 4, 3, 'same', 'tanh', 'avg', 1)
X = id_block(X, 5, 3, 'tanh')
X = Conv2D(18, kernel_size=2, strides=1, activation='relu', padding='same')(X)
X = Conv2D(36, kernel_size=3, strides=3, activation='tanh', padding='same')(X)
X = MaxPooling2D(pool_size=5, strides=4, padding='same')(X)
X = denseBlock(X, 7, 36, 1, 'same', 'tanh')
X = denseBlock(X, 7, 36, 1, 'same', 'tanh')
X = transition_block(X, 7, 36, 'same', 'tanh', 'avg', 5)
X = GlobalAveragePooling2D()(X)
X = Dense(10, activation='softmax')(X)
model = Model(inputs=X_input, outputs=X)
return model
model = getModel()
#plot_model(model, show_shapes=True, to_file="../architecture_img/archi_v3_9.png")
model.compile(optimizer='adam', loss=keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
start = time()
es = tf.keras.callbacks.EarlyStopping(monitor='loss', verbose=1, restore_best_weights=True, patience=1)
list_cb = [es]
history = model.fit(train_x, train_y, epochs=50, batch_size=64, validation_split=0.3, callbacks=list_cb)
training_time = time()-start
print(model.evaluate(test_x, test_y))
log_file = open("../architecture_log/archi_v3_9.log" , "w")
# save test result
log_file.write('test result : ' + str(model.evaluate(test_x, test_y)))
test_result_loss = model.evaluate(test_x, test_y)[0]
test_result_acc = model.evaluate(test_x, test_y)[1]
# save train result
log_file.write('train result : ' + str(model.evaluate(test_x, test_y)))
log_file.write('History train result : ' + str(history.history))
train_result_loss = model.evaluate(train_x, train_y)[0]
train_result_acc = model.evaluate(train_x, train_y)[1]
print('OK: file ../architecture_log/archi_v3_9.log has been create')
nb_layers = len(model.layers)
log_file.close()
except:
print('error: file ../architecture_log/archi_v3_9_error.log has been create')
error_file = open("../architecture_log/archi_v3_9_error.log" , "w")
traceback.print_exc(file=error_file)
    test_result_loss = test_result_acc = "Error"
    train_result_loss = train_result_acc = "Error"
    history = None  # so the finally block below can still build the csv row
error_file.close()
finally:
file = open('../architecture_results_v3.csv', 'a', newline ='')
with file:
# identifying header
header = ['file_name', 'training_time(s)', 'test_result_loss', 'test_result_acc', 'train_result_acc', 'train_result_loss', 'nb_layers', 'epochs', 'type_archi']
writer = csv.DictWriter(file, fieldnames = header)
# writing data row-wise into the csv file
# writer.writeheader()
writer.writerow({'file_name' : 'archi_v3_9',
'training_time(s)': training_time,
'test_result_loss': test_result_loss,
'test_result_acc': test_result_acc,
'train_result_acc': train_result_acc,
'train_result_loss': train_result_loss,
'nb_layers': nb_layers,
                         'epochs' : len(history.history['loss']) if history else 'Error',
'type_archi': type_archi})
print('add line into architecture_results_v3.csv')
file.close()
|
[
"antoine.gratia@student.unamur.be"
] |
antoine.gratia@student.unamur.be
|
c18790f1c9ea9c59ebe70356fd6eafa773ba7a3f
|
32ef8621468095bf9c6dd912767cb97e9863dc25
|
/algorithms/kaprekar-numbers.py
|
d4e44e799005d22fc4109908b61ebb0ee1e5e43c
|
[] |
no_license
|
Seungju182/Hackerrank
|
286f1666be5797c1d318788753245696ef52decf
|
264533f97bcc8dc771e4e6cbae1937df8ce6bafa
|
refs/heads/master
| 2023-08-17T22:49:58.710410
| 2021-10-25T09:40:46
| 2021-10-25T09:40:46
| 337,652,088
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'kaprekarNumbers' function below.
#
# The function accepts following parameters:
# 1. INTEGER p
# 2. INTEGER q
#
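# Worked example: 45 is a Kaprekar number because 45**2 = 2025 and, splitting
# off len("45") = 2 right-hand digits, 20 + 25 = 45. For 10: 100 -> 1 + 0 != 10.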
def kaprekarNumbers(p, q):
# Write your code here
lst = []
for num in range(p, q+1):
squared = num ** 2
d = 10 ** len(str(num))
if squared // d + squared % d == num:
lst.append(num)
if lst:
print(*lst)
else:
print("INVALID RANGE")
if __name__ == '__main__':
p = int(input().strip())
q = int(input().strip())
kaprekarNumbers(p, q)
|
[
"tonysj@snu.ac.kr"
] |
tonysj@snu.ac.kr
|
9ca5ac9b0309eeb6b7ae197443b0c2be0b04ea69
|
59ac1d0f09ebfb527701031f3ab2cfbfb8055f51
|
/soapsales/customers/signals.py
|
fc93f81a6adc77f22799cb456aa27326ae4c6f21
|
[] |
no_license
|
DUMBALINYOLO/erpmanu
|
d4eb61b66cfa3704bd514b58580bdfec5639e3b0
|
db979bafcc7481f60af467d1f48d0a81bbbfc1aa
|
refs/heads/master
| 2023-04-28T13:07:45.593051
| 2021-05-12T09:30:23
| 2021-05-12T09:30:23
| 288,446,097
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
import uuid
from django.db import transaction
from customers.models import Customer
@receiver(post_save, sender=Customer)
def post_save_create_customer_number_and_customer_number(sender, instance, created, **kwargs):
if created:
instance.create_customer_account()
if instance.customer_number == '':
instance.customer_number = str(uuid.uuid4()).replace("-", '').upper()[:20]
instance.save()
|
[
"baridzimaximillem@gmail.com"
] |
baridzimaximillem@gmail.com
|
33013989259884ab0ed306b1a8ffd64725df92f6
|
7c009d77bc0124b69abdd5bbf4d00ee00a6de881
|
/process/migrations/0020_auto_20210606_1321.py
|
23a2cb9944ae79b25e63e50e2bb315ad1da36180
|
[] |
no_license
|
Rajeshwari33/POProcess
|
85598b3bb78c1bcc3bea583fcd106fd32eb97c99
|
dde399029b01554f97988709688e14193a96cb1a
|
refs/heads/master
| 2023-05-25T18:33:45.589819
| 2021-06-15T16:27:37
| 2021-06-15T16:27:37
| 367,557,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 866
|
py
|
# Generated by Django 3.2 on 2021-06-06 07:51
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('process', '0019_mailcredentials'),
]
operations = [
migrations.AddField(
model_name='mailcredentials',
name='created_by',
field=models.PositiveSmallIntegerField(null=True, verbose_name='User Id'),
),
migrations.AddField(
model_name='mailcredentials',
name='created_date',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created Date'),
),
migrations.AddField(
model_name='mailcredentials',
name='is_active',
field=models.BooleanField(default=True, verbose_name='Active ?'),
),
]
|
[
"you@example.com"
] |
you@example.com
|