| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable, ⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable, ⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable, ⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
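To make the schema above concrete, here is a minimal sketch of how rows with these columns could be loaded and filtered with pandas; the parquet file name is a hypothetical placeholder and the thresholds are arbitrary.

```python
# Minimal sketch: filtering rows that follow the schema above with pandas.
# "code_files.parquet" is a hypothetical placeholder for the actual data file.
import pandas as pd

df = pd.read_parquet("code_files.parquet")

# Keep permissively licensed, non-vendored, non-generated Python files under 10 kB.
subset = df[
    (df["license_type"] == "permissive")
    & (~df["is_vendor"])
    & (~df["is_generated"])
    & (df["language"] == "Python")
    & (df["length_bytes"] < 10_000)
]
print(subset[["repo_name", "path", "length_bytes"]].head())
```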
6d72629e2166ef7142a7423e4d47ebcc5b93f571
|
869b8c7a526ebfbe6b55832ce9f081cd0218a4f5
|
/onconet/models/spatial_transformers/factory.py
|
0ad65b9657f472eb0d7eaebe27c8ff71d8b3ee59
|
[
"MIT"
] |
permissive
|
yala/Mirai
|
54d1ab1496d35c05553cfe1c255e7c3012462ce4
|
12bace8fd6ce9c5bb129fd0d30a46a00a2f7b054
|
refs/heads/master
| 2023-04-29T11:12:28.853712
| 2023-02-24T21:28:20
| 2023-02-24T21:28:20
| 315,745,008
| 66
| 23
|
MIT
| 2022-02-07T20:49:05
| 2020-11-24T20:29:22
|
Python
|
UTF-8
|
Python
| false
| false
| 688
|
py
|
SPATIAL_TRANSFORMER_REGISTRY = {}
NO_SPATIAL_TRANSFORMER_ERR = 'Spatial transformer {} not in SPATIAL_TRANSFORMER_REGISTRY! Available spatial transformers are {}'

def RegisterSpatialTransformer(st_name):
    """Registers a spatial transformer."""
    def decorator(f):
        SPATIAL_TRANSFORMER_REGISTRY[st_name] = f
        return f
    return decorator

def get_spatial_transformer(st_name):
    """Get spatial transformer from SPATIAL_TRANSFORMER_REGISTRY based on st_name."""
    if st_name not in SPATIAL_TRANSFORMER_REGISTRY:
        raise Exception(NO_SPATIAL_TRANSFORMER_ERR.format(
            st_name, SPATIAL_TRANSFORMER_REGISTRY.keys()))
    spatial_transformer = SPATIAL_TRANSFORMER_REGISTRY[st_name]
    return spatial_transformer
|
[
"adamyala@csail.mit.edu"
] |
adamyala@csail.mit.edu
|
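The factory above is a plain decorator-based registry. A self-contained sketch of how it is meant to be used follows; the `'identity'` name and the class are made up for illustration and are not part of the Mirai code.

```python
# Self-contained illustration of the registry pattern from factory.py above.
SPATIAL_TRANSFORMER_REGISTRY = {}

def RegisterSpatialTransformer(st_name):
    """Registers a spatial transformer under the given name."""
    def decorator(f):
        SPATIAL_TRANSFORMER_REGISTRY[st_name] = f
        return f
    return decorator

@RegisterSpatialTransformer('identity')  # hypothetical transformer name
class IdentitySpatialTransformer:
    def __call__(self, x):
        return x

transformer_cls = SPATIAL_TRANSFORMER_REGISTRY['identity']
print(transformer_cls()([1, 2, 3]))  # -> [1, 2, 3]
```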
0331644aa9c6ce4d3b15eb5d286fa083f49458af
|
4723d9818d8b52bcfa2315a59ceb4acf1731b761
|
/pysgg/engine/inference.py
|
7b71c8bd65ef28cc62751bb7f02222daf39f8a96
|
[
"MIT",
"Python-2.0"
] |
permissive
|
rafa-cxg/PySGG
|
fe8b34157438d73e7a91a846a3428f411a9b2535
|
5b758cd811e81cd47781fb4028011a012d91fcff
|
refs/heads/main
| 2023-08-30T09:22:04.937170
| 2021-10-29T02:31:41
| 2021-10-29T02:31:41
| 425,873,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,090
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import torch
from tqdm import tqdm
from pysgg.config import cfg
from pysgg.data.datasets.evaluation import evaluate
from .bbox_aug import im_detect_bbox_aug
from ..utils.comm import all_gather
from ..utils.comm import is_main_process, get_world_size
from ..utils.comm import synchronize
from ..utils.timer import Timer, get_time_str
def compute_on_dataset(model, data_loader, device, synchronize_gather=True, timer=None, logger=None):
"""
:param model:
:param data_loader:
:param device:
:param synchronize_gather: gather the predictions during the training,
rather than gathering all predictions after the training
:param timer:
:return:
"""
model.eval()
results_dict = {}
cpu_device = torch.device("cpu")
for _, batch in enumerate(tqdm(data_loader)):
with torch.no_grad():
images, targets, image_ids = batch
targets = [target.to(device) for target in targets]
if timer:
timer.tic()
if cfg.TEST.BBOX_AUG.ENABLED:
output = im_detect_bbox_aug(model, images, device)
else:
# relation detection needs the targets
output = model(images.to(device), targets, logger=logger)
if timer:
if not cfg.MODEL.DEVICE == 'cpu':
torch.cuda.synchronize()
timer.toc()
output = [o.to(cpu_device) for o in output]
if synchronize_gather:
synchronize()
multi_gpu_predictions = all_gather({img_id: result for img_id, result in zip(image_ids, output)})
if is_main_process():
for p in multi_gpu_predictions:
results_dict.update(p)
else:
results_dict.update(
{img_id: result for img_id, result in zip(image_ids, output)}
)
return results_dict
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu, synchronize_gather=True):
if not synchronize_gather:
all_predictions = all_gather(predictions_per_gpu)
if not is_main_process():
return
if synchronize_gather:
predictions = predictions_per_gpu
else:
# merge the list of dicts
predictions = {}
for p in all_predictions:
predictions.update(p)
# convert a dict where the key is the index in a list
image_ids = list(sorted(predictions.keys()))
if len(image_ids) != image_ids[-1] + 1:
logger = logging.getLogger("pysgg.inference")
logger.warning(
"WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!"
"Number of images that were gathered from multiple processes is not "
"a contiguous set. Some images might be missing from the evaluation"
)
logger.info(f"len(image_ids) {len(image_ids)}, image_ids[-1] + 1 {image_ids[-1] + 1}")
# convert to a list
predictions = [predictions[i] for i in image_ids]
return predictions
def inference(
cfg,
model,
data_loader,
dataset_name,
iou_types=("bbox",),
box_only=False,
device="cuda",
expected_results=(),
expected_results_sigma_tol=4,
output_folder=None,
logger=None,
):
load_prediction_from_cache = cfg.TEST.ALLOW_LOAD_FROM_CACHE and output_folder is not None and os.path.exists(
os.path.join(output_folder, "eval_results.pytorch"))
# convert to a torch.device for efficiency
device = torch.device(device)
num_devices = get_world_size()
if logger is None:
logger = logging.getLogger("pysgg.inference")
dataset = data_loader.dataset
logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
total_timer = Timer()
inference_timer = Timer()
total_timer.tic()
if load_prediction_from_cache:
logger.info("load_prediction_from_cache: " + os.path.join(output_folder, "eval_results.pytorch"))
predictions = torch.load(os.path.join(output_folder, "eval_results.pytorch"),
map_location=torch.device("cpu"))['predictions']
else:
predictions = compute_on_dataset(model, data_loader, device,
synchronize_gather=cfg.TEST.RELATION.SYNC_GATHER,
timer=inference_timer, logger=logger)
# wait for all processes to complete before measuring the time
synchronize()
total_time = total_timer.toc()
total_time_str = get_time_str(total_time)
logger.info(
"Total run time: {} ({} s / img per device, on {} devices)".format(
total_time_str, total_time * num_devices / len(dataset), num_devices
)
)
total_infer_time = get_time_str(inference_timer.total_time)
logger.info(
"Model inference time: {} ({} s / img per device, on {} devices)".format(
total_infer_time,
inference_timer.total_time * num_devices / len(dataset),
num_devices,
)
)
if not load_prediction_from_cache:
predictions = _accumulate_predictions_from_multiple_gpus(predictions,
synchronize_gather=cfg.TEST.RELATION.SYNC_GATHER)
if not is_main_process():
return -1.0
# if output_folder is not None and not load_prediction_from_cache:
# torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
extra_args = dict(
box_only=box_only,
iou_types=iou_types,
expected_results=expected_results,
expected_results_sigma_tol=expected_results_sigma_tol,
)
return evaluate(cfg=cfg,
dataset=dataset,
predictions=predictions,
output_folder=output_folder,
logger=logger,
**extra_args)
|
[
"limo97@163.com"
] |
limo97@163.com
|
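The inference code above collects one dict of predictions per process, merges them on the main process, and warns when the gathered image ids are not a contiguous range. A minimal, framework-free sketch of that merge-and-check step (the per-process dicts below are fake, purely illustrative):

```python
# Sketch of the merge + contiguity check from _accumulate_predictions_from_multiple_gpus,
# without torch.distributed: each dict stands in for one process's results.
per_process_predictions = [
    {0: "pred_a", 2: "pred_c"},  # pretend results gathered from process 0
    {1: "pred_b", 3: "pred_d"},  # pretend results gathered from process 1
]

predictions = {}
for p in per_process_predictions:
    predictions.update(p)

image_ids = sorted(predictions.keys())
if len(image_ids) != image_ids[-1] + 1:
    print("Warning: gathered image ids are not a contiguous set")

ordered = [predictions[i] for i in image_ids]
print(ordered)  # ['pred_a', 'pred_b', 'pred_c', 'pred_d']
```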
c8db93ac8b84069eaa3db4066fd55c60f660c841
|
9249947c07f8addf64dd3d2a2f9f37d379f83921
|
/libs/gluon/contrib/generics.py
|
abaa95f64160cec56f5ab445a32e92a16a8ff4fd
|
[
"MIT"
] |
permissive
|
operepo/ope
|
eb71aa763d157416009d7c3052ace11852660e0a
|
018c82af46845315795c67c36801e2a128f515d5
|
refs/heads/master
| 2023-08-08T15:05:28.592589
| 2023-07-25T00:22:24
| 2023-07-25T00:22:24
| 96,855,111
| 12
| 11
|
MIT
| 2023-03-03T15:10:34
| 2017-07-11T05:42:14
|
Perl
|
UTF-8
|
Python
| false
| false
| 2,528
|
py
|
# fix response
import os
from gluon import current, HTTP
from gluon.html import markmin_serializer, TAG, HTML, BODY, UL, XML, H1
from gluon.contrib.fpdf import FPDF, HTMLMixin
from gluon.sanitizer import sanitize
from gluon.contrib.markmin.markmin2latex import markmin2latex
from gluon.contrib.markmin.markmin2pdf import markmin2pdf
def wrapper(f):
def g(data):
try:
output = f(data)
return XML(output)
except (TypeError, ValueError), e:
raise HTTP(405, '%s serialization error' % e)
except ImportError, e:
raise HTTP(405, '%s not available' % e)
except Exception, e:
raise HTTP(405, '%s error' % e)
return g
def latex_from_html(html):
markmin = TAG(html).element('body').flatten(markmin_serializer)
return XML(markmin2latex(markmin))
def pdflatex_from_html(html):
if os.system('which pdflatex > /dev/null') == 0:
markmin = TAG(html).element('body').flatten(markmin_serializer)
out, warnings, errors = markmin2pdf(markmin)
if errors:
current.response.headers['Content-Type'] = 'text/html'
raise HTTP(405, HTML(BODY(H1('errors'),
UL(*errors),
H1('warnings'),
UL(*warnings))).xml())
else:
return XML(out)
def pyfpdf_from_html(html):
request = current.request
def image_map(path):
if path.startswith('/%s/static/' % request.application):
return os.path.join(request.folder, path.split('/', 2)[2])
return 'http%s://%s%s' % (request.is_https and 's' or '', request.env.http_host, path)
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
# pyfpdf needs some attributes to render the table correctly:
html = sanitize(
html, allowed_attributes={
'a': ['href', 'title'],
'img': ['src', 'alt'],
'blockquote': ['type'],
'td': ['align', 'bgcolor', 'colspan', 'height', 'width'],
'tr': ['bgcolor', 'height', 'width'],
'table': ['border', 'bgcolor', 'height', 'width'],
}, escape=False)
pdf.write_html(html, image_map=image_map)
return XML(pdf.output(dest='S'))
def pdf_from_html(html):
# try use latex and pdflatex
if os.system('which pdflatex > /dev/null') == 0:
return pdflatex_from_html(html)
else:
return pyfpdf_from_html(html)
|
[
"ray@cmagic.biz"
] |
ray@cmagic.biz
|
b3b956cf8f2482a45cd555f202e06a02b98b7d41
|
5f61724fc5cad3f82094a681c853cc9f0337f050
|
/test/test_section.py
|
41d67a7de2c3641cf36ab6ae71a3a5eccb98bd42
|
[
"Apache-2.0"
] |
permissive
|
barseghyanartur/odfdo
|
2cecbbbb33f23d5ed0ba80cb9208a8e7857b93a0
|
e628a9e9daa40319a777d216ec7ebca4057b3344
|
refs/heads/master
| 2022-11-17T15:43:15.662484
| 2020-06-27T00:41:38
| 2020-06-28T22:53:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,612
|
py
|
#!/usr/bin/env python
# Copyright 2018 Jérôme Dumonteil
# Copyright (c) 2009-2010 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors (odfdo project): jerome.dumonteil@gmail.com
# The odfdo project is a derivative work of the lpod-python project:
# https://github.com/lpod/lpod-python
# Authors: Hervé Cauwelier <herve@itaapy.com>
from unittest import TestCase, main
from odfdo.document import Document
from odfdo.section import Section
class TestSection(TestCase):
def setUp(self):
self.document = document = Document('samples/base_text.odt')
self.body = document.body
def test_create_simple_section(self):
"""The idea is to test only with the mandatory arguments (none
in this case), not to test odf_create_element which is done in
test_xmlpart.
"""
element = Section()
expected = '<text:section/>'
self.assertEqual(element.serialize(), expected)
def test_create_complex_section(self):
"""The idea is to test with all possible arguments. If some arguments
are contradictory or trigger different behaviours, test all those
combinations separately.
"""
element = Section(style='Standard')
expected = '<text:section text:style-name="Standard"/>'
self.assertEqual(element.serialize(), expected)
def test_get_section_list(self):
body = self.body
sections = body.get_sections()
self.assertEqual(len(sections), 2)
second = sections[1]
name = second.name
self.assertEqual(name, "Section2")
def test_get_section_list_style(self):
body = self.body
sections = body.get_sections(style='Sect1')
self.assertEqual(len(sections), 2)
section = sections[0]
name = section.name
self.assertEqual(name, "Section1")
def test_get_section(self):
body = self.body
section = body.get_section(position=1)
name = section.name
self.assertEqual(name, "Section2")
if __name__ == '__main__':
main()
|
[
"jerome.dumonteil@gmail.com"
] |
jerome.dumonteil@gmail.com
|
df94816cf1d341645c00813001ccbbdc695412c4
|
a363b1ad911b8c989e578b5a4a412c1dd615cc39
|
/toontown/building/ToonInteriorColors.py
|
56f103d58606c8a93f75fab6679a53c759bd7641
|
[
"Apache-2.0"
] |
permissive
|
OSToontown/Project-Altis-Alpha
|
2999e944c44e0409cb19e277da61807bfa871e86
|
3a542b5d19784e9c4a5b893e88617e5280b213dd
|
refs/heads/master
| 2023-06-26T12:12:35.073103
| 2021-07-24T17:20:43
| 2021-07-24T17:20:43
| 248,406,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,857
|
py
|
from toontown.toonbase.ToontownGlobals import *
wainscottingBase = [Vec4(0.8, 0.5, 0.3, 1.0), Vec4(0.699, 0.586, 0.473, 1.0), Vec4(0.473, 0.699, 0.488, 1.0)]
wallpaperBase = [Vec4(1.0, 1.0, 0.7, 1.0),
Vec4(0.8, 1.0, 0.7, 1.0),
Vec4(0.4, 0.5, 0.4, 1.0),
Vec4(0.5, 0.7, 0.6, 1.0)]
wallpaperBorderBase = [Vec4(1.0, 1.0, 0.7, 1.0),
Vec4(0.8, 1.0, 0.7, 1.0),
Vec4(0.4, 0.5, 0.4, 1.0),
Vec4(0.5, 0.7, 0.6, 1.0)]
doorBase = [Vec4(1.0, 1.0, 0.7, 1.0)]
floorBase = [Vec4(0.746, 1.0, 0.477, 1.0), Vec4(1.0, 0.684, 0.477, 1.0)]
baseScheme = {'TI_wainscotting': wainscottingBase,
'TI_wallpaper': wallpaperBase,
'TI_wallpaper_border': wallpaperBorderBase,
'TI_door': doorBase,
'TI_floor': floorBase}
colors = {DonaldsDock: {'TI_wainscotting': wainscottingBase,
'TI_wallpaper': wallpaperBase,
'TI_wallpaper_border': wallpaperBorderBase,
'TI_door': doorBase,
'TI_floor': floorBase},
ToontownCentral: {'TI_wainscotting': wainscottingBase,
'TI_wallpaper': wallpaperBase,
'TI_wallpaper_border': wallpaperBorderBase,
'TI_door': doorBase + [Vec4(0.8, 0.5, 0.3, 1.0)],
'TI_floor': floorBase},
TheBrrrgh: baseScheme,
MinniesMelodyland: baseScheme,
DaisyGardens: baseScheme,
GoofySpeedway: baseScheme,
DonaldsDreamland: {'TI_wainscotting': wainscottingBase,
'TI_wallpaper': wallpaperBase,
'TI_wallpaper_border': wallpaperBorderBase,
'TI_door': doorBase,
'TI_floor': floorBase},
Tutorial: {'TI_wainscotting': wainscottingBase,
'TI_wallpaper': wallpaperBase,
'TI_wallpaper_border': wallpaperBorderBase,
'TI_door': doorBase + [Vec4(0.8, 0.5, 0.3, 1.0)],
'TI_floor': floorBase},
MyEstate: baseScheme}
|
[
"anythingtechpro@gmail.com"
] |
anythingtechpro@gmail.com
|
13e109703253a9f3a1da4c8dd08d3e4292e6bbd9
|
cfb76fefdf3d991ca516d10ee04afda061fd9b7f
|
/tests/test_pcolormesh.py
|
5363db32fb91669dcb3d1c11edbb4d67b15e1858
|
[
"MIT"
] |
permissive
|
chebee7i/prettyplotlib
|
77d7fd3941877d694b4237850cfa75605a2954d7
|
68841f0156e29eec4fc76c53407e67206287b861
|
refs/heads/master
| 2021-01-20T23:11:57.745272
| 2013-10-06T20:04:12
| 2013-10-06T20:04:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,589
|
py
|
__author__ = 'olga'
from matplotlib.testing.decorators import image_comparison
import prettyplotlib as ppl
from prettyplotlib import plt
import numpy as np
import os
import string
from prettyplotlib import brewer2mpl
from matplotlib.colors import LogNorm
@image_comparison(baseline_images=['pcolormesh'], extensions=['png'])
def test_pcolormesh():
fig, ax = plt.subplots(1)
np.random.seed(10)
ppl.pcolormesh(fig, ax, np.random.randn(10, 10))
# fig.savefig('%s/baseline_images/test_pcolormesh/pcolormesh.png' %
# os.path.dirname(__file__))
@image_comparison(baseline_images=['pcolormesh_labels'], extensions=['png'])
def test_pcolormesh_labels():
fig, ax = plt.subplots(1)
np.random.seed(10)
ppl.pcolormesh(fig, ax, np.random.randn(10, 10),
xticklabels=string.uppercase[:10],
yticklabels=string.lowercase[-10:])
# fig.savefig('%s/baseline_images/test_pcolormesh/pcolormesh_labels.png' %
# os.path.dirname(__file__))
@image_comparison(baseline_images=['pcolormesh_positive'], extensions=['png'])
def test_pcolormesh_positive():
fig, ax = plt.subplots(1)
np.random.seed(10)
ppl.pcolormesh(fig, ax, np.random.uniform(size=(10, 10)),
xticklabels=string.uppercase[:10],
yticklabels=string.lowercase[-10:])
# fig.savefig('%s/baseline_images/test_pcolormesh/pcolormesh_positive.png' %
# os.path.dirname(__file__))
@image_comparison(baseline_images=['pcolormesh_negative'], extensions=['png'])
def test_pcolormesh_negative():
fig, ax = plt.subplots(1)
np.random.seed(10)
ppl.pcolormesh(fig, ax, -np.random.uniform(size=(10, 10)),
xticklabels=string.uppercase[:10],
yticklabels=string.lowercase[-10:])
# fig.savefig('%s/baseline_images/test_pcolormesh/pcolormesh_negative.png' %
# os.path.dirname(__file__))
@image_comparison(baseline_images=['pcolormesh_other_cmap'], extensions=['png'])
def test_pcolormesh_other_cmap():
purple_green = brewer2mpl.get_map('PRGn', 'diverging', 11).mpl_colormap
fig, ax = plt.subplots(1)
np.random.seed(10)
ppl.pcolormesh(fig, ax, np.random.randn(10, 10), cmap=purple_green)
# fig.savefig('%s/baseline_images/test_pcolormesh/pcolormesh_other_cmap.png' %
# os.path.dirname(__file__))
@image_comparison(baseline_images=['pcolormesh_positive_other_cmap'],
extensions=['png'])
def test_pcolormesh_positive_other_cmap():
red_purple = brewer2mpl.get_map('RdPu', 'sequential', 8).mpl_colormap
fig, ax = plt.subplots(1)
np.random.seed(10)
ppl.pcolormesh(fig, ax, np.random.uniform(size=(10, 10)),
xticklabels=string.uppercase[:10],
yticklabels=string.lowercase[-10:],
cmap=red_purple)
# fig.savefig(
# '%s/baseline_images/test_pcolormesh/pcolormesh_positive_other_cmap.png' %
# os.path.dirname(__file__))
@image_comparison(baseline_images=['pcolormesh_lognorm'],
extensions=['png'])
def test_pcolormesh_lognorm():
fig, ax = plt.subplots(1)
np.random.seed(10)
x = np.abs(np.random.randn(10, 10))
ppl.pcolormesh(fig, ax, x,
norm=LogNorm(vmin=x.min().min(), vmax=x.max().max()))
# fig.savefig('%s/baseline_images/test_pcolormesh/test_pcolormesh_lognorm.png' %
# os.path.dirname(__file__))
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'])
|
[
"olga.botvinnik@gmail.com"
] |
olga.botvinnik@gmail.com
|
a32e00000f109f3f2e8079952c3278071e27cf0f
|
00c7bd96f1afab807746f1f7f013d4aadc5f6a6e
|
/sakura/common/types.py
|
32cb4d9e52fd22a39835c2c8c60f49825f3f0bb7
|
[] |
no_license
|
sakura-team/sakura
|
350ae27bdf5c3e7c338c04ec33fb50f4cdc7bfb4
|
306bfe82ffd6b204b0b574bb7f75b35712a3202f
|
refs/heads/master
| 2021-06-02T01:30:14.294572
| 2021-03-04T10:16:44
| 2021-03-04T10:16:44
| 61,307,818
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,271
|
py
|
import numpy as np
from sakura.common.errors import APIRequestError
# Strings whose length is known to be lower than NUMPY_EMBEDDED_STR_MAX_LEN
# will be encoded directly in numpy arrays.
# Others will be saved as an object pointer in numpy arrays.
NUMPY_EMBEDDED_STR_MAX_LEN = 16
SAKURA_INTEGER_TYPES = ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64')
SAKURA_FLOATING_TYPES = ('float32', 'float64')
SAKURA_NUMERIC_TYPES = SAKURA_INTEGER_TYPES + SAKURA_FLOATING_TYPES
SAKURA_NUMPY_TYPES = SAKURA_NUMERIC_TYPES + ('bool',)
def sakura_type_to_np_dtype(col_type, **params):
if col_type == 'date':
return np.dtype('float64')
if col_type == 'opaque':
return np.dtype(object)
if col_type in ('string', 'geometry'):
max_len = params.get('max_length')
if max_len is not None and max_len < NUMPY_EMBEDDED_STR_MAX_LEN:
return np.dtype(('str', max_len))
else:
return np.dtype(object)
if col_type in SAKURA_NUMPY_TYPES:
return np.dtype(col_type)
raise NotImplementedError('Do not know how to translate sakura type %s to a numpy dtype.' % repr(col_type))
def np_dtype_to_sakura_type(dt):
if dt.name in SAKURA_NUMPY_TYPES:
return dt.name, {}
if dt.name == 'object':
return 'opaque', {}
if dt.type == np.str_:
length_chars = str(dt).strip('<>U')
if length_chars == '':
max_length = 0
else:
max_length = int(length_chars)
if (max_length == 0):
return 'string', {} # unknown length
else:
return 'string', { 'max_length': max_length }
raise NotImplementedError('Do not know how to translate %s to a sakura type.' % repr(dt))
def verify_sakura_type_conversion(old_type, new_type):
if (old_type, new_type) not in (
('opaque', 'string'),
('opaque', 'geometry'),
('string', 'geometry'),
('float64', 'date')):
raise APIRequestError("Cannot convert sakura type '%s' to '%s'!" % (old_type, new_type))
def is_numeric_type(sakura_type):
return sakura_type in SAKURA_NUMERIC_TYPES
def is_floating_type(sakura_type):
return sakura_type in SAKURA_FLOATING_TYPES
|
[
"etienne.duble@imag.fr"
] |
etienne.duble@imag.fr
|
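The max_length parsing in np_dtype_to_sakura_type leans on numpy's string form of a fixed-width unicode dtype ('<U12' for 12 characters), which is why it checks dt.type rather than dt.name. A small sketch of that behaviour with plain numpy (the values are illustrative):

```python
# Sketch of the numpy dtype introspection relied on above.
import numpy as np

dt = np.dtype('U12')             # fixed-width unicode field, 12 characters
print(dt.type is np.str_)        # True  -> the branch taken in np_dtype_to_sakura_type
print(str(dt))                   # '<U12'
print(str(dt).strip('<>U'))      # '12'  -> parsed into max_length

print(np.dtype('float64').name)  # 'float64' -> listed in SAKURA_NUMPY_TYPES
print(np.dtype(object).name)     # 'object'  -> mapped to the 'opaque' sakura type
```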
614462b6940c9c08b08c24650c5683c4986c8d17
|
42d58b23f446a48907d965794a2ae1dc4ad751ab
|
/347. Top K Frequent Elements.py
|
4d70eebffd7020a5f6c65cb4f2b11935dad21ace
|
[] |
no_license
|
AsadullahFarooqi/LeetCode
|
fabec1cad1781d0300cec2931545b92dd1390900
|
aecc4efe8e0561aa4dd8a8b7f755c19982c6c2ef
|
refs/heads/master
| 2022-11-03T08:01:47.656348
| 2022-10-11T06:19:56
| 2022-10-11T06:19:56
| 187,672,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
"""
Given a non-empty array of integers, return the k most frequent elements.
Example 1:
Input: nums = [1,1,1,2,2,3], k = 2
Output: [1,2]
Example 2:
Input: nums = [1], k = 1
Output: [1]
Note:
You may assume k is always valid, 1 ≤ k ≤ number of unique elements.
Your algorithm's time complexity must be better than O(n log n), where n is the array's size.
"""
def topKFrequent(nums, k):
"""The algorithm works in the following steps
1 - It makes a hash table to store the # of appearances
2 - Sorting the hash table keys by their values in reverse order
3 - Returning the first k values
Args:
nums (TYPE): Description
k (TYPE): Description
Returns:
TYPE: Description
"""
# step 1
count_hash = {}
for i in nums:
if i in count_hash:
count_hash[i] += 1
continue
count_hash[i] = 1
# step 2
count_hash = sorted(count_hash, reverse=True, key=lambda item: count_hash[item])
# step 3
return count_hash[:k]
if __name__ == '__main__':
# n = [1,1,1,2,2,3]?
# n = [1]
n = [3,0,1,0]
k = 1
print(topKFrequent(n, k))
|
[
"asadullah.itcgcs@gmail.com"
] |
asadullah.itcgcs@gmail.com
|
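The problem statement above asks for something better than O(n log n), while the submitted solution sorts all unique keys. For comparison, a short self-contained sketch using collections.Counter, whose most_common(k) uses a heap (heapq.nlargest) instead of a full sort:

```python
# Alternative sketch: Counter.most_common(k) picks the k most frequent values
# with a heap, avoiding a full sort of every unique element.
from collections import Counter

def top_k_frequent(nums, k):
    return [value for value, _count in Counter(nums).most_common(k)]

print(top_k_frequent([1, 1, 1, 2, 2, 3], 2))  # [1, 2]
print(top_k_frequent([1], 1))                 # [1]
```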
5980dfe80dbe7976918aa72251a6196f00d24561
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/420/usersdata/329/88113/submittedfiles/exe11.py
|
fcae8958e71f64e9a6bacbbd05f96f381947b027
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# -*- coding: utf-8 -*-
n = int(input("digite um numero com 8 algarismos: "))
soma = 0
while n < 100000000:
resto = n % 10
n = (n - resto)/10
soma = soma + resto
print ('%d' % soma)
while n > 999999999:
resto = n % 10
n = (n - resto)/10
soma = soma + resto
print ('%d' % soma)
while n > 100000000:
print('NAO SEI')
while n < 99999999:
print('NAO SEI')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
5300271b0b676978f2319aff708095962e6f6c52
|
49cc32d5859e9002cb4b94ade25d72f5f4fe1612
|
/CLASE5_PYTHON_UMAKER/codigo5.py
|
52b8bdabb03966ebe2a3f86723ba2f2f85b85de5
|
[] |
no_license
|
jorgepdsML/DIGITAL-IMAGE-PROCESSING-PYTHON
|
c8441215b4cf9e912dad1885a82058c1b0bbb872
|
781c8c6d583aebda6381a301cdc33ad4d09f20c5
|
refs/heads/master
| 2021-06-26T00:06:44.344201
| 2021-01-21T17:41:36
| 2021-01-21T17:41:36
| 194,336,928
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
class point():
def __init__(self,a,b):
self.x=a
self.y=b
def coordenada(self):
print(self.x,self.y)
def __add__(self, other):
x=self.x+other.x
y=self.y+other.y
return (x,y)
def __call__(self,*args):
suma=0
for val in args:
suma=suma+val
return suma
# instantiating a new object
a1=point(10,10)
# calling the __call__ method
d=a1(100,200,1000,500,1000)
print(d)
|
[
"noreply@github.com"
] |
jorgepdsML.noreply@github.com
|
242923c5197a8ee760b120a5605b8afca943eab0
|
f99cca94f74c69bc518e298c14140534e18eabd3
|
/OrcLib/Test/TestNet.py
|
07cd62a60036cd81afff87edaf6b1fcf167c81cd
|
[] |
no_license
|
pubselenium/OrcTestToolsKit
|
d6d838d9937d2c4d86941e317cb3ff096b58e52d
|
f3ccbbceaed4f4996f6907a2f4880c2fd3f82bbb
|
refs/heads/master
| 2021-04-29T05:15:53.240714
| 2016-12-30T09:42:53
| 2016-12-30T09:42:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
import unittest
from OrcLib.LibTest import OrcTest
from OrcLib import init_log
from OrcLib.LibNet import OrcParameter
class TestOrcParameter(unittest.TestCase):
def test_send_para(self):
"""
Test get exist option
"""
OrcTest.test_print_begin()
init_log()
_para_01 = OrcParameter().send_para("abc")
OrcTest.test_print_result("Parameter para_01 is: %s, type is %s" % (_para_01, type(_para_01)))
_para_02 = OrcParameter().send_para(["abc", "def"])
OrcTest.test_print_result("Parameter para_02 is: %s, type is %s" % (_para_02, type(_para_02)))
_para_03 = OrcParameter().send_para(None)
OrcTest.test_print_result("Parameter para_03 is: %s, type is %s" % (_para_03, type(_para_03)))
_para_04 = OrcParameter().send_para(120)
OrcTest.test_print_result("Parameter para_04 is: %s, type is %s" % (_para_04, type(_para_04)))
OrcTest.test_print_end()
def test_save_pic(self):
"""
Test get exist option
"""
OrcTest.test_print_begin()
from OrcLib.LibNet import OrcHttpService
service = OrcHttpService("Driver")
service.save_pic("abc.png")
OrcTest.test_print_end()
def test_source_list(self):
"""
Test get exist option
"""
OrcTest.test_print_begin()
from OrcLib.LibNet import OrcResource
from OrcLib.LibNet import OrcResult
resource = OrcResource("BatchDef", "JSON")
result = resource.get(parameter=dict())
if isinstance(result, OrcResult):
OrcTest.test_print_result(result.status, "status")
OrcTest.test_print_result(result.message, "message")
OrcTest.test_print_result(result.data, "data")
else:
print result
OrcTest.test_print_end()
def test_source_sig(self):
"""
Test get exist option
"""
OrcTest.test_print_begin()
from OrcLib.LibNet import OrcResource
from OrcLib.LibNet import OrcResult
resource = OrcResource("BatchDef", "JSON")
result = resource.get(path=1000000024)
if isinstance(result, OrcResult):
OrcTest.test_print_result(result.status, "status")
OrcTest.test_print_result(result.message, "message")
OrcTest.test_print_result(result.data, "data")
else:
print result
OrcTest.test_print_end()
|
[
"orange21cn@126.com"
] |
orange21cn@126.com
|
396b07af836678cbf34f87d2c44a64e0513292ea
|
98cd5ddf45a73aea64bbfac0c0104829d7231b81
|
/T - Image + Hexagon/main.py
|
4a4ad6510a74b8e54b6218bb846ee6c486774044
|
[] |
no_license
|
atheis4/ETC_Modes_Extra
|
42508d523cfe632a3335e29f6e1e40af91df231b
|
d0ce221562105382a7a73cc6d280f4ad0eabf6f3
|
refs/heads/master
| 2022-04-04T11:15:07.335910
| 2020-01-03T20:27:32
| 2020-01-03T20:27:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
import os
import pygame
import time
import random
import glob
import pygame.gfxdraw
images = []
image_index = 0
image_x=100
image_y=100
image_size_x=100
image_size_y=100
border_x = 1
border_y = 1
square_start_x = 1
square_start_y = 1
square_end_x = 1
square_end_y = 1
square_size = 50
trigger = False
def setup(screen, etc):
global images, image_index
for filepath in sorted(glob.glob(etc.mode_root + '/Images/*.png')):
filename = os.path.basename(filepath)
print 'loading image file: ' + filename
img = pygame.image.load(filepath)
images.append(img)
def draw(screen, etc):
global trigger, image_x, image_y, image_size_x, image_size_y, images, image_index, square_size, border_x, border_y, square_start_x, square_start_y, square_end_x, square_end_y
color = etc.color_picker()
if etc.audio_trig or etc.midi_note_new :
trigger = True
if trigger == True :
image_x=(random.randrange(-50,1080))
image_y=(random.randrange(-50,600))
image_index += 1
if image_index == len(images) : image_index = 0
image = images[image_index]
image_size_x=int(image.get_width() * etc.knob1)
image_size_y=int(image.get_height() * etc.knob1)
image = pygame.transform.scale(image,(image_size_x, image_size_y))
border_x = int(etc.knob2 * image.get_width()) - (image.get_width() / 2)
border_y = int(etc.knob2 * image.get_height()) - (image.get_height() / 2)
square_start_x = image_x - border_x
square_start_y = image_y - border_y
square_end_x = image_size_x + (border_x*2)
square_end_y = image_size_y + (border_y*2)
pygame.draw.rect(screen, color, (square_start_x, square_start_y, square_end_x, square_end_y), 0)
#TOP TRIANGLE
pygame.gfxdraw.filled_trigon(screen, square_start_x, square_start_y, (square_end_x+image_x-border_x)-1, square_start_y, (image_x+image_size_x/2),square_start_y-((image_size_y+border_y*2)/2) , color)
#BOTTOM TRIGON
pygame.gfxdraw.filled_trigon(screen, square_start_x, image_y+square_end_y-border_y, (square_end_x+image_x-border_x)-1, image_y+square_end_y-border_y, (image_x+image_size_x/2),(image_y+square_end_y-border_y)+((image_size_y+border_y*2)/2) , color)
image.fill((255, 255, 255, etc.knob3 * 255), None, pygame.BLEND_RGBA_MULT)
screen.blit(image, (image_x,image_y))
trigger = False
|
[
"media@critterandguitari.com"
] |
media@critterandguitari.com
|
a971057d036c9e02983eea09d044b3cc1531cccc
|
526bf18a8695862067c817f432ab197ceb645f39
|
/migrations/versions/9e01343b62ef_cars_added_fields.py
|
4721e5e3683b75236d82cd6644df0c3fd3d99c76
|
[] |
no_license
|
sintimaski/bfs-be
|
a7fd623911a2220face49a0ef84574f3fd7a09a8
|
964a9c7e9cc876aaf8b0723d6b3f26bd378c3721
|
refs/heads/master
| 2023-08-02T09:00:44.855055
| 2021-09-22T13:07:01
| 2021-09-22T13:07:01
| 339,531,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
"""cars added fields
Revision ID: 9e01343b62ef
Revises: 172fb3a90b3b
Create Date: 2020-10-19 07:41:26.893114
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9e01343b62ef'
down_revision = '172fb3a90b3b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('car_product', sa.Column('trim', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('car_product', 'trim')
# ### end Alembic commands ###
|
[
"dimadrebezov@gmail.com"
] |
dimadrebezov@gmail.com
|
9d3fb5e9f0d13d0dac39ac54ebcd262cccdd485c
|
5219ea9d40a5e6187fc047d0e463ecca47654f72
|
/project_name/urls.py
|
baecf57ddd274ae81eaeab21df6fd7ecd4c440b1
|
[] |
no_license
|
wo0dyn/django-project-template
|
b5bb7ffec3a0ecd90df34fc60b6c13422e7f9de1
|
68a0eec61a09486b662cbdf72b13cd5c7b476810
|
refs/heads/master
| 2021-01-17T07:24:24.012032
| 2013-06-07T08:22:49
| 2013-06-07T08:22:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.http import HttpResponse, HttpResponsePermanentRedirect
from ratelimitbackend import admin
admin.autodiscover()
robots = lambda _: HttpResponse('User-agent: *\nDisallow:\n',
mimetype='text/plain')
favicon = lambda _: HttpResponsePermanentRedirect(
'{0}core/img/favicon.png'.format(settings.STATIC_URL)
)
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += patterns('ratelimitbackend.views',
url(r'^login/$', 'login', name='login'),
url(r'^robots.txt$', robots),
url(r'^favicon.ico$', favicon),
)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"buburno@gmail.com"
] |
buburno@gmail.com
|
d308e86366cb8b7a3aace35c26d3ce733fd7b08a
|
3c8701e04900389adb40a46daedb5205d479016c
|
/oldboy-python18/day08-接口-网络/self/网络编程/06-模拟ssh-加上报头/服务端.py
|
c4da2e73dc660420a2babf440c3e5581b3ee967d
|
[] |
no_license
|
huboa/xuexi
|
681300653b834eaf506f49987dcca83df48e8db7
|
91287721f188b5e24fbb4ccd63b60a80ed7b9426
|
refs/heads/master
| 2020-07-29T16:39:12.770272
| 2018-09-02T05:39:45
| 2018-09-02T05:39:45
| 73,660,825
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
#### establish the connection
import socket
import struct
import subprocess
phone = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  ### tcp
phone.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  ### reuse the address
phone.bind(('127.0.0.1', 8080))
phone.listen(5)
print('server start...')
while True:  ### connection loop
    conn, client_addr = phone.accept()
    print(conn, client_addr)
    ### send and receive messages over the established connection
    while True:
        try:
            cmd = conn.recv(1024)
            if not cmd: break  ### break when a linux client disconnects abnormally
            print('cmd', cmd)
            res = subprocess.Popen(cmd.decode('utf-8'),
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            stdout = res.stdout.read()
            stderr = res.stderr.read()
            ## send the fixed-length header first
            header = struct.pack('i', len(stdout) + len(stderr))
            conn.send(header)
            ## then send the real data
            conn.send(stdout)
            conn.send(stderr)
        except Exception:  ## break when a windows client disconnects abnormally
            break
    ## hang up
    conn.close()
### shut down
phone.close()
|
[
"wxcr11@gmail.com"
] |
wxcr11@gmail.com
|
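The server above frames each reply as a 4-byte struct('i') length header followed by the payload. A hypothetical client-side sketch of the matching read loop (it assumes the server above is already listening on 127.0.0.1:8080; the 'dir' command is just an example):

```python
# Hypothetical client for the header-then-payload protocol used by the server above.
import socket
import struct

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 8080))
client.send('dir'.encode('utf-8'))  # any shell command the server will run

header = client.recv(4)                      # struct.pack('i', ...) is 4 bytes
total_size = struct.unpack('i', header)[0]   # payload length announced by the server

received = b''
while len(received) < total_size:            # keep reading until the full payload arrives
    received += client.recv(1024)

print(received.decode('utf-8', errors='replace'))
client.close()
```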
0de2e57e651606fa39a419b990f8d4e0e9f98820
|
afd74aa3e8c847d557828115f48f60f696fdfe95
|
/C38/validate_getattribute.py
|
9e557d5b18eea803ad61c04b81201237089827d8
|
[
"MIT"
] |
permissive
|
BetTom/learningpython
|
f1b957607f92b4acf66aba1d22090f519824822a
|
47e78041e519ecd2e00de1b32f6416b56ce2616c
|
refs/heads/master
| 2021-10-11T09:45:40.608420
| 2019-01-24T09:44:05
| 2019-01-24T09:44:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
class CardHolder(object):
acctlen = 8
retireage = 59.5
def __init__(self, acct, name, age, addr):
self.acct = acct
self.name = name
self.age = age
self.addr = addr
def __getattribute__(self, name):
superget = object.__getattribute__
if name == 'acct':
return superget(self, 'acct')[:-3] + '***'
elif name == 'remain':
return superget(self, 'retireage') - superget(self, 'age')
else:
return superget(self, name)
def __setattr__(self, name, value):
if name == 'name':
value = value.lower().replace(' ', '_')
elif name == 'age':
if value < 0 or value > 150:
raise ValueError('invalid age')
elif name == 'acct':
value = value.replace('-', '')
if len(value) != self.acctlen:
raise TypeError('invalid acct number')
elif name == 'remain':
raise TypeError('cannot set remain')
self.__dict__[name] = value
# object.__setattr__(self, name, value)
|
[
"jpch89@outlook.com"
] |
jpch89@outlook.com
|
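The CardHolder class above intercepts every attribute read via __getattribute__ to mask the stored account number and to compute a virtual remain attribute. A stripped-down, self-contained sketch of the same technique (class name and values are illustrative only):

```python
# Simplified sketch of the __getattribute__ masking / computed-attribute technique above.
class Masked:
    retireage = 59.5

    def __init__(self, acct, age):
        self.acct = acct
        self.age = age

    def __getattribute__(self, name):
        superget = object.__getattribute__   # bypass this hook to avoid recursion
        if name == 'acct':
            return superget(self, 'acct')[:-3] + '***'
        elif name == 'remain':
            return superget(self, 'retireage') - superget(self, 'age')
        return superget(self, name)

bob = Masked('12345678', 40)
print(bob.acct)    # '12345***'  (last three digits masked)
print(bob.remain)  # 19.5        (computed, never stored)
```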
9f2a946202864a07e3ec0b8b972e50a4b51e4222
|
1803b6d5b6cd28f6719c2584f28d581811526d26
|
/p57_longerthan_specified.py
|
81fde2b4965d7b30b5967916920792216a7137a5
|
[] |
no_license
|
augustedupin123/python_practice
|
0ee2ebd30810f8df82d9e26b8d52328d052e1a5e
|
5ba1f9e4598d1eaa7f5f6f36efb5f96ca4be18a0
|
refs/heads/master
| 2022-12-08T06:15:48.808986
| 2020-08-31T19:16:15
| 2020-08-31T19:16:15
| 266,285,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
#Write a python program to find the list of words that are longer
#than n from a given list of words.
def list_of_words(l,n):
listreq = []
l1 = l.split()
for i in l1:
if len(i)>n:
listreq.append(i)
return listreq
a = input('enter the list')
n1 = int(input('enter n'))
print (list_of_words(a,n1))
|
[
"bhardwajrahul100@gmail.com"
] |
bhardwajrahul100@gmail.com
|
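A quick, self-contained check of the word-length filter above (the sentence and threshold are illustrative):

```python
# The same filter as above, followed by a sample call.
def list_of_words(l, n):
    listreq = []
    for word in l.split():
        if len(word) > n:
            listreq.append(word)
    return listreq

print(list_of_words('the quick brown fox jumps', 3))  # ['quick', 'brown', 'jumps']
```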
82f465c11b316b7121d832c85659e050bd9a19b4
|
978c9a1dd27a30b32eceed7f1518a26292695891
|
/python/2021/codewars/calculateinversions.py
|
513e292b0b59d1a782154f9bfaeb3538c3fe3baa
|
[] |
no_license
|
detcitty/100DaysOfCode
|
4da3407bdc4170f9d042f49e6c94a8469f8808f5
|
a3d989ea56491f89ece5191d5246166ca01d2602
|
refs/heads/master
| 2023-08-09T04:45:51.842305
| 2023-07-21T17:02:08
| 2023-07-21T17:02:08
| 178,976,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
# https://www.codewars.com/kata/537529f42993de0e0b00181f/train/python
from itertools import combinations
def count_inversions(array):
locations = []
for count, value in enumerate(array):
idx = value - 1
diff = idx - count
locations.append(diff)
list_combos = list(combinations(array, 2))
# Find the adjacent
# Try to sort the list and count the number of times it was sorted
for i in range(len(list_combos)):
pass
return(list_combos)
test1 = [1, 2, 3, 4] # => 0 inversions
test2 = [1, 3, 2, 4] # => 1 inversion: 2 and 3
test3 = [4, 1, 2, 3] # => 3 inversions: 4 and 1, 4 and 2, 4 and 3
test4 = [4, 3, 2, 1] # => 6 inversions: 4 and 3, 4 and 2, 4 and 1, 3 and 2, 3 and 1, 2 and 1
test5 = [5, 4, 3, 2, 1] # => 10 inversions: 5 and 4, 5 and 3, 5 and 2, 5 and 1, 4 and 3, 4 and 2, 4 and 1, 3 and 2, 3 and 1, 2 and 1
print(count_inversions(test1))
|
[
"devin.etcitty@gmail.com"
] |
devin.etcitty@gmail.com
|
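The count_inversions above is left unfinished: it builds the pair list but never counts. For reference, a self-contained brute-force sketch that does count inversions, enough to verify the commented expectations (O(n²), so only suitable for small arrays):

```python
# Brute-force inversion count: pairs (i, j) with i < j and array[i] > array[j].
from itertools import combinations

def count_inversions(array):
    return sum(1 for earlier, later in combinations(array, 2) if earlier > later)

print(count_inversions([1, 2, 3, 4]))     # 0
print(count_inversions([1, 3, 2, 4]))     # 1
print(count_inversions([4, 1, 2, 3]))     # 3
print(count_inversions([4, 3, 2, 1]))     # 6
print(count_inversions([5, 4, 3, 2, 1]))  # 10
```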
eb96d7ba59e15da0b0f51e76d65639b8b35c5cc1
|
1d277498f96998cbbdc475db17191b7d6dc371ab
|
/rap/management/commands/play.py
|
24ebadfb8155bc29f96f1cdd8ee0b3cd3017fe27
|
[] |
no_license
|
everythingability/rap
|
cfaccfbac75b7ff2522fc9bc7debb0fd3eec3559
|
44e550f1ca0ef68c1277d9904bd546c52d51a3e5
|
refs/heads/master
| 2022-12-09T07:52:27.961493
| 2020-03-18T19:11:23
| 2020-03-18T19:11:23
| 248,318,782
| 0
| 0
| null | 2022-12-08T03:50:01
| 2020-03-18T19:02:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,544
|
py
|
from django.core.management.base import BaseCommand, CommandError
from rap.models import Project, GTRCategory, HECategory, HEResearchArea, Person, Organisation
import os, sys
import csv, json
cats =["Archaeological Theory",
"Archaeology Of Human Origins",
"Archaeology of Literate Soc.",
"Architecture HTP",
"Environmental planning",
"Heritage Management",
"Landscape & Environ. Archaeol.",
"Prehistoric Archaeology",
"Science-Based Archaeology"]
def fixDate(s): # 01/02/2020 to YYYY-MM-DD
try:
if s !=None:
dItems = s.split("/")
year = dItems[2]
month = dItems[1]
day = dItems[0]
d = f"{year}-{month}-{day}"
return d
else:
return None
except:
return None
dir_path = os.path.dirname(os.path.realpath(__file__))
class Command(BaseCommand):
# python manage.py import_tools file="tools.csv"
help = 'meant to help me get started, importing a lot of initial data etc'
def add_arguments(self, parser):
''#parser.add_argument('file', type=str)
def handle(self, *args, **options):
#filename = options['file']
try:
#Project, GTRCategory, HECategory, HEResearchArea, Person, Organisation
hecategories = HECategory.objects.all()
gtrs = GTRCategory.objects.all()
heresearchareas = HEResearchArea.objects.order_by('hecategory')
previous_category = None
for n,heresearcharea in enumerate(heresearchareas):
category = heresearcharea.hecategory ######### MAKE THE HEADER
if category != previous_category:
total = 0
print("\n")
print(category)
print("'" * 80)
c = 0
these_gtrs = heresearcharea.gtrs.all()
these_ids = []
for t in these_gtrs:
these_ids.append(t.id)
#print (these_ids)
for gtr in these_gtrs:
c = c + Project.objects.filter( gtrs__in=these_ids ).count()
#total = total + c
print( heresearcharea.name, c)
previous_category = category
except Exception as err:
print(str(err))
raise CommandError( print ('Error on line {}'.format(sys.exc_info()[-1].tb_lineno)))
self.stdout.write(self.style.SUCCESS('Done!'))
|
[
"="
] |
=
|
958fc768494ec3c7056fc6c7e6555e4a4a2b2dd8
|
34a633e2d60c5adf0e9f420bcc9587ac66b6766b
|
/kats/tests/models/test_stlf_model.py
|
ffe21a735545d553826036ca2afecdb1086247f7
|
[
"MIT"
] |
permissive
|
kpbMarques/Kats
|
5cdd7ac61e23218cb5588ef775ca194224abe739
|
259fdf8f80f628b44f9ee8881f112b1e9fd7a85f
|
refs/heads/master
| 2023-07-02T15:55:30.915358
| 2021-08-10T19:47:44
| 2021-08-10T19:49:02
| 394,783,804
| 1
| 0
|
MIT
| 2021-08-10T21:19:45
| 2021-08-10T21:19:45
| null |
UTF-8
|
Python
| false
| false
| 2,529
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from unittest import TestCase
import pkgutil
import io
import pandas as pd
from kats.consts import TimeSeriesData
from kats.models.stlf import STLFModel, STLFParams
def load_data(file_name):
ROOT = "kats"
if "kats" in os.getcwd().lower():
path = "data/"
else:
path = "kats/data/"
data_object = pkgutil.get_data(ROOT, path + file_name)
return pd.read_csv(io.BytesIO(data_object), encoding="utf8")
class testSTLFModel(TestCase):
def setUp(self):
DATA = load_data("air_passengers.csv")
DATA.columns = ["time", "y"]
self.TSData = TimeSeriesData(DATA)
DATA_daily = load_data("peyton_manning.csv")
DATA_daily.columns = ["time", "y"]
self.TSData_daily = TimeSeriesData(DATA_daily)
DATA_multi = load_data("multivariate_anomaly_simulated_data.csv")
self.TSData_multi = TimeSeriesData(DATA_multi)
def test_fit_forecast(self) -> None:
for method in ["theta", "prophet", "linear", "quadratic"]:
params = STLFParams(m=12, method=method)
m = STLFModel(self.TSData, params)
m.fit()
m.predict(steps=30)
m.predict(steps=30, include_history=True)
params = STLFParams(m=7, method="theta")
m_daily = STLFModel(self.TSData_daily, params)
m_daily.fit()
m_daily.predict(steps=30)
m.plot()
m_daily.predict(steps=30, include_history=True)
m.plot()
# test when m > the length of time series
params = STLFParams(m=10000, method="theta")
self.assertRaises(
ValueError,
STLFModel,
self.TSData_daily,
params,
)
def test_others(self) -> None:
# test param value error
self.assertRaises(
ValueError,
STLFParams,
method="random_model",
m=12,
)
params = STLFParams(m=12, method="theta")
params.validate_params()
# test model param
self.assertRaises(
ValueError,
STLFModel,
self.TSData_multi,
params,
)
# test __str__ method
m = STLFModel(self.TSData, params)
self.assertEqual(m.__str__(), "STLF")
if __name__ == "__main__":
unittest.main()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
0ecab6beb8846f90119b772c94a608c39ed4b8ea
|
f0417264adb22d064b0b83b5a24ae33208c0a62b
|
/H2TauTau/scripts/harvest_old.py
|
e4891de4db0866345f1c6c3f5b38d618c20264e6
|
[] |
no_license
|
cbernet/cmgtools-lite
|
6ae1a0bfc45ff03b14195ab0f05b353ffde9cd2e
|
359209cd4f982cd1b9d8e3cb366de32b7b46113d
|
refs/heads/htt_9_4_11_cand1_v1
| 2021-01-18T15:56:14.845371
| 2019-10-24T14:00:32
| 2019-10-24T14:00:32
| 86,693,438
| 1
| 0
| null | 2019-06-07T09:04:05
| 2017-03-30T11:09:21
|
Python
|
UTF-8
|
Python
| false
| false
| 382
|
py
|
#!/usr/bin/env python
from CMGTools.H2TauTau.harvest.harvest_old import harvest, get_options
if __name__ == '__main__':
options, args = get_options()
src = args[0]
harvest(src,
subdir_pattern=options.subdir_pattern,
tgz_pattern=options.tgz_pattern,
apply_ff=options.apply_ff,
convert_ntuple=options.convert_ntuple)
|
[
"colin.bernet@cern.ch"
] |
colin.bernet@cern.ch
|
489a78e8ffb4d1cf110c0af54cad92b01c4d83b7
|
f7550c4964dc8f3c59dbcebe39e947bd6a264dba
|
/9. Generic Trees/take input Tree .py
|
035d0395b6c16a2194ca74782181cd193b973a60
|
[] |
no_license
|
Jashwanth-k/Data-Structures-and-Algorithms
|
db5e2e30932e0a35db578c19ae6cff9f147b7c3d
|
1ebf9986999a474cb094f3ab04616a46f2887043
|
refs/heads/main
| 2023-08-25T02:57:17.394322
| 2021-10-11T15:27:56
| 2021-10-11T15:27:56
| 402,448,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
class TreeNode:
def __init__(self,data):
self.data = data
self.children = list()
def printTreeDetailed(root):
if root is None:
return
print(root.data,end=':')
for child in root.children:
if child != None:
print(child.data,end=',')
print()
for child in root.children:
printTreeDetailed(child)
def takeinput():
print('enter root data')
rootdata = int(input())
if rootdata == -1:
return
root = TreeNode(rootdata)
print('enter no of children for:',rootdata)
for i in range(int(input())):
childNode = takeinput()
root.children.append(childNode)
return root
root = takeinput()
printTreeDetailed(root)
|
[
"noreply@github.com"
] |
Jashwanth-k.noreply@github.com
|
bbbb98922649b61e90795c6fd283613ad91677fd
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayTransferThirdpartyBillCreateResponse.py
|
5eec748f112c295bae2984605e0dddffe8587281
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayTransferThirdpartyBillCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayTransferThirdpartyBillCreateResponse, self).__init__()
self._order_id = None
self._order_type = None
self._payment_id = None
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def order_type(self):
return self._order_type
@order_type.setter
def order_type(self, value):
self._order_type = value
@property
def payment_id(self):
return self._payment_id
@payment_id.setter
def payment_id(self, value):
self._payment_id = value
def parse_response_content(self, response_content):
response = super(AlipayTransferThirdpartyBillCreateResponse, self).parse_response_content(response_content)
if 'order_id' in response:
self.order_id = response['order_id']
if 'order_type' in response:
self.order_type = response['order_type']
if 'payment_id' in response:
self.payment_id = response['payment_id']
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
db8858d3d0b03c9346f4b028be2f3a4fc6c900e7
|
37db56765276c0835a2c7e3955c412ce204836c1
|
/1732.py
|
a3221be6890f5768a8a8d1a01f9b713a2f3c54bd
|
[] |
no_license
|
supperllx/LeetCode
|
9d0a3a7258d1cff6afa6e77f61a2e697834914ca
|
df3a589ea858218f689fe315d134adc957c3debd
|
refs/heads/master
| 2023-05-01T06:57:17.403568
| 2021-05-19T18:29:25
| 2021-05-19T18:34:03
| 288,351,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
from typing import List  # needed for the List[int] annotation below

class Solution:
    def largestAltitude(self, gain: List[int]) -> int:
        curHeight = 0
        maxHeight = 0
        for g in gain:
            curHeight += g
            maxHeight = max(maxHeight, curHeight)
        return maxHeight
|
[
"supperllx@outlook.com"
] |
supperllx@outlook.com
|
4c2f052e47f331249f8d010f61215fab0048cba4
|
4f2f71beee2fb016550598996e100ce176100dcb
|
/python/etl/etl.py
|
9bbd5ca4f54b1b3e9ae65cbba66894797f2bf174
|
[] |
no_license
|
charles-wangkai/exercism
|
d2723bd160573b2d3ee9051ff63972e5be900d87
|
c283a5078e3d0f05ff3d86b2c208ae086d3896a4
|
refs/heads/master
| 2023-05-11T13:11:23.776323
| 2023-04-30T17:40:56
| 2023-04-30T17:40:56
| 102,832,444
| 2
| 4
| null | 2020-03-14T15:49:13
| 2017-09-08T07:31:36
|
C++
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
def transform(legacy_data):
    return {letter.lower(): score
            for score, letters in legacy_data.items()
            for letter in letters}
|
[
"charles.wangkai@gmail.com"
] |
charles.wangkai@gmail.com
|
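A small usage sketch of the transform above; the legacy score-to-letters mapping is illustrative:

```python
# The transform above flips a {score: [letters]} mapping into {letter: score}.
def transform(legacy_data):
    return {letter.lower(): score
            for score, letters in legacy_data.items()
            for letter in letters}

legacy = {1: ["A", "E"], 2: ["D", "G"]}   # hypothetical legacy data
print(transform(legacy))  # {'a': 1, 'e': 1, 'd': 2, 'g': 2}
```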
35488866c24bd360ea370d1014afbe7e4ed4e555
|
b33d1d4b74d375a2050baf80cda5b8571aff7462
|
/s14/day01/homework2.py
|
1a595d08faafbc21bfeba3287a464e606179d299
|
[] |
no_license
|
sunwang33/code
|
e979e1b11209200fba07a99d926d76f09c83b514
|
377f3e919555bf0f02ef56c9395d57992c84fcfd
|
refs/heads/master
| 2021-01-16T18:10:08.358744
| 2018-01-01T02:58:43
| 2018-01-01T02:58:43
| 100,045,002
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,440
|
py
|
# Author:Sun Wang
menu = {
'北京':{
'海淀':{
'五道口':{
'soho':{},
'网易':{},
'google':{}
},
'中关村':{
'爱奇艺':{},
'汽车之家':{},
'youku':{},
},
'上地':{
'百度':{},
},
},
'昌平':{
'沙河':{
'老男孩':{},
'北航':{},
},
'天通苑':{},
'回龙观':{},
},
'朝阳':{},
'东城':{},
},
'上海':{
'闵行':{
"人民广场":{
'炸鸡店':{}
}
},
'闸北':{
'火车战':{
'携程':{}
}
},
'浦东':{},
},
'山东':{},
}
exit_flag = False
while not exit_flag :
for item in menu:
print (item)
choise = input("Please input your choise: ")
if choise in menu:
while not exit_flag:
for i in menu[choise]:
print ("\t",i)
choise1 = input("Please input your choise1: ")
if choise1 in menu[choise]:
while not exit_flag:
for i1 in menu[choise][choise1]:
print ("\t",i1)
choise2 = input("Please input your choise2: ")
if choise2 in menu[choise][choise1]:
while not exit_flag:
for i2 in menu[choise][choise1][choise2]:
print ("\t\t",i2)
choise3 = input("Please input your choise3: ")
if choise3 in menu[choise][choise1][choise2]:
while not exit_flag:
for i3 in menu[choise][choise1][choise2][choise3]:
print ("\t\t\t",i3)
if choise3 == 'q':
exit_flag = True
elif choise3 == 'b':
break
if choise2 == 'b':
break
if choise1 == 'b':
break
if choise == 'b':
break
|
[
"330463670@qq.com"
] |
330463670@qq.com
|
783c3f96c270a8323efbe58ab9ad72e3ffc8e029
|
1c6a7125c8ea024050045fb18a685daadcfbcb0f
|
/codeforces/random/B_Equal_Candies.py
|
5666b59c9804f96384bfdd8bf152e6b93b45323e
|
[] |
no_license
|
HurayraIIT/competitive-programming
|
0e2f40cf1cae76129eac0cd8402b62165a6c29e4
|
3b9bc3066c70284cddab0f3e39ffc3e9cd59225f
|
refs/heads/master
| 2022-12-10T18:33:10.405727
| 2022-12-06T13:15:15
| 2022-12-06T13:15:15
| 236,779,058
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
# Abu Hurayra
import sys
from collections import defaultdict
# import threading
# threading.stack_size(2**27)
# sys.setrecursionlimit(2**21)
def rs(): return sys.stdin.readline().rstrip()
def ri(): return int(sys.stdin.readline())
def ria(): return list(map(int, sys.stdin.readline().split()))
def ws(s): sys.stdout.write(s + '\n')
def wi(n): sys.stdout.write(str(n) + '\n')
def wia(a): sys.stdout.write(' '.join([str(x) for x in a]) + '\n')
# a = list(map(int, input().split()))
def main():
t = ri()
for _ in range(t):
n = ri()
a = ria()
m = min(a)
ans = 0
for i in a:
ans += i - m
print(ans)
if __name__ == '__main__':
# t = threading.Thread(target=main)
# t.start()
# t.join()
main()
|
[
"hurayraiit@gmail.com"
] |
hurayraiit@gmail.com
|
24eab0073b819cc196e8f7657f4052507436ad3f
|
007f7d8c93725457bc5692715587227d6c8acc0c
|
/blender/.blender/scripts/renameobjectbyblock.py
|
eeea815c650127d2b64e7c557b1b425a00e90a67
|
[
"GPL-2.0-only",
"PSF-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
Nicoeevee/sketchfab_download
|
cf1c72ab45a88bebb0e08d7fb984fa01a3be97fa
|
a81ad3a2053e715608e657fd62c9dc1194ffe290
|
refs/heads/master
| 2023-04-21T08:05:28.322657
| 2021-05-13T18:01:30
| 2021-05-13T18:01:30
| 354,547,290
| 0
| 0
|
Apache-2.0
| 2021-05-14T12:04:21
| 2021-04-04T13:13:28
|
Python
|
UTF-8
|
Python
| false
| false
| 4,863
|
py
|
#!BPY
""" Registration info for Blender menus: <- these words are ignored
Name: 'Object Name Editor'
Blender: 232
Group: 'Object'
Tip: 'GUI to select and rename objects.'
"""
__author__ = "Jean-Michel Soler (jms)"
__url__ = ("blender", "blenderartists.org",
"Script's homepage, http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_renameobjectgui.htm",
"Communicate problems and errors, http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender")
__version__ = "233"
__bpydoc__ = """\
This script offers a GUI to rename selected objects according to a given
rule.
Usage:
Open it from the 3d View's "Object->Scripts" menu and select the objects to
rename and the rule from the buttons in its GUI.
"""
# ----------------------------------------------------------
# Name OBJECT changer
# (c) 2004 jean-michel soler
# -----------------------------------------------------------
#----------------------------------------------
# Official page of the blender python Name OBJECT changer:
# http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_renameobjectgui.htm
# To communicate problems and errors on:
# http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender
#---------------------------------------------
# Blender Artistic License
# http://download.blender.org/documentation/html/x21254.html
#---------------------------------------------
CVS=0
import Blender
from Blender import *
from Blender.Draw import *
from Blender.BGL import *
O = list(Scene.GetCurrent().objects)
stringlist=[[],[]]
def renew():
global O
#O = Object.Get()
O = list(Scene.GetCurrent().objects)
#param= [ [p.name, i, p.getType()] for i, p in enumerate(O) ]
PARAM={}
evt=9
stringlist=[[],[],[]]
for i, ob in enumerate(O):
obname= ob.name
PARAM[obname] = [Create(ob.sel), evt, i, ob.getType(), Create(obname), evt+1, ob]
stringlist[0].append(evt+1)
stringlist[1].append(obname)
stringlist[2].append(evt)
evt+=2
return PARAM,stringlist
NEWNAME=Create('Name')
alignment={'BEGIN' : [Create(1),5],
'END' : [Create(0),6],
'POINT' : [Create(0),7],
'FULL' : [Create(0),8]}
def rename():
global NEWNAME, alignment, O, PARAM, stringlist
newname= NEWNAME.val
for obname, value in PARAM.iteritems():
if value[0].val: # Selected
if alignment['END'][0].val:
value[6].setName(obname+newname)
elif alignment['BEGIN'][0].val:
value[6].setName(newname+obname)
elif alignment['FULL'][0].val:
value[6].setName(newname)
PARAM, stringlist = renew()
PARAM, stringlist = renew()
def EVENT(evt,val):
pass
def BUTTON(evt):
global PARAM , alignment, O, stringlist, CVS
if (evt==1):
Exit()
elif (evt==2):
rename()
elif (evt==3):
PARAM, stringlist = renew()
elif (evt in [5,6,7,8]):
for k in alignment.iterkeys():
if alignment[k][1]!=evt:
alignment[k][0].val=0
elif (evt in stringlist[0]):
O[PARAM[stringlist[1][(evt-9)/2]][2]].setName(PARAM[stringlist[1][(evt-9)/2]][4].val)
PARAM, stringlist = renew()
elif (evt in stringlist[2]):
try:
O[PARAM[stringlist[1][(evt-9)/2]][2]].select(PARAM[stringlist[1][(evt-9)/2]][0].val)
except:
pass
Blender.Redraw()
def DRAW():
global PARAM, O, NEWNAME, alignment
#glColor3f(0.7, 0.7, 0.7)
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(0.1, 0.1, 0.15)
size=Buffer(GL_FLOAT, 4)
glGetFloatv(GL_SCISSOR_BOX, size)
size= size.list
for s in [0,1,2,3]: size[s]=int(size[s])
ligne=20
Button ("Exit",1,20,1,80,ligne)
Button ("Rename",2,102,1,80,ligne)
Button ("Renew",3,184,1,80,ligne)
glRasterPos2f(20, ligne*2-10)
Text("Object Name Editor")
NEWNAME=String('Add String: ', 4, 150, ligne*2-16, 150, 18, NEWNAME.val,120 )
key= alignment.keys()
key.sort()
n=150+150+4
for k in key:
alignment[k][0]= Toggle(k,alignment[k][1],n,ligne*2-16, 40, 18, alignment[k][0].val)
n+=40+4
max=size[3] / 22 -2
pos = 0
decal = 20
keys=[[PARAM[k][1],k] for k in PARAM.iterkeys()]
keys.sort()
for p_ in keys:
p=p_[1]
if pos==max:
decal+=152
pos=1
else:
pos+=1
PARAM[p][0]=Toggle('S',PARAM[p][1],decal,pos*22+22,20,20, PARAM[p][0].val,"Select this one for a group renaming")
PARAM[p][4]=String('',PARAM[p][5],decal+20,pos*22+22,90,20, PARAM[p][4].val,200, "string button to rename immediately but only this object")
glRasterPos2f(decal+115,pos*22+24)
Text(PARAM[p][3][:4])
if __name__=='__main__':
Register(DRAW,EVENT,BUTTON)
|
[
"2966764421@qq.com"
] |
2966764421@qq.com
|
960540a6f9a5e5fdc7c3bb222cfbfd59bf548e8d
|
bf2d010229aece071359662f4fef44e48ba57951
|
/dynamic_range_time_step_plot.py
|
ce67b22b30e65b9b6c4f46b93df1df6ec14a9916
|
[] |
no_license
|
Osrip/CriticalEvolution
|
b97398f74e2fc5b54c9ab92765b08ce3bf97257e
|
f77cae8acc626cb4c6d64d5a44fdf00310309c2e
|
refs/heads/master
| 2021-06-24T03:44:03.283017
| 2021-04-03T13:09:42
| 2021-04-03T13:09:42
| 215,332,038
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,728
|
py
|
import os
import numpy as np
from automatic_plot_helper import load_isings_specific_path
from automatic_plot_helper import attribute_from_isings
from automatic_plot_helper import all_folders_in_dir_with
import copy
import pandas as pd
import glob
import pickle
from run_combi import RunCombi
import matplotlib.pylab as plt
from matplotlib.lines import Line2D
import seaborn as sns
import re
from isolated_population_helper import seperate_isolated_populations
def plot_dynamic_range(sim_name, plot_settings):
attrs_list_each_food_num_all, attrs_list_each_food_num_critical, attrs_list_each_food_num_sub_critical, food_num_list = load_data(plot_settings['attr'], sim_name)
# plot_averages(attrs_list_each_food_num_all, food_num_list, sim_name, plot_settings)
plot_seperated_averages(attrs_list_each_food_num_critical, attrs_list_each_food_num_sub_critical, food_num_list,
sim_name, plot_settings)
def plot_averages(attrs_list_each_food_num, food_num_list, sim_name, plot_settings):
avg_attr_list = [np.mean(attrs) for attrs in attrs_list_each_food_num]
plt.scatter(food_num_list, avg_attr_list)
# plt.savefig('moinsen.png')
save_dir = 'save/{}/figs/dynamic_range_plots{}/'.format(sim_name, plot_settings['add_save_name'])
save_name = 'plot_averages.png'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig(save_dir+save_name, bbox_inches='tight')
plt.show()
def plot_seperated_averages(attrs_list_each_food_num_critical, attrs_list_each_food_num_sub_critical, food_num_list,
sim_name, plot_settings):
avg_attr_list_critical = [np.mean(attrs) for attrs in attrs_list_each_food_num_critical]
avg_attr_list_sub_critical = [np.mean(attrs) for attrs in attrs_list_each_food_num_sub_critical]
plt.figure(figsize=(12, 8))
# make list of list with similar food_num entries for plotting
food_num_list_extended_critical = [[food_num for i in range(len(attrs))]
for food_num, attrs in zip(food_num_list, attrs_list_each_food_num_critical)]
food_num_list_extended_sub_critical = [[food_num for i in range(len(attrs))]
for food_num, attrs in zip(food_num_list, attrs_list_each_food_num_sub_critical)]
# food_num_list_extended = np.array(food_num_list_extended)
# attrs_list_each_food_num_critical = np.array(attrs_list_each_food_num_critical)
# attrs_list_each_food_num_sub_critical = np.array(attrs_list_each_food_num_sub_critical)
# for food_num_critical, food_num_sub_critical, attr_critical, attr_sub_critical in
# zip(food_num_list_extended_critical, food_num_list_extended_critical,
# attrs_list_each_food_num_critical, attrs_list_each_food_num_sub_critical)
plt.scatter(food_num_list_extended_critical, attrs_list_each_food_num_critical,
c=plot_settings['color']['critical'], s=2, alpha=0.4)
plt.scatter(food_num_list_extended_sub_critical, attrs_list_each_food_num_sub_critical, c=plot_settings['color']['sub_critical'],
s=2, alpha=0.4)
plt.scatter(food_num_list, avg_attr_list_critical, c=plot_settings['color']['critical'], label='critical')
plt.scatter(food_num_list, avg_attr_list_sub_critical, c=plot_settings['color']['sub_critical'],
label='sub-critical')
plt.ylabel(plot_settings['attr'])
plt.xlabel('number of time steps in simulation')
plt.legend()
save_dir = 'save/{}/figs/dynamic_range_plots_time_steps{}/'.format(sim_name, plot_settings['add_save_name'])
save_name = 'plot_averages_seperated.png'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig(save_dir+save_name, bbox_inches='tight')
plt.show()
# TODO: Debug and continue here!!
def load_data(attr, sim_name):
sim_dir = 'save/{}'.format(sim_name)
attrs_list_each_food_num_all = []
attrs_list_each_food_num_critical = []
attrs_list_each_food_num_sub_critical = []
food_num_list = []
dir_list = all_folders_in_dir_with('{}/repeated_generations'.format(sim_dir), 'dynamic_range_run_time_step')
for dir in dir_list:
isings_list = load_isings_specific_path(dir)
isings = make_2d_list_1d(isings_list)
isings_populations_seperated = seperate_isolated_populations([isings])
isings_critical = isings_populations_seperated[0][0]
isings_sub_critical = isings_populations_seperated[1][0]
attrs_list_each_food_num_all.append(attribute_from_isings(isings, attr))
attrs_list_each_food_num_critical.append(attribute_from_isings(isings_critical, attr))
attrs_list_each_food_num_sub_critical.append(attribute_from_isings(isings_sub_critical, attr))
food_num_list.append(get_int_end_of_str(dir))
return attrs_list_each_food_num_all, attrs_list_each_food_num_critical, attrs_list_each_food_num_sub_critical, food_num_list
def get_int_end_of_str(s):
m = re.search(r'\d+$', s)
return int(m.group()) if m else None
def make_2d_list_1d(in_list):
out_list = []
for sub_list in in_list:
for en in sub_list:
out_list.append(en)
return out_list
if __name__ == '__main__':
plot_settings = {}
plot_settings['add_save_name'] = ''
plot_settings['attr'] = 'avg_energy'
plot_settings['color'] = {'critical': 'darkorange', 'sub_critical': 'royalblue', 'super_critical': 'maroon'}
sim_name = 'sim-20201007-230728-g_4000_-t_8000_-iso_-ref_500_-rec_c_1000_-a_200_500_1000_2000_3000_3999_-c_3_-n_different_betas_DO_LONG_TIME_STEPS_WEAKEN_SUB_CRITICAL_and_DYNAMIC_RANGE_FOOD'
plot_dynamic_range(sim_name, plot_settings)
|
[
"jan.prosi@hotmail.com"
] |
jan.prosi@hotmail.com
|
a69e06de247ad3631563edfd5c4b3257cf2749ed
|
7c8bff784568691c516833ac81afc967857d24e2
|
/jacc/migrations/0019_entrytype_identifier.py
|
effb3d0f203ab8c4e4ea27554b71aa4fcc456877
|
[
"MIT"
] |
permissive
|
kajala/django-jacc
|
b71f2c3df1321b9bb31e1e648895931b735949a6
|
4acb8ca2d32b11fd5afa3b5316b13be223b20ec6
|
refs/heads/develop
| 2023-08-18T14:12:38.196880
| 2023-08-11T15:18:57
| 2023-08-11T15:18:57
| 121,229,896
| 11
| 5
|
MIT
| 2021-07-12T15:02:36
| 2018-02-12T10:02:20
|
Python
|
UTF-8
|
Python
| false
| false
| 746
|
py
|
# Generated by Django 2.1.2 on 2018-10-18 15:36
from django.db import migrations, models
from django.db.models import F
def migr_code_to_identifier_0019_entrytype_identifier(apps, schema):
EntryType = apps.get_model("jacc", "EntryType")
EntryType.objects.all().update(identifier=F("code"))
class Migration(migrations.Migration):
dependencies = [
("jacc", "0018_auto_20181008_2322"),
]
operations = [
migrations.AddField(
model_name="entrytype",
name="identifier",
field=models.CharField(blank=True, db_index=True, default="", max_length=40, verbose_name="identifier"),
),
migrations.RunPython(migr_code_to_identifier_0019_entrytype_identifier),
]
|
[
"kajala@gmail.com"
] |
kajala@gmail.com
|
e01a13130ccc128e63bdb0486285772b63f84edf
|
a155780658a6d2c9b4e4adfaf822ba465f8f6be8
|
/controller/jellyfish-mods/jf_phoneme.py
|
9a4988ea5646229664603c154277d4e59983d701
|
[] |
no_license
|
stcybrdgs/NLP-Matching
|
e77ab6c63281d6d859f9a68be31c8913be20d9e6
|
6b4725e68eb4233844273d3a96b0f36b14ce8e80
|
refs/heads/master
| 2020-05-25T18:21:55.009741
| 2019-06-13T07:15:47
| 2019-06-13T07:15:47
| 187,928,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 19:15:42 2019
@author: Stacy
jellyfish modules for use with the controller program
"""
import jellyfish
def soundex():
tokens = ['Ball Bearing',
'bll brng',
'Centrifugal',
'centrifigal',
'PUmp',
'pmp']
print('Running SOUNDEX...')
# print tokens
print('Tokens: ', end='')
for i in tokens:
print(i,' | ', end='')
    # print codes
print('\n', end="")
print('Codes: ', end='')
for i in tokens:
print(jellyfish.soundex(i), ' | ', end='')
# ---- end function ----
def nysiis():
tokens = ['Ball Bearing',
'bll brng',
'Centrifugal',
'centrifigal',
'PUmp',
'pmp']
print('Running NYSIIS...')
# print tokens
print('Tokens: ', end='')
for i in tokens:
print(i,' | ', end='')
    # print codes
print('\n', end="")
print('Codes: ', end='')
for i in tokens:
print(jellyfish.nysiis(i), ' | ', end='')
# ---- end function ----
|
[
"stcybrdgs@gmail.com"
] |
stcybrdgs@gmail.com
|
632789f2b0dcf3c03c1d6fd2e945bda51a359db3
|
c071eb46184635818e8349ce9c2a78d6c6e460fc
|
/system/python_stubs/-745935208/cx_Oracle/MessageProperties.py
|
641ade26f658f4b7e5bbfa26034ba4823d3e2d0f
|
[] |
no_license
|
sidbmw/PyCharm-Settings
|
a71bc594c83829a1522e215155686381b8ac5c6e
|
083f9fe945ee5358346e5d86b17130d521d1b954
|
refs/heads/master
| 2020-04-05T14:24:03.216082
| 2018-12-28T02:29:29
| 2018-12-28T02:29:29
| 156,927,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
# encoding: utf-8
# module cx_Oracle
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\cx_Oracle.cp37-win_amd64.pyd
# by generator 1.146
# no doc
# imports
import datetime as __datetime
from .object import object
class MessageProperties(object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
attempts = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
correlation = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
delay = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
deliverymode = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
enqtime = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
exceptionq = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
expiration = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
msgid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
priority = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
state = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
[
"siddharthnatamai@gmail.com"
] |
siddharthnatamai@gmail.com
|
12358f25a48a53f1851f8ac5027fdd19a6973bab
|
59de7788673ade984b9c9fbc33664a7cbdba67d3
|
/res/scripts/client/gui/scaleform/daapi/view/lobby/crewoperations/__init__.py
|
bd54d7b315bf7025de933b9384553c691e7e1edd
|
[] |
no_license
|
webiumsk/WOT-0.9.15-CT
|
3fa24ab37a6c91b7073034afb2f355efa5b7fe36
|
fbd194fbaa6bdece51c7a68fc35bbb5257948341
|
refs/heads/master
| 2020-12-24T21:27:23.175774
| 2016-05-01T13:47:44
| 2016-05-01T13:47:44
| 57,600,180
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,734
|
py
|
# 2016.05.01 15:21:39 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/crewOperations/__init__.py
from gui.app_loader.settings import APP_NAME_SPACE
from gui.shared import EVENT_BUS_SCOPE
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.framework import GroupedViewSettings, ViewTypes, ScopeTemplates
from gui.Scaleform.framework.package_layout import PackageBusinessHandler
def getViewSettings():
from gui.Scaleform.daapi.view.lobby.crewOperations.CrewOperationsPopOver import CrewOperationsPopOver
from gui.Scaleform.daapi.view.lobby.crewOperations.RetrainCrewWindow import RetrainCrewWindow
return (GroupedViewSettings(VIEW_ALIAS.CREW_OPERATIONS_POPOVER, CrewOperationsPopOver, 'crewOperationsPopOver.swf', ViewTypes.WINDOW, 'crewOperationsPopOver', VIEW_ALIAS.CREW_OPERATIONS_POPOVER, ScopeTemplates.WINDOW_VIEWED_MULTISCOPE), GroupedViewSettings(VIEW_ALIAS.RETRAIN_CREW, RetrainCrewWindow, 'retrainCrewWindow.swf', ViewTypes.TOP_WINDOW, 'retrainCrewWindow', None, ScopeTemplates.DEFAULT_SCOPE))
def getBusinessHandlers():
return (CrewOpsBusinessHandler(),)
class CrewOpsBusinessHandler(PackageBusinessHandler):
def __init__(self):
listeners = ((VIEW_ALIAS.CREW_OPERATIONS_POPOVER, self.loadViewByCtxEvent), (VIEW_ALIAS.RETRAIN_CREW, self.loadViewByCtxEvent))
super(CrewOpsBusinessHandler, self).__init__(listeners, APP_NAME_SPACE.SF_LOBBY, EVENT_BUS_SCOPE.LOBBY)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\crewoperations\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.05.01 15:21:39 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
0e536a419c8eaf8064d4388c6bd6fbf237af1039
|
ae7884af1ec3965b7c0eec22edad6b74f78b7ba6
|
/client/full/src/UDSWindow.py
|
86e5b3b9b59538fda5013a6802deb4d95ceee0e4
|
[] |
no_license
|
glyptodon/openuds
|
f4eefa319a3ead827dad999d24e5ee3854d1345d
|
3908c875d30ec332490fc8c049bb537e10f10d08
|
refs/heads/master
| 2021-07-12T20:58:49.281242
| 2021-03-05T22:42:55
| 2021-03-05T22:42:55
| 62,921,174
| 0
| 1
| null | 2016-07-08T22:33:44
| 2016-07-08T22:33:44
| null |
UTF-8
|
Python
| false
| false
| 4,671
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UDSWindow.ui'
#
# Created: Mon Apr 27 21:41:43 2015
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.setWindowModality(QtCore.Qt.NonModal)
MainWindow.resize(259, 185)
MainWindow.setCursor(QtGui.QCursor(QtCore.Qt.BusyCursor))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/logo-uds-small")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setWindowOpacity(1.0)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setAutoFillBackground(True)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setSpacing(4)
self.verticalLayout_2.setMargin(4)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.frame = QtGui.QFrame(self.centralwidget)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.frame)
self.verticalLayout_3.setSpacing(4)
self.verticalLayout_3.setMargin(4)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.image = QtGui.QLabel(self.frame)
self.image.setMinimumSize(QtCore.QSize(0, 24))
self.image.setAutoFillBackground(True)
self.image.setText(_fromUtf8(""))
self.image.setPixmap(QtGui.QPixmap(_fromUtf8(":/images/logo-uds-small")))
self.image.setScaledContents(False)
self.image.setAlignment(QtCore.Qt.AlignCenter)
self.image.setObjectName(_fromUtf8("image"))
self.verticalLayout.addWidget(self.image)
self.info = QtGui.QLabel(self.frame)
self.info.setMaximumSize(QtCore.QSize(16777215, 16))
self.info.setObjectName(_fromUtf8("info"))
self.verticalLayout.addWidget(self.info)
self.progressBar = QtGui.QProgressBar(self.frame)
self.progressBar.setProperty("value", 24)
self.progressBar.setTextVisible(False)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.verticalLayout.addWidget(self.progressBar)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.cancelButton = QtGui.QPushButton(self.frame)
self.cancelButton.setDefault(True)
self.cancelButton.setFlat(False)
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.horizontalLayout.addWidget(self.cancelButton)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout_3.addLayout(self.verticalLayout)
self.verticalLayout_2.addWidget(self.frame)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "UDS Connection", None))
self.info.setText(_translate("MainWindow", "TextLabel", None))
self.cancelButton.setText(_translate("MainWindow", "Cancel", None))
import UDSResources_rc
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"dkmaster@dkmon.com"
] |
dkmaster@dkmon.com
|
eb7705bd6b9d8e6677c1899be7ba4d2bdc3f42a1
|
368be25e37bafa8cc795f7c9f34e4585e017091f
|
/.history/app_fav_books/models_20201114185225.py
|
1f3117e89b2e0690542e5f302aea450246571448
|
[] |
no_license
|
steven-halla/fav_books_proj
|
ebcfbfda0e7f3cdc49d592c86c633b1d331da513
|
512005deb84ac906c9f24d4ab0939bd0db096716
|
refs/heads/master
| 2023-03-30T09:37:38.016063
| 2021-04-02T20:27:22
| 2021-04-02T20:27:22
| 354,125,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,606
|
py
|
from django.db import models
import re
class UserManager(models.Manager):
def basic_validator(self, post_data):
errors = {}
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
if len(post_data['first_name']) < 3:
errors['first_name'] = "First name must be 3 characters"
if post_data['first_name'].isalpha() == False:
errors['first_name'] = "letters only"
if len(post_data['last_name']) < 3:
errors['last_name'] = "Last name must be 3 characters"
if post_data['last_name'].isalpha() == False:
errors['last_name'] = "letters only"
if len(post_data['email']) < 8:
errors['email'] = "Email must contain 8 characters"
if post_data['email'].find("@") == -1:
errors['email'] = "email must contain @ and .com"
if post_data['email'].find(".com") == -1:
errors['email'] = "email must contain @ and .com"
# test whether a field matches the pattern
if not EMAIL_REGEX.match(post_data['email']):
errors['email'] = "Invalid email address!"
if post_data['password'] != post_data['confirm_password']:
errors['pass_match'] = "password must match confirm password"
if len(post_data['password']) < 8:
errors['pass_length'] = "password must be longer than 8 characters"
return errors
# Create your models here.
class User(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
email = models.CharField(max_length=20)
password = models.CharField(max_length=20)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
class BooksManager(models.Manager):
def basic_validator(self, post_data):
errors = {}
        if len(post_data['title']) < 1:
            errors['title'] = "Title must be at least 1 character"
        if len(post_data['desc']) < 5:
            errors['desc'] = "Description must be at least 5 characters"
return errors
class Books(models.Model):
title = models.CharField(max_length=20)
desc = models.CharField(max_length=40)
uploaded_by = models.ForeignKey(User, related_name="books_uploaded", on_delete=models.CASCADE)
users_who_favorite = models.ManyToManyField(User, related_name="liked_books")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
    objects = BooksManager()
|
[
"69405488+steven-halla@users.noreply.github.com"
] |
69405488+steven-halla@users.noreply.github.com
|
2a9ec919aa12c00f7699703cd9f9a960cd3df308
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03696/s927043333.py
|
8af9a814da95dcba853bbf295beefd9db08be572
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
n = input()
s = input()
ss = s
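# Repeatedly delete matched "()" pairs below; what survives is a run of ')' followed
# by a run of '(', so prepending that many '(' and appending that many ')' turns the
# original string into a balanced parenthesis sequence of minimal length.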
for i in range(50):
s = s.replace('()','')
l = s.count(')')
r = s.count('(')
print('('*l+ss+')'*r)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4f1483114b60a70f6e62341fea4736adc8e4abbf
|
35f9695acb95029f2dd87a2cc214b0b34935de17
|
/update.py
|
9f4da1693c181b6386fe9b140233603190f5e838
|
[
"BSD-3-Clause"
] |
permissive
|
dragonix11/aurdiff
|
b5382e7fd38f4d2c370ad157fcaf18d8ba48c0d9
|
b4ffdb2afcd30cac7cf24ca42fab0f0cdc7130e0
|
refs/heads/master
| 2021-01-18T18:43:29.145163
| 2013-11-10T10:19:26
| 2013-11-10T10:19:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,337
|
py
|
# Copyright 2013 Virgil Dupras (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package.
import os.path as op
from urllib.request import urlopen
import subprocess
import json
from bs4 import BeautifulSoup
HERE = op.dirname(__file__)
AUR_FOLDER = op.join(HERE, 'aur')
BASE_URL = 'https://aur.archlinux.org'
def get_pkg_list():
result = [] # (name, version)
URL = BASE_URL + '/packages/?SB=a&SO=d&O=0&PP=250'
with urlopen(URL) as fp:
contents = fp.read()
soup = BeautifulSoup(contents)
table = soup('table', class_='results')[0]
rows = table.tbody('tr')
for row in rows:
# Strangely enough, when querying through urlopen, we don't have the checkbox column. Is
# this column added through JS?
pair = (row('td')[1].text, row('td')[2].text)
result.append(pair)
return result
def download_pkgbuild(pkgname):
URL = '%s/packages/%s/' % (BASE_URL, pkgname)
with urlopen(URL) as fp:
contents = fp.read()
soup = BeautifulSoup(contents)
pkgbuild_url = BASE_URL + soup('div', id='actionlist')[0].ul('li')[0].a['href']
with urlopen(pkgbuild_url) as fp:
contents = fp.read()
with open(op.join(AUR_FOLDER, pkgname), 'wb') as fp:
fp.write(contents)
def main():
json_path = op.join(HERE, 'lastupdate.json')
with open(json_path, 'rt') as fp:
info = json.load(fp)
lastname = info['name']
lastversion = info['version']
pkglist = get_pkg_list()
if (lastname, lastversion) in pkglist:
index = pkglist.index((lastname, lastversion))
pkglist = pkglist[:index]
if not pkglist:
print("Nothing to update")
return
for name, version in reversed(pkglist):
print("Updating %s to %s" % (name, version))
download_pkgbuild(name)
subprocess.call(['git', 'add', op.join(AUR_FOLDER, name)])
lastname, lastversion = pkglist[0]
info = {'name': lastname, 'version': lastversion}
with open(json_path, 'wt') as fp:
json.dump(info, fp)
subprocess.call(['git', 'add', json_path])
commit_msg = "Updated %d packages" % len(pkglist)
subprocess.call(['git', 'commit', '-m', commit_msg])
if __name__ == '__main__':
main()
|
[
"hsoft@hardcoded.net"
] |
hsoft@hardcoded.net
|
2c2f2040fde54d6c77504275f121070bd8c62399
|
f67469cba32f16399ef2e65d2731c5eae36a53b3
|
/config/settings/base.py
|
0fc9bcb527d5fe44323aef0ac7bfc93b4daf6ca8
|
[
"MIT"
] |
permissive
|
ZedeLab/WhatsUpAddis-BE
|
1f3fecc9c5705eca500f13aa5a831fcf81fb5faf
|
702d14eff969673ce88dbd6f4cad690cbb580c30
|
refs/heads/master
| 2023-04-06T03:53:57.537028
| 2018-11-30T21:54:24
| 2018-11-30T21:54:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,476
|
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
# Third Party apps
INSTALLED_APPS += [
'authtools',
]
# Project apps
INSTALLED_APPS += [
'accounts',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Addis_Ababa'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Custom Auth User Model
AUTH_USER_MODEL = 'accounts.User'
|
[
"eyobtariku@gmail.com"
] |
eyobtariku@gmail.com
|
7c31f7d64627e78b8f3832a823a1b501d4c78424
|
48e1cf7a4a39df57a38246da1f67f3f4dc8f2020
|
/With_Sql/main/buy_product_form.py
|
59abb15f507001600bb719ae5117f96c258b1c99
|
[] |
no_license
|
mdarifulislamroni21/django_website
|
601d1d0e5422419895363968a0f4d1c50dfd9daa
|
28562e3e8f07ddac49057eba07411f05b08918ff
|
refs/heads/master
| 2023-06-23T22:30:32.317834
| 2021-07-23T04:25:37
| 2021-07-23T04:25:37
| 388,668,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
from django import forms
from main.models import user_details
#create New Form
class user_details_connect(forms.ModelForm):
class Meta:
model=user_details
fields="__all__"
widgets={
'buydate':forms.TextInput(attrs={'type':'date'}),
'product':forms.TextInput(attrs={'placeholder':'T-Shart'})
}
|
[
"mdarifulislamroni21@gmail.com"
] |
mdarifulislamroni21@gmail.com
|
7faecae779bde3911e044903901c3c9069dfa309
|
4fdaee9f2612a8c429991a2042dffcee80e7a641
|
/rootfs/qboxhd/rootfs/usr/local/lib/enigma2/python/Plugins/SystemPlugins/NetworkBrowser/__init__.py
|
dc8dd57b592a450dadd83765c112f3da212d8dfb
|
[] |
no_license
|
OpenSH4/qboxhd
|
841072db3b0eaecdcac116b5f96268d47115cdec
|
91dd37a5311b5c53fb088ab0ce902ee49552ece0
|
refs/heads/master
| 2020-09-07T17:55:36.114816
| 2012-01-08T21:33:02
| 2012-01-08T21:33:02
| 220,866,062
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
# -*- coding: ISO-8859-1 -*-
#===============================================================================
# NetworkBrowser and MountManager Plugin by acid-burn
# netscan lib by Nix_niX
# for further License informations see the corresponding License files
# or SourceCodes
#
#===============================================================================
from Components.Language import language
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_LANGUAGE
import os,gettext
PluginLanguageDomain = "NetworkBrowser"
PluginLanguagePath = "SystemPlugins/NetworkBrowser/po"
def localeInit():
lang = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
os.environ["LANGUAGE"] = lang # Enigma doesn't set this (or LC_ALL, LC_MESSAGES, LANG). gettext needs it!
print "[NetworkBrowser] set language to ", lang
gettext.bindtextdomain(PluginLanguageDomain, resolveFilename(SCOPE_PLUGINS, PluginLanguagePath))
def _(txt):
t = gettext.dgettext(PluginLanguageDomain, txt)
if t == txt:
print "[NetworkBrowser] fallback to default translation for", txt
t = gettext.gettext(txt)
return t
localeInit()
language.addCallback(localeInit)
|
[
"duopaguilar@0410bcea-ab32-4fec-9f21-c18eae94034e"
] |
duopaguilar@0410bcea-ab32-4fec-9f21-c18eae94034e
|
c6fe6443ef899c8ed96248028a7916f6110ff9bd
|
8e2fa36281924fd28327a49d83f9855c6ff0c619
|
/photod-backend/photod/api/urls.py
|
254dfe2cbae9365ac31db41010e200dc82289133
|
[
"MIT"
] |
permissive
|
basilfx/Photod
|
10a7ba9200b7337f01bd2f3ad857caa5b6797b7c
|
0056a07e39b61e2f3b1c94f1309917dd8b24b654
|
refs/heads/master
| 2023-08-23T17:16:48.646493
| 2017-08-15T18:37:59
| 2017-08-15T18:37:59
| 98,336,863
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
from django.conf.urls import url
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from graphene_django.views import GraphQLView
urlpatterns = [
url(r'^graphql', csrf_exempt(
GraphQLView.as_view(graphiql=settings.DEBUG)
)),
]
|
[
"basstottelaar@gmail.com"
] |
basstottelaar@gmail.com
|
cca1348bd5f51fdecc2aa6f2960bc326a23a307d
|
2b3e9b32a38f4992c529de56b4baa51e1a674c4e
|
/ccui/attachments/models.py
|
787f5715353a1411f0f00595a423e5a97c5b968d
|
[] |
no_license
|
camd/caseconductor-ui
|
2c4f63fd6c20ee421012d8770b3b873c1b4f4232
|
deb6b22ed417740bf947e86938710bd5fa2ee2e7
|
refs/heads/master
| 2021-01-18T05:36:22.647236
| 2011-10-10T14:48:29
| 2011-10-10T14:48:29
| 2,447,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
from ..core.api import RemoteObject, ListObject, fields, Named
from ..static.fields import StaticData
class Attachment(Named, RemoteObject):
name = fields.Field()
description = fields.Field()
url = fields.Field()
size = fields.Field()
attachmentType = StaticData("ATTACHMENTTYPE", "attachmentTypeId")
def __unicode__(self):
return self.name
def delete(self):
# to delete an attachment from its canonical URL requires providing an
# entityID and entityType; simpler to delete it via the entity we got
# it from.
source = getattr(self, "linked_from", None)
if source is None:
raise ValueError("Cannot delete attachment without source context.")
return super(Attachment, self).delete(
url=source._location + "/attachments/" + self.id)
class AttachmentList(ListObject):
entryclass = Attachment
api_name = "attachments"
default_url = "attachments"
entries = fields.List(fields.Object(Attachment))
def __iter__(self):
for att in super(AttachmentList, self).__iter__():
att.linked_from = self.linked_from
yield att
|
[
"carl@oddbird.net"
] |
carl@oddbird.net
|
d98640284ad3872cd54920e573886ae38180dbb4
|
a9c0daa4a7b9a4d7341afcab270c5b5debb8c13f
|
/env/bin/easy_install
|
d376d9adfa89d50b9a0aa01da0885b7db2560bbc
|
[] |
no_license
|
phamcong/alienator-plf
|
bad8c4e003fd189c43243b31ef2b975b6f154754
|
ea65628af66fbca51f2248ceb4ba93f858dbddce
|
refs/heads/master
| 2022-11-26T01:28:38.286261
| 2017-11-07T15:12:08
| 2017-11-07T15:12:08
| 109,412,097
| 0
| 1
| null | 2020-07-25T23:43:17
| 2017-11-03T15:30:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 283
|
#!/Users/cuongpham/Data/Coding/ALIENNOR/aliennor-plf/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"ccuong.ph@gmail.com"
] |
ccuong.ph@gmail.com
|
|
f68897246e241e33ad3e985789afccfa8577f59c
|
29d9a947fc954e5d182cb2d8292f96c0d27cdde0
|
/apps/xrayuploader/advanced_search.py
|
cb2331799ae21142d69e1f13dec228b6e0b9caf3
|
[
"Apache-2.0"
] |
permissive
|
slogan621/tscharts
|
43ee950a76323a8b99a8ab88743f2e5e5a1e6f42
|
034e661c3f4739f1dc04b3aef096e6bf6cf6e8d3
|
refs/heads/master
| 2023-08-10T19:04:50.317820
| 2023-07-29T21:03:27
| 2023-07-29T21:03:27
| 55,322,748
| 25
| 10
|
Apache-2.0
| 2023-07-08T20:32:54
| 2016-04-03T00:35:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,491
|
py
|
# advanced_search.py
import wx
from pubsub import pub
class AdvancedSearch(wx.Panel):
def __init__(self, parent):
super().__init__(parent)
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.free_text = wx.TextCtrl(self)
self.ui_helper('Free text search:', self.free_text)
self.nasa_center = wx.TextCtrl(self)
self.ui_helper('NASA Center:', self.nasa_center)
self.description = wx.TextCtrl(self)
self.ui_helper('Description:', self.description)
self.description_508 = wx.TextCtrl(self)
self.ui_helper('Description 508:', self.description_508)
self.keywords = wx.TextCtrl(self)
self.ui_helper('Keywords (separate with commas):',
self.keywords)
self.location = wx.TextCtrl(self)
self.ui_helper('Location:', self.location)
self.nasa_id = wx.TextCtrl(self)
self.ui_helper('NASA ID:', self.nasa_id)
self.photographer = wx.TextCtrl(self)
self.ui_helper('Photographer:', self.photographer)
self.secondary_creator = wx.TextCtrl(self)
self.ui_helper('Secondary photographer:', self.secondary_creator)
self.title = wx.TextCtrl(self)
self.ui_helper('Title:', self.title)
search = wx.Button(self, label='Search')
search.Bind(wx.EVT_BUTTON, self.on_search)
self.main_sizer.Add(search, 0, wx.ALL | wx.CENTER, 5)
self.SetSizer(self.main_sizer)
def ui_helper(self, label, textctrl):
sizer = wx.BoxSizer()
lbl = wx.StaticText(self, label=label, size=(150, -1))
sizer.Add(lbl, 0, wx.ALL, 5)
sizer.Add(textctrl, 1, wx.ALL | wx.EXPAND, 5)
self.main_sizer.Add(sizer, 0, wx.EXPAND)
def on_search(self, event):
query = {'q': self.free_text.GetValue(),
'media_type': 'image',
'center': self.nasa_center.GetValue(),
'description': self.description.GetValue(),
'description_508': self.description_508.GetValue(),
'keywords': self.keywords.GetValue(),
'location': self.location.GetValue(),
'nasa_id': self.nasa_id.GetValue(),
'photographer': self.photographer.GetValue(),
'secondary_creator': self.secondary_creator.GetValue(),
'title': self.title.GetValue()}
pub.sendMessage('update_ui')
pub.sendMessage('search_results', query=query)
|
[
"slogan621@gmail.com"
] |
slogan621@gmail.com
|
41753a82068da0014a137032c5c1e406e6f0b79c
|
e7c03b71f26c463b2670c52cd2fddbc198e3c8cb
|
/apps/webhooks/signals.py
|
ce67cc1fc3c8fd8ae1badfe76adf4620eba30505
|
[] |
no_license
|
nerosketch/djing2
|
71cc96f4829fc047d788dd7d8a94f1035e9740f9
|
1fbb0941f26389cbfdc8015527ab0d426c2e2c01
|
refs/heads/master
| 2023-01-13T15:12:50.492646
| 2022-11-18T11:24:21
| 2022-11-18T11:24:21
| 196,469,351
| 7
| 3
| null | 2020-02-29T19:38:37
| 2019-07-11T21:50:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,516
|
py
|
import sys
from typing import Optional, Any
from django.dispatch import receiver
from django.db.models.signals import (
post_save, post_delete,
pre_save, pre_delete
)
from rest_framework.serializers import ModelSerializer
from webhooks.models import HookObserverNotificationTypes
from webhooks.tasks import send_update2observers_task
def _model_instance_to_dict(instance, model_class) -> dict:
class _model_serializer(ModelSerializer):
class Meta:
model = model_class
fields = '__all__'
ser = _model_serializer(instance=instance)
return ser.data
def receiver_no_test(*args, **kwargs):
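    """Behave like @receiver, except when running tests ('test' in sys.argv):
    in that case the handler is returned unconnected, so webhook notifications
    are not dispatched from test runs."""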
def _wrapper(fn):
if 'test' in sys.argv:
return fn
return receiver(*args, **kwargs)(fn)
return _wrapper
def _send2task(notify_type: HookObserverNotificationTypes, instance: Optional[Any], sender):
app_label_str = sender._meta.app_label
model_str = sender._meta.object_name
if instance:
instance_data = _model_instance_to_dict(
instance=instance,
model_class=sender
)
else:
instance_data = None
send_update2observers_task.delay(
notification_type=notify_type.value,
app_label=app_label_str,
model_str=model_str,
data=instance_data,
)
@receiver_no_test(post_save)
def _post_save_signal_handler(sender, instance, **kwargs):
_send2task(
notify_type=HookObserverNotificationTypes.MODEL_POST_SAVE,
instance=instance,
sender=sender
)
@receiver_no_test(post_delete)
def _post_del_signal_handler(sender, instance, **kwargs):
_send2task(
notify_type=HookObserverNotificationTypes.MODEL_POST_DELETE,
instance=instance,
sender=sender
)
# @receiver(post_init)
# def _post_init_signal_handler(sender, **kwargs):
# print('_post_init_signal_handler', sender, kwargs)
@receiver_no_test(pre_save)
def _pre_save_signal_handler(sender, instance, **kwargs):
_send2task(
notify_type=HookObserverNotificationTypes.MODEL_PRE_SAVE,
instance=instance if instance else None,
sender=sender
)
@receiver_no_test(pre_delete)
def _pre_del_signal_handler(sender, instance, **kwargs):
_send2task(
notify_type=HookObserverNotificationTypes.MODEL_PRE_DELETE,
instance=instance,
sender=sender
)
# @receiver(pre_init)
# def _pre_init_signal_handler(sender, **kwargs):
# print('_pre_init_signal_handler', sender, kwargs)
|
[
"nerosketch@gmail.com"
] |
nerosketch@gmail.com
|
3f2430aec993d53a12bf2b286d2c6e954df75aa9
|
07bd1848e35bbb75ef4d23f1982af618aa176852
|
/chap08/list0801.py
|
61d7b443504731e273a6c346de2ce224320e8385
|
[] |
no_license
|
kimurakousuke/MeiKaiPython
|
c0b56be8fcb79b39b0c8364e71e2da76eab613fe
|
674f6001060f56cf55e3d7336e6e4ca5f135beaf
|
refs/heads/master
| 2021-02-22T13:01:53.397290
| 2020-03-07T11:19:10
| 2020-03-07T11:19:10
| 245,377,717
| 1
| 0
| null | 2020-03-06T11:16:16
| 2020-03-06T09:22:53
|
Python
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
# A list of tuples (a list whose elements are all tuples)
students = [
(2012, '福冈太郎'),
(2013, '长崎龙一'),
(2011, '熊本纹三'),
]
print('students =', students)
print('students[0] =', students[0])
print('students[1] =', students[1])
print('students[2] =', students[2])
print('students[0][0] =', students[0][0])
print('students[0][1] =', students[0][1])
print('students[1][0] =', students[1][0])
print('students[1][1] =', students[1][1])
print('students[2][0] =', students[2][0])
print('students[2][1] =', students[2][1])
|
[
"61867402+kimurakousuke@users.noreply.github.com"
] |
61867402+kimurakousuke@users.noreply.github.com
|
96b99049010ef96d63f9812089561f63157f7727
|
d9517b7c8bf044778763245b52461acd9e301399
|
/parsers.py
|
b6a3a9cabdd014bbc08af4957996032163731acb
|
[] |
no_license
|
IIKovalenko/otus_avito_bot
|
300c7727167b5c84b8e33c8181631689ba7aa532
|
abfc5ac723eb93b49eefa05ef16833b27388a7ef
|
refs/heads/master
| 2020-06-02T12:47:14.104364
| 2018-01-17T18:43:43
| 2018-01-17T18:43:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
import bs4
def parse_html_page(response_text):
return bs4.BeautifulSoup(response_text, 'html.parser')
def parse_avito_results_page_for_prices(soup):
descriptions = soup.find_all('div', {'class': 'description'})
prices_wrappers = [
d.find('div', {'class': 'about'}) for d in descriptions
]
raw_prices = [w.contents[0] for w in prices_wrappers]
return filter(None, [process_price(p) for p in raw_prices])
def parse_avito_results_page_for_first_photo(soup):
photo_wrapper = soup.find('a', {'class': 'photo-wrapper'})
return 'https:%s' % photo_wrapper.find('img')['src']
def process_price(raw_price):
price = raw_price.strip()[:-4].replace(' ', '')
return int(price) if price != '' else None
|
[
"melevir@gmail.com"
] |
melevir@gmail.com
|
c6f76eb5503fb4a9a7c9ab83f8bd10aa3fbb381b
|
94594cb9b7e48cf4a66d8564589a9d7981a89dac
|
/loader.py
|
984fa3b4f768e8646d8c167cf6c091fc99ec178c
|
[] |
no_license
|
xal9wiii4ik/parse_freelance_bot
|
26d1d6a2c0257a320288fcc0ab87d3b6d327eb79
|
49db047b74888265f01036e467d084be5ce20fda
|
refs/heads/master
| 2023-04-02T08:58:25.581354
| 2021-04-08T21:54:18
| 2021-04-08T21:54:18
| 324,841,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
import asyncio
from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from data import config
from data.config import PARSER_DB
from utils.db_api.categories_command import CategoriesCommands
from utils.db_api.costs_commands import CostsCommands
from utils.db_api.payments_commands import PaymentsCommands
bot = Bot(token=config.BOT_TOKEN, parse_mode=types.ParseMode.HTML)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage, loop=asyncio.get_event_loop())
PAYMENTS_TABLE = PaymentsCommands(PARSER_DB)
CATEGORY_TABLE = CategoriesCommands(PARSER_DB)
COSTS_TABLE = CostsCommands(PARSER_DB)
|
[
"xal9wIII4ik@yandex.ru"
] |
xal9wIII4ik@yandex.ru
|
45778cfc1bbbd390bda742a7966334f7c5947b65
|
695bfbc92a1474a29270d46c7b4ae2805240b077
|
/ch-02/09-MultilayerNeuralNetwork-blind-1.py
|
195b75302a420f7861e71ae399207bba4e5d6e5b
|
[] |
no_license
|
paulhendricks/python-machine-learning
|
801563275e05fb1f611e9114581c5ef2f7b58125
|
8e1cb6bc37067cc239eaee69fd8aa13ffa405b68
|
refs/heads/master
| 2021-01-19T04:25:30.002300
| 2016-06-13T15:25:31
| 2016-06-13T15:25:31
| 50,059,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
import numpy as np
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0, 1, 1, 0]]).T
synapse_0 = 2 * np.random.random((2, 10)) - 1
synapse_1 = 2 * np.random.random((10, 20)) - 1
synapse_2 = 2 * np.random.random((20, 10)) - 1
synapse_3 = 2 * np.random.random((10, 1)) - 1
for _ in range(10000):
layer_1 = 1 / (1 + np.exp(-np.dot(X, synapse_0)))
layer_2 = 1 / (1 + np.exp(-np.dot(layer_1, synapse_1)))
layer_3 = 1 / (1 + np.exp(-np.dot(layer_2, synapse_2)))
layer_4 = 1 / (1 + np.exp(-np.dot(layer_3, synapse_3)))
layer_4_delta = (y - layer_4) * (layer_4 * (1 - layer_4))
layer_3_delta = np.dot(layer_4_delta, synapse_3.T) * (layer_3 * (1 - layer_3))
layer_2_delta = np.dot(layer_3_delta, synapse_2.T) * (layer_2 * (1 - layer_2))
layer_1_delta = np.dot(layer_2_delta, synapse_1.T) * (layer_1 * (1 - layer_1))
synapse_0 += np.dot(X.T, layer_1_delta)
synapse_1 += np.dot(layer_1.T, layer_2_delta)
synapse_2 += np.dot(layer_2.T, layer_3_delta)
synapse_3 += np.dot(layer_3.T, layer_4_delta)
|
[
"paul.hendricks.2013@owu.edu"
] |
paul.hendricks.2013@owu.edu
|
66691c5eaa3d77e4a81f63cbe55492db46fc75dd
|
6af7cf75fd919f05b47d7516da4568f92abef8aa
|
/actstream/migrations/0001_initial.py
|
802ad43280d127b600e43ed99013de043db04460
|
[] |
no_license
|
mauler/activity-stream
|
43bdb3363eda1362d66e4abec74c17a282e03d5e
|
72477ebec544c686e1691566399545642f4ff104
|
refs/heads/master
| 2020-04-10T00:31:55.970480
| 2015-03-10T13:28:51
| 2015-03-10T13:28:51
| 32,038,589
| 0
| 1
| null | 2015-03-11T20:06:21
| 2015-03-11T20:06:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,361
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
('subs', '0002_profile'),
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('verb', models.CharField(max_length=255)),
('description', models.TextField(null=True, blank=True)),
('action_object_object_id', models.CharField(max_length=255, null=True, blank=True)),
('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
('public', models.BooleanField(default=True)),
('data', jsonfield.fields.JSONField(null=True, blank=True)),
('action_object_content_type', models.ForeignKey(related_name='action_object', blank=True, to='contenttypes.ContentType', null=True)),
('actor', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('target', models.ForeignKey(to='subs.Post')),
],
options={
'ordering': ('-timestamp',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Follow',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('actor_only', models.BooleanField(default=True, verbose_name=b'Only follow actions where the object is the target.')),
('started', models.DateTimeField(default=django.utils.timezone.now)),
('follow_object', models.ForeignKey(related_name='followed_by', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='follow',
unique_together=set([('user', 'follow_object')]),
),
]
|
[
"xtranophilist@gmail.com"
] |
xtranophilist@gmail.com
|
2617fe025e7ddc018281f853dec2e238a062ff20
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/ask_hand_in_able_problem/number_and_person.py
|
4b08ca79400b115b7e926ee6a490cd123528665b
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
#! /usr/bin/env python
def part_and_day(str_arg):
early_time(str_arg)
print('ask_hand')
def early_time(str_arg):
print(str_arg)
if __name__ == '__main__':
part_and_day('early_man')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
046d8c76bb32eecf5ba9ad872efb2f8333d28fd3
|
6478723d180a8ef39941ba04b80c1eca9f437323
|
/244. Shortest Word Distance II.py
|
c3713fd9f7ca35dff4badc5c14b57690aff0adeb
|
[] |
no_license
|
NiuNiu-jupiter/Leetcode
|
2a49a365898ecca393cb1eb53a47f4501b25952d
|
e278ae6ded32f6a2d054ae11ad8fcc45e7bd0f86
|
refs/heads/master
| 2022-11-22T01:05:57.417538
| 2020-07-28T23:34:39
| 2020-07-28T23:34:39
| 182,104,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
"""
Design a class which receives a list of words in the constructor, and implements a method that takes two words word1 and word2 and return the shortest distance between these two words in the list. Your method will be called repeatedly many times with different parameters.
Example:
Assume that words = ["practice", "makes", "perfect", "coding", "makes"].
Input: word1 = “coding”, word2 = “practice”
Output: 3
Input: word1 = "makes", word2 = "coding"
Output: 1
Note:
You may assume that word1 does not equal to word2, and word1 and word2 are both in the list.
"""
class WordDistance:
def __init__(self, words: List[str]):
self.len = len(words)
self.dict = {}
for i, v in enumerate(words):
if not self.dict.get(v):
self.dict[v] = [i]
else:
self.dict[v].append(i)
def shortest(self, word1: str, word2: str) -> int:
l1 , l2 = self.dict[word1],self.dict[word2]
res = self.len
ptr1, ptr2 = 0 , 0
# O(m+n) time complexity
while ptr1 < len(l1) and ptr2 < len(l2):
res = min(res, abs( l1[ptr1] - l2[ptr2]))
if l1[ptr1] < l2[ptr2]:
ptr1 += 1
else:
ptr2 += 1
return res
# Your WordDistance object will be instantiated and called as such:
# obj = WordDistance(words)
# param_1 = obj.shortest(word1,word2)
|
[
"cmyumo.zhang@gmail.com"
] |
cmyumo.zhang@gmail.com
|
b0a397839d2484bb418a34d66f90b53b145a961e
|
6b6f68f507746e3e39b0e8789af5d044e27d6b0a
|
/BinarySearch/0108_ConvertSortedArrayIntoBinarySearchTree_E.py
|
9f2dbfc81f8e4a56f0487583f5e6e89bc92e38d1
|
[] |
no_license
|
PFZ86/LeetcodePractice
|
bb0012d8b3120451dda1745875836278d3362e45
|
6db9db1934bc0a8142124d8b56bf6c07bdf43d79
|
refs/heads/master
| 2021-08-28T08:43:27.343395
| 2021-08-17T20:38:32
| 2021-08-17T20:38:32
| 230,925,656
| 1
| 1
| null | 2021-08-17T20:38:32
| 2019-12-30T14:01:27
|
Python
|
UTF-8
|
Python
| false
| false
| 876
|
py
|
# https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Solution 1: binary search-like split
class Solution(object):
def sortedArrayToBST_helper(self, nums, start, end):
if start > end:
return None
mid = start + (end - start)/2
        node = TreeNode(nums[mid])
node.left = self.sortedArrayToBST_helper(nums, start, mid - 1)
node.right = self.sortedArrayToBST_helper(nums, mid + 1, end)
return node
def sortedArrayToBST(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
return self.sortedArrayToBST_helper(nums, 0, len(nums) - 1)
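# Illustrative example (assumes the TreeNode class sketched above):
# Solution().sortedArrayToBST([-10, -3, 0, 5, 9]) returns a height-balanced BST
# rooted at nums[2] == 0, with -10 and -3 in the left subtree and 5 and 9 in the right.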
|
[
"pengfeizang@pengfeis-iMac.fios-router.home"
] |
pengfeizang@pengfeis-iMac.fios-router.home
|
7df3bcbfa42d4a7f74ba3a7e9dbcecc69c3e55eb
|
f88e49b5aa336cea3bfd2fa46bb23048f6bfe875
|
/gaussfold/aa/asparagine.py
|
ac313a65d071a28e58827c86e16e2d7054d3d2ea
|
[] |
no_license
|
AntoinePassemiers/GDE-GaussFold
|
c50a05992447f1909b3357db40620e3ede3ffc16
|
323600a75a7b97286fd66d478111140b9496b076
|
refs/heads/master
| 2020-05-09T23:07:11.930093
| 2019-08-15T21:19:33
| 2019-08-15T21:19:33
| 181,492,718
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
# -*- coding: utf-8 -*-
# asparagine.py
# author : Antoine Passemiers
from gaussfold.aa.amino_acid import AminoAcid
from gaussfold.atom import Bond, Group, Carbon, Hydrogen, Oxygen, Nitrogen
class Asparagine(AminoAcid):
def __init__(self, **kwargs):
AminoAcid.__init__(self, 'ASN', 'N', **kwargs)
# Add side chain atoms
self.COG_group = Group('COG')
self.ND2_group = Group('ND2')
self.CB = Carbon('CB')
self.add_atom(self.CB)
self.CG = Carbon('CG', q=0.38)
self.add_atom(self.CG)
self.COG_group.add(self.CG)
self.OD1 = Oxygen('OD1', q=-0.38)
self.add_atom(self.OD1)
self.COG_group.add(self.OD1)
self.ND2 = Nitrogen('ND2', q=-0.56)
self.add_atom(self.ND2)
self.ND2_group.add(self.ND2)
self.add_bond(Bond(self.CB, self.CA))
self.add_bond(Bond(self.CG, self.CB))
self.add_bond(Bond(self.OD1, self.CG, order=2))
self.add_bond(Bond(self.ND2, self.CG))
# Add hydrogens
self.H1 = Hydrogen('H1')
self.add_atom(self.H1)
self.add_bond(Bond(self.H1, self.CB))
self.H2 = Hydrogen('H2')
self.add_atom(self.H2)
self.add_bond(Bond(self.H2, self.CB))
self.HNA = Hydrogen('HNA', q=0.28)
self.add_atom(self.HNA)
self.add_bond(Bond(self.HNA, self.ND2))
self.ND2_group.add(self.HNA)
self.HNB = Hydrogen('HNB', q=0.28)
self.add_atom(self.HNB)
self.add_bond(Bond(self.HNB, self.ND2))
self.ND2_group.add(self.HNB)
|
[
"apassemi@ulb.ac.be"
] |
apassemi@ulb.ac.be
|
c813db17e0b517e6d6d3eb324d57cdbd6b573e6a
|
95d0806ce766805beffba8144e4b83076b5f8b91
|
/hongwai/exam/thermo+hongwai_v01_hongwai.py
|
381646566d49c0ddec0464b85706d2008c0c75f5
|
[] |
no_license
|
chengw99/dell
|
1f4d5a2f20f3e61208266dc4a0adc4e18cd44ff8
|
4f932c3f0d3deb545700a9616456fb0beeb733d0
|
refs/heads/master
| 2021-04-15T08:53:49.011550
| 2019-03-23T14:56:41
| 2019-03-23T14:56:41
| 104,961,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 14:34:03 2018
@author: DELL
"""
# This program takes the data range selected with thermo+hongwai_v01_pic_coord.py and computes an area average of the infrared data
# Because the selected range of the infrared data differs at every time point, this cannot be run as a batch job
import numpy as np
import pandas as pd
ipath = r'E:\py_data\hongwai'
opath = r'E:\py_output'
data = pd.read_csv(ipath+'\\'+'one-facing south-23.csv',names=np.arange(480)) # adjust the input file here
result = data.ix[1555:1595,220:260] # range around the selected position (coordinates +/- 20), used for the area average
a = np.array(result)
q = a.sum()/((a.shape[0])*(a.shape[1]))
print(q)
#result.to_csv(opath+'\\'+'30cm-00.csv')
|
[
"951849311@qq.com"
] |
951849311@qq.com
|
0737a2f6e17c65219c251686d16823aafc690950
|
414db33a43c50a500741784eea627ba98bb63e27
|
/0x0B-python-input_output/3-write_file.py
|
ae0c2b1e364ed751046127fc160d45a253030029
|
[] |
no_license
|
rayraib/holbertonschool-higher_level_programming
|
2308ea02bd7f97eae3643e3ce0a6489cc1ad9ff5
|
6b4196eb890ffcb91e541431da9f5f57c5b85d4e
|
refs/heads/master
| 2021-09-14T09:12:26.664653
| 2018-05-11T03:23:12
| 2018-05-11T03:23:12
| 113,070,818
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
#!/usr/bin/python3
'''write to file'''
def write_file(filename="", text=""):
    '''open the file, write the text, and return the number of characters written'''
with open(filename, 'w', encoding='utf-8') as f:
return(f.write(text))
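# Illustrative usage (hypothetical file name):
# write_file("my_file.txt", "Hello") creates or overwrites my_file.txt and
# returns 5, the number of characters written.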
|
[
"binitarai11@gmail.com"
] |
binitarai11@gmail.com
|
ea2782e8c450f6c5a2dc9fb342bdfef834919c34
|
0a9949a7dbe5f7d70028b22779b3821c62eb6510
|
/gt_order_mongodb/conf/source_getdata0417.py
|
4d878aad59c7d787d1dd3cd6a30173596d185ecb
|
[] |
no_license
|
744996162/warehouse
|
ed34f251addb9438a783945b6eed5eabe18ef5a2
|
3efd299a59a0703a1a092c58a6f7dc2564b92e4d
|
refs/heads/master
| 2020-06-04T22:10:14.727156
| 2015-07-03T09:40:09
| 2015-07-03T09:40:09
| 35,603,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,784
|
py
|
# encoding=utf-8
__author__ = 'Administrator'
import sys
reload(sys)
import datetime
from dateutil.parser import parse
sys.setdefaultencoding('utf-8')
from source_mysql import *
sys.path.append('..')
from conf import *
import model
class GtOrderDao(Mysql):
def __init__(self, dbtype="local"):
Mysql.__init__(self, dbtype=dbtype)
def get_orders(self, sql, model=model.Model_Order):
results = self.get_all(sql)
model_list = []
if not results:
return []
for row in results:
o_model = model()
o_model.setVale(str(row[0]), str(row[1]), str(row[2]), str(row[3]), str(row[4]), str(row[5]), str(row[6]), str(row[7]), str(row[8]), str(row[9]), str(row[10]), str(row[11]))
model_list.append(o_model)
return model_list
class GtOrderSubDao(Mysql):
def __init__(self, dbtype="local"):
Mysql.__init__(self, dbtype=dbtype)
def get_orders(self, sql, model=model.Model_OrderSub):
results = self.get_all(sql)
model_list = []
if not results:
return []
for row in results:
o_model = model()
o_model.order_id = str(row[0])
o_model.uid = str(row[1])
o_model.account = str(row[2])
o_model.p_info = str(row[3])
o_model.depart_date = str(row[4])
o_model.train_no = str(row[5])
o_model.depart_name = str(row[6])
o_model.arrive_name = str(row[7])
o_model.name = str(row[8])
o_model.card_type = str(row[9])
o_model.card_no = str(row[10])
o_model.phone = str(row[11])
o_model.seat_name = str(row[12])
o_model.ticket_type = str(row[13])
o_model.status = str(row[14])
o_model.price = str(row[15])
o_model.create_time = str(row[16])
model_list.append(o_model)
return model_list
class QueryOrdersDao(GtOrderDao):
def __init__(self, dbtype="gtgj89"):
GtOrderDao.__init__(self,dbtype=dbtype)
self.tablename = 'user_order_history'
self.out_base_path = '/home/huolibi/data/gt_order_all/order/'
def get_orders(self, start_day, end_day=""):
if start_day == end_day or end_day == "":
sql = "select uid, p_info, account, order_date, i_status, depart_date, depart_name, arrive_name, ticket_count, train_no,amount,create_time " \
"from " + self.tablename + " " \
"where DATE_FORMAT(create_time,'%%Y%%m%%d')='%s' " % start_day
else:
sql = "select uid, p_info, account, order_date, i_status, depart_date, depart_name, arrive_name, ticket_count, train_no,amount,create_time " \
"from " + self.tablename + " " \
"where DATE_FORMAT(create_time,'%%Y%%m%%d')>='%s' " \
"and DATE_FORMAT(create_time,'%%Y%%m%%d')< '%s' " % (start_day, end_day)
print(sql)
model_result = super(QueryOrdersDao, self).get_orders(sql)
return model_result
def query_result_to_txt(self, start_day, end_day=""):
out_file_name = start_day + "_" + end_day
result_out_path = self.out_base_path + self.tablename + "_" + out_file_name
result_output=open(result_out_path, 'a')
model_result = self.get_orders(start_day, end_day)
for row in model_result:
out_str = row.getString()
result_output.write(out_str+'\n')
return result_out_path
class QueryOrdersSubDao(GtOrderSubDao):
def __init__(self, dbtype="gtgj89"):
GtOrderSubDao.__init__(self, dbtype=dbtype)
self.tablename = 'user_sub_order'
self.out_base_path = '/home/huolibi/data/gt_order_all/ordersub/'
def get_orders(self, start_day, end_day=""):
if start_day == end_day or end_day == "":
sql = "select order_id,uid,account,p_info,depart_date,train_no,depart_name,arrive_name,name,card_type,card_no,phone,seat_name,ticket_type,status,price,create_time " \
"from " + self.tablename + " " \
"where DATE_FORMAT(create_time,'%%Y%%m%%d')='%s' " % start_day
else:
sql = "select order_id,uid,account,p_info,depart_date,train_no,depart_name,arrive_name,name,card_type,card_no,phone,seat_name,ticket_type,status,price,create_time " \
"from " + self.tablename + " " \
"where DATE_FORMAT(create_time,'%%Y%%m%%d')>='%s' " \
"and DATE_FORMAT(create_time,'%%Y%%m%%d')< '%s' " % (start_day, end_day)
print(sql)
model_result = super(QueryOrdersSubDao, self).get_orders(sql)
return model_result
def query_result_to_txt(self, start_day, end_day=""):
out_file_name = start_day + "_" + end_day
result_out_path = self.out_base_path + self.tablename + "_" + out_file_name
result_output=open(result_out_path, 'a')
model_result = self.get_orders(start_day, end_day)
for row in model_result:
out_str = row.getString()
result_output.write(out_str+'\n')
return result_out_path
def getdata(start_date="20130701", end_date="20140101",deata=10):
date_list = []
s_day = parse(start_date)
end_day = parse(end_date)
days = (end_day-s_day).days
print(s_day, end_day,days)
for i in range(0, days,deata):
day1 = (s_day+datetime.timedelta(days=i)).strftime('%Y%m%d')
day2 = (s_day+datetime.timedelta(days=i+deata)).strftime('%Y%m%d')
date_list.append([day1,day2])
return date_list
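# Illustrative example: getdata("20130701", "20130711", deata=5) returns
# [['20130701', '20130706'], ['20130706', '20130711']], i.e. consecutive
# [day1, day2) windows of `deata` days covering the requested date range.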
def test1():
t = QueryOrdersDao()
# tablename = t.tablename
sql ="select uid, p_info, account, order_date, i_status, depart_date, depart_name, arrive_name, ticket_count, train_no,amount,create_time from user_order "
# o_object = GtOrderDao()
results = t.query_result_to_txt("20140101", "20150101")
print(results)
def OrderOutTest():
    date_list = getdata(start_date="20150105", end_date="20150215", deata=3)
for date_arr in date_list:
s_day = date_arr[0]
end_day = date_arr[1]
print(s_day,end_day)
o_orderDao = QueryOrdersDao()
results = o_orderDao.query_result_to_txt(s_day, end_day)
def OrderSubOutTest():
date_list = getdata(start_date="20150214", end_date="20150315", deata=3)
for date_arr in date_list:
s_day = date_arr[0]
end_day = date_arr[1]
print(s_day, end_day)
o_orderSubDao = QueryOrdersSubDao()
results = o_orderSubDao.query_result_to_txt(s_day, end_day)
if __name__ == '__main__':
#o_orderSubDao = QueryOrdersSubDao()
#results = o_orderSubDao.query_result_to_txt("20130715", "20150101")
# OrderOutTest()
OrderSubOutTest()
pass
|
[
"744996162@qq.com"
] |
744996162@qq.com
|
fdce2f80d345ab41a55076388777658352217168
|
4d2531f7f4984109123bb70eb0ac3c8c08bb12e8
|
/trash/faster_rcnn/.ipynb_checkpoints/faster_rcnn_r50_fpn_dcn_1x_trash-checkpoint.py
|
e845540dfb67a6500a02466dcf28428989daeb7f
|
[] |
no_license
|
jhj9109/Segmentation_ObjectDetection
|
0a3e8f92a90bed21a93b9c4cba7029330c1c4d2b
|
f40a0291b8513228be7475321ca59a1057f0aa27
|
refs/heads/master
| 2023-05-09T03:44:16.145448
| 2021-05-22T16:36:15
| 2021-05-22T16:36:15
| 369,830,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
_base_ = 'faster_rcnn_r50_fpn_1x_trash.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN',
deform_groups=1,
fallback_on_stride=False),
stage_with_dcn=(
False, True, True, True)))
|
[
"jhj91_09@naver.com"
] |
jhj91_09@naver.com
|
8cc45ae5914c90e78e71724a0f2a83de0bfd00ab
|
ebac424304a4456193843472d3e91b5de79da514
|
/order/views.py
|
58924b80fb9fe0c21934ae0e514faa8fdca632ae
|
[] |
no_license
|
haha479/bookstore
|
1a0e7f73ed973ff06fd0c94cb92329baca39b527
|
3e85e2a62d4e9dde68c5f226c2e7558c6a06a03e
|
refs/heads/master
| 2021-09-08T09:05:15.153693
| 2018-03-09T01:21:44
| 2018-03-09T01:21:44
| 115,857,026
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,927
|
py
|
from django.shortcuts import render,redirect
from utils.decorators import login_required
from django.core.urlresolvers import reverse
from users.models import Address
from book.models import Books
from order.models import OrderInfo
from order.models import OrderGods
from django_redis import get_redis_connection
from django.db import transaction
from django.http import HttpResponse,JsonResponse
from datetime import datetime
import time
@login_required
def order_place(request):
    '''Show the order confirmation page.'''
    # Receive data
    books_ids = request.POST.getlist('books_ids')
    # Validate data
    if not all(books_ids):
        # Redirect to the cart page
        return redirect(reverse('cart:show'))
    # User shipping addresses
    passport_id = request.session.get('passport_id')
    addrs = Address.objects.all()
    # Products the user wants to buy
    books_li = []
    # Total quantity and total amount of the products
    total_count = 0
    total_price = 0
    conn = get_redis_connection('default')
    cart_key = 'cart_%d' % passport_id
    for id in books_ids:
        # Get the product by its id
        book = Books.objects.get_books_by_id(books_id=id)
        # Get the quantity the user wants to buy from redis
        count = conn.hget(cart_key, id)
        book.count = count
        # Subtotal for this product (quantity * price)
        amount = int(book.count) * book.price
        book.amount = amount
        books_li.append(book)
        # Accumulate the total quantity and total amount
        total_count += int(count)
        # Add every product subtotal to the total amount
        total_price += book.amount
    # Shipping fee and amount payable
    transit_price = 10
    total_pay = total_price + transit_price
    # books_ids becomes a comma separated string, e.g. "1,2,3"
    books_ids = ','.join(books_ids)
    # Build the template context
    context = {
        'addrs' : addrs,
        'books_li': books_li,
        'total_count': total_count,
        'total_price': total_price,
        'transit_price': transit_price,
        'total_pay': total_pay,
        'books_ids': books_ids,
    }
    # Render the template
    return render(request,'order/place_order.html',context=context)
@transaction.atomic
def order_commit(request):
    '''Create an order'''
    # Check whether the user is logged in
    if not request.session.has_key('islogin'):
        return JsonResponse({'res':0, 'errmsg': 'User is not logged in'})
    # Receive data
    addr_id = request.POST.get('addr_id')
    pay_method = request.POST.get('pay_method')
    books_id = request.POST.get('books_ids')
    # Validate data
    if not all([addr_id, pay_method, books_id]):
        return JsonResponse({'res':1, 'errmsg': 'Incomplete data'})
    try:
        addr = Address.objects.get(id=addr_id)
    except Exception as e:
        # Invalid address information
        return JsonResponse({'res':2, 'errmsg': 'Invalid address information'})
    if int(pay_method) not in OrderInfo.PAY_METHOD_EMUM.values():
        return JsonResponse({'res': 3, 'errmsg': 'Unsupported payment method'})
    # Create the order
    # Assemble the order information
    passport_id = request.session.get('passport_id')
    # Order id: 20171212... + the user's id
    order_id = datetime.now().strftime('%Y%m%d%H%M%S') + str(passport_id)
    # Shipping fee
    transit_price = 10
    # Total quantity and total amount of the order
    total_count = 0
    total_price = 0
    # Create a savepoint
    sid = transaction.savepoint()
    try:
        # Add a record to the order information table
        order = OrderInfo.objects.create(order_id=order_id,
                                         passport_id = passport_id,
                                         addr_id=addr_id,
                                         total_count=total_count,
                                         total_price=total_price,
                                         transit_price=transit_price,
                                         pay_method=pay_method)
        # Add records for the ordered products to the order goods table
        books_ids = books_id.split(',')
        conn = get_redis_connection('default')
        cart_key = 'cart_%d' % passport_id
        # Iterate over the products the user is buying
        for id in books_ids:
            books = Books.objects.get_books_by_id(books_id=id)
            if books is None:
                transaction.savepoint_rollback(sid)
                return JsonResponse({'res':4, 'errmsg': 'Invalid product information'})
            # Quantity the user is buying
            count = conn.hget(cart_key, id)
            # Check the product stock
            if int(count) > books.stock:
                transaction.savepoint_rollback(sid)
                return JsonResponse({'res': 5, 'errmsg': 'Insufficient stock'})
            # Create an order goods record
            OrderGods.objects.create(order_id=order_id,
                                     books_id=id,
                                     count=count,
                                     price=books.price
                                     )
            # Increase the product sales and decrease the stock
            books.sales += int(count)
            books.stock -= int(count)
            books.save()
            # Accumulate the total quantity and total amount
            total_count += int(count)
            total_price += int(count) * books.price
        # Update the order's total quantity and total amount
        order.total_count = total_count
        order.total_price = total_price
        order.save()
    except Exception as e:
        # A database operation failed, roll back to the savepoint
        transaction.savepoint_rollback(sid)
        return JsonResponse({'res': 7, 'errmsg': 'Server error'})
    # Remove the corresponding cart records
    conn.hdel(cart_key, *books_ids)
    # Commit (release) the savepoint
    transaction.savepoint_commit(sid)
    # Return the response
    return JsonResponse({'res': 6})
|
[
"283275935@qq.com"
] |
283275935@qq.com
|
11a91f1fed378f258c0163eab10cdf4961ce6f5d
|
915c31ce84a826d225bcb1cc5f1e0323e712f6e4
|
/calculate-testing-agreement.py
|
6276ce7b87c1a1e58da5c8f6883613ea1968e06e
|
[
"Apache-2.0"
] |
permissive
|
mac389/overdosed
|
64162aaf8f57f7ca57bcc95678d0d18e231cda87
|
434255db4ea36581c9f94c7aa09ca6ca15169e8a
|
refs/heads/master
| 2021-01-10T07:44:41.804936
| 2015-06-25T23:22:51
| 2015-06-25T23:22:51
| 36,990,551
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
import csv
import numpy as np
from pprint import pprint
with open('testing-sample-MC-ratings.csv','rU') as my_rating_file:
my_ratings = [row for row in csv.reader(my_rating_file)]
#Automatic ratings
with open('testing-sample-w-ratings','rU') as automatic_rating_file:
automatic_ratings = [row for row in csv.reader(automatic_rating_file,delimiter='\t')]
my_rating_numbers = np.array([x[-1] for x in my_ratings]).astype(int)
automatic_rating_numbers = np.array([True if x[1]=='True' else False for x in automatic_ratings]).astype(int)
pprint(automatic_rating_numbers)
#me, them
yes_yes = len([i for i in xrange(len(my_rating_numbers)) if my_rating_numbers[i] == 1 and automatic_rating_numbers[i] == 1])
yes_no = len([i for i in xrange(len(my_rating_numbers)) if my_rating_numbers[i] == 1 and automatic_rating_numbers[i] == 0])
no_yes = len([i for i in xrange(len(my_rating_numbers)) if my_rating_numbers[i] == 0 and automatic_rating_numbers[i] == 1])
no_no = len([i for i in xrange(len(my_rating_numbers)) if my_rating_numbers[i] == 0 and automatic_rating_numbers[i] == 0])
print ' Them'
print 'Me',yes_yes,yes_no
print 'Me',no_yes, no_no
|
[
"mac389@gmail.com"
] |
mac389@gmail.com
|
57cf2702270e99daab0cd0f773f9b28777c1fff8
|
b4ecc9c5a74f11958e7a49999d0299e7bb883d2e
|
/postgres-database/dbFunctions.py
|
34dd98379303cca9135af6e25108f79e95ad7f9a
|
[] |
no_license
|
teja0508/AcronymLookup
|
6edea8ab9bc27824b961563f5bf968b499490094
|
ea5b812c41f138b5dccabbe2c474e2da0f85ce9e
|
refs/heads/main
| 2022-12-20T08:00:30.161858
| 2020-10-18T06:01:32
| 2020-10-18T06:01:32
| 305,030,809
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,255
|
py
|
# dbFunctions.py [Rachel Gardner]
#
# This file defines the AcronymsDatabase class, which
# interfaces with the PostgreSQL backend to store acronyms, definitions
# and their contexts.
import psycopg2
import json
from collections import Counter
class AcronymDatabase:
def __init__(self):
conn = psycopg2.connect(database="acronyms", user="postgres", password="Chandra@12", host="localhost")
self.conn = conn
self.cur = conn.cursor()
def addAcronym(self, acronym):
self.cur.execute("INSERT INTO acronyms (acronym) VALUES (%s) RETURNING aid", (acronym,))
return self.cur.fetchone()[0]
def getAcronym(self, acronym):
self.cur.execute("SELECT aid FROM acronyms WHERE acronym=%s", (acronym,))
result = self.cur.fetchone()
return result[0] if result else None
def addDefinition(self, definition, context, url, aID = False):
self.cur.execute("INSERT INTO definitions (definition, context, url) VALUES (%s, %s, %s) RETURNING did", (definition,context, url))
dID = self.cur.fetchone()[0]
# if acronym exists, link this definition to existing acronym
if (aID):
self.cur.execute("INSERT INTO acronyms_definitions (aid, did) VALUES (%s, %s)", (aID, dID))
return dID
def addTrueDefinition(self, acronym, truedef, url):
self.cur.execute("SELECT true_definition FROM true_definitions WHERE acronym=%s AND url=%s", (acronym,url))
result = self.cur.fetchone()
if(not result): result = None
else: result=result[0]
if(result is None):
self.cur.execute("INSERT INTO true_definitions (acronym, true_definition, url) VALUES (%s, %s, %s)", (acronym,truedef,url))
def getTrueDefinition(self, acronym, url):
self.cur.execute("SELECT true_definition FROM true_definitions WHERE acronym=%s AND url=%s", (acronym,url))
result = self.cur.fetchone()
return result[0] if result else None
def addContext(self, context):
self.cur.execute("INSERT INTO context (context) VALUES (%s) RETURNING cid", (context,))
return self.cur.fetchone()[0]
def acronymHasDefinition(self,aID, definition):
self.cur.execute("SELECT definitions.DID from definitions JOIN acronyms_definitions ON acronyms_definitions.DID = definitions.DID WHERE definitions.definition = %s AND acronyms_definitions.AID = %s", (definition, aID))
result = self.cur.fetchone()
return result[0] if result else None
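    # NOTE: the addContext(definition_id, context) below shadows the
    # addContext(context) defined above, so only the two-argument version is callable.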
def addContext(self,definition_id, context):
newContextJSON = json.dumps(context)
self.cur.execute("UPDATE context SET context=%s FROM definitions WHERE DID=%s", (newContextJSON,definition_id))
def updateContext(self, definition_id, context):
self.cur.execute("SELECT context FROM definitions JOIN context ON definitions.CID = context.CID WHERE DID = %s LIMIT 1;", (definition_id,))
oldContextJSON = self.cur.fetchone()[0]
oldContext = Counter(json.loads(oldContextJSON))
newContext = oldContext + context
newContextJSON = json.dumps(newContext)
self.cur.execute("UPDATE context SET context=%s FROM definitions WHERE DID=%s", (newContextJSON,definition_id))
def getContextAcronymList(self):
self.cur.execute("SELECT did, context, definition FROM definitions")
result = self.cur.fetchall()
ret = []
for elem in result:
did = str(elem[0])
self.cur.execute("SELECT aid FROM acronyms_definitions WHERE did=%s" ,(did,))
aid = str(self.cur.fetchone()[0])
self.cur.execute("SELECT acronym FROM acronyms WHERE aid=%s", (aid,))
acronym = self.cur.fetchone()[0]
ret.append((acronym, elem[1], elem[2]))
return ret
def clearTrueDefTable(self):
self.cur.execute("DELETE FROM true_definitions")
def clearAcronymTables(self):
self.cur.execute("DELETE FROM definitions")
self.cur.execute("DELETE FROM acronyms")
self.cur.execute("DELETE FROM acronyms_definitions")
def close(self):
self.conn.commit() # make the changes to the database persistent
self.cur.close()
self.conn.close()
|
[
"lchandratejareddy@gmail.com"
] |
lchandratejareddy@gmail.com
|
1281fb5541740ff35cf2cb197890e7c73fb333e2
|
221cada2354556fbb969f25ddd3079542904ef5d
|
/AlgoExpert/validate_bst.py
|
24bf1e02380e497c9452015016be874e08f91ce8
|
[] |
no_license
|
syzdemonhunter/Coding_Exercises
|
4b09e1a7dad7d1e3d4d4ae27e6e006732ffdcb1d
|
ca71572677d2b2a2aed94bb60d6ec88cc486a7f3
|
refs/heads/master
| 2020-05-24T11:19:35.019543
| 2019-11-22T20:08:32
| 2019-11-22T20:08:32
| 187,245,394
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
# T: O(n)
# S: O(d)
def validateBst(tree):
return helper(tree, -float('inf'), float('inf'))
def helper(tree, min_val, max_val):
if not tree:
return True
if tree.value < min_val or tree.value >= max_val:
return False
left_valid = helper(tree.left, min_val, tree.value)
right_valid = helper(tree.right, tree.value, max_val)
return left_valid and right_valid
#######################
def validateBst(tree):
if not tree:
return True
return helper(tree, None, None)
def helper(root, lower, upper):
if not root:
return True
if lower and root.value < lower.value:
return False
if upper and root.value >= upper.value:
return False
return helper(root.left, lower, root) \
and helper(root.right, root, upper)
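A small usage sketch; the TreeNode class here is an assumption, since the snippet never defines its node type:
class TreeNode:
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

valid = TreeNode(10, TreeNode(5), TreeNode(15))
invalid = TreeNode(10, TreeNode(5), TreeNode(15, left=TreeNode(8)))  # 8 sits in the right subtree but is < 10
print(validateBst(valid))    # True
print(validateBst(invalid))  # False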
|
[
"syzuser60@gmail.com"
] |
syzuser60@gmail.com
|
6f92343ff23e76b2a0b1a06cdd767ecf0e444f40
|
e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67
|
/azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/sql/models/encryption_protector.py
|
28b7ccfc213258e75b4740819a87b7557399fc67
|
[] |
no_license
|
EnjoyLifeFund/macHighSierra-cellars
|
59051e496ed0e68d14e0d5d91367a2c92c95e1fb
|
49a477d42f081e52f4c5bdd39535156a2df52d09
|
refs/heads/master
| 2022-12-25T19:28:29.992466
| 2017-10-10T13:00:08
| 2017-10-10T13:00:08
| 96,081,471
| 3
| 1
| null | 2022-12-17T02:26:21
| 2017-07-03T07:17:34
| null |
UTF-8
|
Python
| false
| false
| 2,969
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class EncryptionProtector(ProxyResource):
"""The server encryption protector.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param kind: Kind of encryption protector. This is metadata used for the
Azure portal experience.
:type kind: str
:ivar location: Resource location.
:vartype location: str
:ivar subregion: Subregion of the encryption protector.
:vartype subregion: str
:param server_key_name: The name of the server key.
:type server_key_name: str
:param server_key_type: The encryption protector type like
'ServiceManaged', 'AzureKeyVault'. Possible values include:
'ServiceManaged', 'AzureKeyVault'
:type server_key_type: str or :class:`ServerKeyType
<azure.mgmt.sql.models.ServerKeyType>`
:ivar uri: The URI of the server key.
:vartype uri: str
:ivar thumbprint: Thumbprint of the server key.
:vartype thumbprint: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'readonly': True},
'subregion': {'readonly': True},
'server_key_type': {'required': True},
'uri': {'readonly': True},
'thumbprint': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'subregion': {'key': 'properties.subregion', 'type': 'str'},
'server_key_name': {'key': 'properties.serverKeyName', 'type': 'str'},
'server_key_type': {'key': 'properties.serverKeyType', 'type': 'str'},
'uri': {'key': 'properties.uri', 'type': 'str'},
'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
}
def __init__(self, server_key_type, kind=None, server_key_name=None):
super(EncryptionProtector, self).__init__()
self.kind = kind
self.location = None
self.subregion = None
self.server_key_name = server_key_name
self.server_key_type = server_key_type
self.uri = None
self.thumbprint = None
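A minimal construction sketch (the key name below is a made-up placeholder): per the validation map, only server_key_type is required, and the read-only fields stay None until the service populates them.
protector = EncryptionProtector(server_key_type='AzureKeyVault',
                                server_key_name='myVaultKeyName')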
|
[
"Raliclo@gmail.com"
] |
Raliclo@gmail.com
|
edcd8ce0f727f58aaea7d90968b3b585f9aeab83
|
befd78e2bfdeb7aa786e8d78aa30670e72226577
|
/concurrency_with_asyncio/ch6/counting_freq_a.py
|
e4e1287524a416fe2d0e87e52535f912b079964d
|
[] |
no_license
|
owari-taro/concurrency_in_python
|
4ee2664a4e8c6a9a840ffd0878dbd53181818813
|
6f12d84b4a72cd5ddd05c74b1c94902c784e5a18
|
refs/heads/master
| 2023-03-06T12:41:15.637603
| 2023-02-28T15:02:14
| 2023-02-28T15:02:14
| 301,134,395
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
import time

freqs = {}
with open("data/googlebooks-eng-all-1gram-20120701-a", "r") as reader:
    st = time.time()
    line_count = 0
    for line in reader:
        if line_count % 10_000 == 0:
            print(freqs)
        line_count += 1
        data = line.split("\t")
        word = data[0]
        count = int(data[2])  # occurrence count for this (word, year) row
        if word in freqs:
            freqs[word] = freqs[word] + count
        else:
            freqs[word] = count
    end = time.time()
    print(f"{end-st}")
|
[
"taro.biwajima@gmail.com"
] |
taro.biwajima@gmail.com
|
c5597281e91a1f2336cb6e046d5aa458cb432bf9
|
e4414bd8152e52855db7ab9065ae12b7329143e0
|
/python/src/maxdiffarr.py
|
c59fcce6e0af15f92b72aaecbe371e748058fd86
|
[] |
no_license
|
catalinc/programmingpraxis-solutions
|
39cb847877ec46d2fb85740791c24889ab5654a8
|
c0b13906aa76ffac705bf108db138fb9a38bc16a
|
refs/heads/master
| 2021-03-27T16:46:47.781839
| 2017-09-09T15:17:38
| 2017-09-09T15:17:38
| 53,532,233
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
#!/usr/bin/env python
import sys
import unittest
# See http://programmingpraxis.com/2011/04/01/maximum-difference-in-an-array/
def maxdiff(a):
min_i = i = j = 0
d = -sys.maxint
for k in xrange(1, len(a)):
if a[k] < a[min_i]:
min_i = k
elif a[k] - a[min_i] > d:
d = a[k] - a[min_i]
i = min_i
j = k
return i, j, d
class Test(unittest.TestCase):
def test_1(self):
self.assertEqual((3, 4, 7), maxdiff([4, 3, 9, 1, 8, 2, 6, 7, 5]))
def test_2(self):
self.assertEqual((1, 2, 7), maxdiff([4, 2, 9, 1, 8, 3, 6, 7, 5]))
def test_3(self):
self.assertEqual((3, 7, 7), maxdiff([4, 3, 9, 1, 2, 6, 7, 8, 5]))
if __name__ == '__main__':
unittest.main()
|
[
"catalin.cristu@gmail.com"
] |
catalin.cristu@gmail.com
|
71a2d7cbbd8c0b66f75a771056748b3e7f026054
|
aadea82d00400b71de86b1906ed347d10416e69b
|
/p350.py
|
b495e8cb02bea100bc4e51baabf5fe09685b80ad
|
[] |
no_license
|
abishekravi/guvipython
|
fc0f56912691cd5a41ab20f0c36b2027ebccfb00
|
4fbb83f0a131775cd9eb3f810c2d1c9ad22d710a
|
refs/heads/master
| 2021-08-16T10:22:00.052735
| 2020-06-25T04:35:42
| 2020-06-25T04:35:42
| 196,218,458
| 2
| 27
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
#a
def minChCnt(s):
    dic1 = {}
    for c in s:
        if not c.isspace():
            dic1[c] = dic1.get(c, 0) + 1
    L = []
    min1 = min(dic1.values())
    for k, v in dic1.items():
        if v == min1:
            L.append(k)
    return L

s = input()
L = minChCnt(s)
print(*L)
|
[
"noreply@github.com"
] |
abishekravi.noreply@github.com
|
41be5f261a4866f3eca403bdcbdb7c9fee1a2e7d
|
7ff12fccd5da300c6f844e01ae15399804d28107
|
/41/lambda.py
|
b827614414b2610ff8e1e0f176b3ba5efbd2b92e
|
[] |
no_license
|
weilaidb/python_dd
|
cc49d21d4ad8b6e56b80ea068b95255502eb9ea5
|
7458af7fb028850999cdbf089ac6c61a55096c25
|
refs/heads/master
| 2021-01-10T01:36:35.539121
| 2016-02-24T14:25:23
| 2016-02-24T14:25:23
| 47,059,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
#!/usr/bin/python
#Filename:lambda.py
def make_repeater(n):
return lambda s:s*n
twice = make_repeater(2)
print twice('word')
print twice(15)
|
[
"you@example.com"
] |
you@example.com
|
817266a98c195d0592a873a56976b22261b0798b
|
d8fb7147e9b471fafb9b1e7ae208e20e1eab9cc1
|
/lmtk/html/meta.py
|
6e1b1fee1fa05847fa7a141fc3493ed38d3f63da
|
[
"MIT"
] |
permissive
|
ericchen/lmtk
|
e61e5c1f0790dfba32f2ceb406df16045332b15f
|
ce2604851af66801f459b2e9bc5aeaf0c94bc15d
|
refs/heads/master
| 2021-01-21T04:08:08.336582
| 2014-09-23T14:02:01
| 2014-09-23T14:02:01
| 33,095,938
| 1
| 0
| null | 2015-03-30T00:53:51
| 2015-03-30T00:53:50
| null |
UTF-8
|
Python
| false
| false
| 5,134
|
py
|
# -*- coding: utf-8 -*-
"""
lmtk.html.meta
~~~~~~~~~~~~~~
Tools for extracting metadata from HTML.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import re
from bs4 import BeautifulSoup
from lmtk.text import u
from lmtk.bib import PersonName
def extract_metadata(html):
"""Parse a HTML page to extract embedded metadata.
TODO: Is this obsolete due to lmtk.scrape package?
:param html: The HTML to parse. Either as a string or BeautifulSoup object.
"""
def resolve_meta(names):
"""Given a list of meta names, return the content of the first that is found."""
for name in names:
try:
return u(html.find('meta', attrs={'name': name, 'content': True})['content'].strip())
except TypeError:
continue
if isinstance(html, basestring):
html = BeautifulSoup(html, 'lxml')
meta = {
u'title': resolve_meta(['citation_title', 'dc.title', 'DC.title', 'title', 'citation_dissertation_name']),
u'journal': resolve_meta(['citation_journal_title', 'prism.publicationName', 'dc.source', 'DC.source']),
u'volume': resolve_meta(['citation_volume', 'prism.volume']),
u'issue': resolve_meta(['citation_issue', 'prism.number', 'citation_technical_report_number']),
u'page': resolve_meta(['citation_firstpage', 'prism.startingPage']),
u'abstract': resolve_meta(['description', 'dc.description', 'DC.description']),
u'publisher': resolve_meta(['citation_publisher', 'dc.publisher', 'DC.publisher']),
u'conference': resolve_meta(['citation_conference_title', 'citation_conference']),
u'institution': resolve_meta(['citation_dissertation_institution', 'citation_technical_report_institution']),
u'doi': resolve_meta(['citation_doi', 'dc.identifier', 'DC.identifier']),
u'issn': resolve_meta(['citation_issn', 'prism.issn']),
u'isbn': resolve_meta(['citation_isbn']),
u'pmid': resolve_meta(['citation_pmid']),
u'language': resolve_meta(['citation_language', 'dc.language', 'DC.language']),
u'copyright': resolve_meta(['dc.copyright', 'DC.copyright', 'prism.copyright']),
u'rights_agent': resolve_meta(['dc.rightsAgent', 'DC.rightsAgent', 'prism.rightsAgent']),
u'patent_number': resolve_meta(['citation_patent_number']),
u'patent_country': resolve_meta(['citation_patent_country']),
u'abstract_url': resolve_meta(['citation_abstract_html_url']),
u'html_url': resolve_meta(['citation_fulltext_html_url']),
u'pdf_url': resolve_meta(['citation_pdf_url']),
u'date': resolve_meta(['citation_publication_date', 'prism.publicationDate', 'citation_date', 'dc.date',
'DC.date', 'citation_online_date'])
}
# authors
persons = []
for metaname in ['citation_author', 'dc.creator', 'DC.creator']:
for el in html.find_all('meta', attrs={'name': metaname, 'content': True}):
person = PersonName(el['content'])
if person and not any(person.could_be(other) for other in persons):
persons.append(person)
persons = [dict(p) for p in persons]
affiliations = [el.get('content', None) for el in html.find_all('meta', attrs={'name': 'citation_author_institution'})]
if len(affiliations) == len(persons):
for i, aff in enumerate(affiliations):
persons[i][u'affiliation'] = [u(aff)]
meta[u'authors'] = persons
# keywords
keywords = set()
for metaname in ['dc.type', 'prism.section', 'citation_keywords']:
for el in html.find_all('meta', attrs={'name': metaname, 'content': True}):
kcomps = [u(k.strip()) for k in el['content'].split(',')]
keywords.update(kcomps)
meta[u'keywords'] = list(keywords)
# last page
last = html.find('meta', attrs={'name': 'citation_lastpage', 'content': True})
if last and 'content' in last.attrs and 'page' in meta:
meta[u'page'] = '%s-%s' % (meta[u'page'], u(last['content']))
# XML URL
xml = html.find('link', attrs={'rel': 'alternate', 'href': True,
'type': re.compile(r'^(text/xml|application/rdf\+xml)$')})
if xml:
meta[u'xml_url'] = u(xml['href'])
# PDF URL backup
pdf = html.find('link', attrs={'rel': 'alternate', 'type': 'application/pdf', 'href': True})
if not 'pdf_url' in meta and pdf:
meta[u'pdf_url'] = u(pdf['href'])
# title backup
if not 'title' in meta and html.title:
meta[u'title'] = html.title.string.strip()
for el in html.find_all(attrs={'rel': 'license', 'href': True}):
meta[u'license'] = u(el['href'])
if not 'license' in meta:
lic = html.find('meta', attrs={'name': re.compile(r'^dc\.rights$', re.I), 'content': True})
if lic:
meta[u'license'] = u(lic['content'])
meta = dict([(k, v) for k, v in meta.items() if v])
return meta
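A rough usage sketch; the HTML snippet is made up and the exact keys returned depend on which meta tags are present:
sample = ('<html><head>'
          '<meta name="citation_title" content="A Sample Paper"/>'
          '<meta name="citation_doi" content="10.1000/xyz123"/>'
          '</head></html>')
print(extract_metadata(sample))  # roughly {'title': 'A Sample Paper', 'doi': '10.1000/xyz123'}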
|
[
"m.swain@me.com"
] |
m.swain@me.com
|
f5a2e5a623c38797822aa43672dd567a377d6369
|
379e932003bac6869260a6b362bce589951c96a1
|
/backend/test_25201/settings.py
|
ed9bc57d4ac097d5d02a1401e46104c4c50ebc7a
|
[] |
no_license
|
crowdbotics-apps/test-25201
|
31e0020b73f87fd28b15a57f3702750cde7eba4e
|
9cd38854979c7d6680a2b0f8ac32c71b5e3e13c1
|
refs/heads/master
| 2023-03-27T10:57:58.090951
| 2021-03-23T02:08:52
| 2021-03-23T02:08:52
| 350,550,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,214
|
py
|
"""
Django settings for test_25201 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"task",
"task_profile",
"tasker_business",
"location",
"wallet",
"task_category",
]
LOCAL_APPS = [
"home",
"modules",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
"storages",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "test_25201.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "web_build")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "test_25201.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
os.path.join(BASE_DIR, "web_build/static"),
]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
and AWS_STORAGE_BUCKET_NAME
and AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning(
"You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
)
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
acf5f3f8c41ffe2c340d87e431ce7c7df30ad01d
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/70_1.py
|
ec22d692bf592f3d3a12f7546ec72edd733f712b
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
Python – Remove duplicate values across Dictionary Values
Sometimes, while working with Python dictionaries, we can have a problem in
which we need to remove all the duplicate values across all the dictionary
value lists. This problem can have application in data domains and web
development domains. Let’s discuss certain ways in which this task can be
performed.
> **Input** : test_dict = {‘Manjeet’: [1], ‘Akash’: [1, 8, 9]}
> **Output** : {‘Manjeet’: [], ‘Akash’: [8, 9]}
>
> **Input** : test_dict = {‘Manjeet’: [1, 1, 1], ‘Akash’: [1, 1, 1]}
> **Output** : {‘Manjeet’: [], ‘Akash’: []}
**Method #1 : Using Counter() + list comprehension**
The combination of the above functions can solve this problem. Here, Counter() counts
how often each value occurs across all of the value lists, and a list comprehension
keeps only the values that occur exactly once.
# Python3 code to demonstrate working of
# Remove duplicate values across Dictionary Values
# Using Counter() + list comprehension
from collections import Counter
# initializing dictionary
test_dict = {'Manjeet' : [1, 4, 5, 6],
'Akash' : [1, 8, 9],
'Nikhil': [10, 22, 4],
'Akshat': [5, 11, 22]}
# printing original dictionary
print("The original dictionary : " + str(test_dict))
# Remove duplicate values across Dictionary Values
# Using Counter() + list comprehension
cnt = Counter()
for idx in test_dict.values():
cnt.update(idx)
res = {idx: [key for key in j if cnt[key] == 1]
for idx, j in test_dict.items()}
# printing result
print("Uncommon elements records : " + str(res))
**Output :**
> The original dictionary : {‘Akshat’: [5, 11, 22], ‘Nikhil’: [10, 22, 4],
> ‘Manjeet’: [1, 4, 5, 6], ‘Akash’: [1, 8, 9]}
> Uncommon elements records : {‘Akshat’: [11], ‘Nikhil’: [10], ‘Manjeet’: [6],
> ‘Akash’: [8, 9]}
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
485b3c5be8e95fe1bbf16aff7303d04fa4db5320
|
4ddf4fa6a4a499d64b23fb99d70a7bb3802fd1b0
|
/utils.py
|
3cbae7d209d317c10c8b8a9c0dd055c5e057feec
|
[] |
no_license
|
biterbilen/MVML
|
2b318b3883c00ed1908ef75924077e3aab639094
|
76a79ded26d09452234b7ae2b4809e47aa93df70
|
refs/heads/master
| 2023-01-13T10:04:10.269589
| 2020-11-16T18:55:19
| 2020-11-16T18:55:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
import subprocess
import pandas as pd
class HexTransformer:
@staticmethod
def hex2rgb(hex):
hex = hex.replace("#", "")
return int(hex[0:2], 16), int(hex[2:4], 16), int(hex[4:6], 16)
def fit(self, X, y=None):
return self
def transform(self, X):
return X.apply(self.hex2rgb).apply(pd.Series)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def clean_up():
subprocess.run(
"rm -rf .mummify .venv __pycache__ .ipynb_checkpoints mummify.log Procfile requirements.txt runtime.txt pipe.pkl Dockerfile",
shell=True,
)
if __name__ == "__main__":
clean_up()
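A small usage sketch of HexTransformer (the colour values are arbitrary examples):
import pandas as pd

colours = pd.Series(["#ff0000", "#00ff00", "#0000ff"])
print(HexTransformer().fit_transform(colours))
#      0    1    2
# 0  255    0    0
# 1    0  255    0
# 2    0    0  255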
|
[
"max.humber@gmail.com"
] |
max.humber@gmail.com
|
60567d1fa526b9f96876073dc18939f56fe55711
|
7be4f595d555614a28f708c1ba7edda321f0cf30
|
/practice/algorithms/greedy/jim_and_the_orders/jim_and_the_orders.py
|
c2536053bea15992893c3f2b2b86f41f2c9beaa8
|
[] |
no_license
|
orel1108/hackerrank
|
de31a2d31aaf8aeb58477d1f2738744bfe492555
|
55da1f3a94e8c28ed0f0dea3103e51774f0047de
|
refs/heads/master
| 2021-04-09T17:38:25.112356
| 2017-01-22T11:21:19
| 2017-01-22T11:21:19
| 50,198,159
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
#!/usr/bin/env python
n = int(raw_input().strip())
t = []
for CNT in range(n):
ti, di = map(int, raw_input().strip().split())
t.append((ti + di, CNT + 1))
for VAL in sorted(t):
print VAL[1],
|
[
"r.orlovskyi@gmail.com"
] |
r.orlovskyi@gmail.com
|
ef6e31cf1ee34575fe9a8110458a4b71ff5f2aef
|
6994917b9d22e9e15e578a0e5c75dcf4ce3cb022
|
/formularios/migrations/0002_auto_20200629_1106.py
|
ec722847b976eba1bbb468b1210f95fa3a236b6e
|
[] |
no_license
|
linikerunk/rh-ticket
|
59ad6411a3d08c90c2704b37ba9bba67ea7f7754
|
bd8edd3eb1ea6cfe04fee03a4f41049a84c1e14a
|
refs/heads/master
| 2023-01-06T21:25:06.851369
| 2020-10-29T20:32:53
| 2020-10-29T20:32:53
| 250,346,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
# Generated by Django 2.2.9 on 2020-06-29 14:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('formularios', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='justificativaausencia',
name='ticket',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='justificativa_ausencia', to='chamados.Ticket', verbose_name='Ticket'),
),
]
|
[
"linikerenem@gmail.com"
] |
linikerenem@gmail.com
|
0e2eae513dca547791fbedea008e887c86f7689a
|
3d9c7d046674c0fca73df7dd0d0281be7600d6c3
|
/bases/renderers/rect_renderer.py
|
c179c4649b1253a742b051642c1cfb8baf202a22
|
[] |
no_license
|
qhuydtvt/micro-war
|
9a5fef988ca229278e0b93319fd2be10a0b758b0
|
7151905277089b12c7312368ff85aba040e6fe5d
|
refs/heads/master
| 2020-03-09T20:36:43.280626
| 2018-06-18T22:55:12
| 2018-06-18T22:55:12
| 128,989,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
from bases.renderers.colors import RED
import pygame
class RectRenderer:
def __init__(self, width, height):
self.width = width
self.height = height
self.color = RED
self.line_width = 1
def draw(self, screen, position):
rect = (position.x - self.width / 2, position.y - self.height / 2, self.width, self.height)
pygame.draw.rect(screen, self.color, rect, self.line_width)
|
[
"qhuydtvt@gmail.com"
] |
qhuydtvt@gmail.com
|
3be1cc1cbe3da3822f1507033315a0b55fd4c5ac
|
04b1803adb6653ecb7cb827c4f4aa616afacf629
|
/third_party/blink/web_tests/external/wpt/fetch/content-type/resources/content-type.py
|
0b5e93b937c293250b33ae2cb2e5cbe43e381a86
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
Samsung/Castanets
|
240d9338e097b75b3f669604315b06f7cf129d64
|
4896f732fc747dfdcfcbac3d442f2d2d42df264a
|
refs/heads/castanets_76_dev
| 2023-08-31T09:01:04.744346
| 2021-07-30T04:56:25
| 2021-08-11T05:45:21
| 125,484,161
| 58
| 49
|
BSD-3-Clause
| 2022-10-16T19:31:26
| 2018-03-16T08:07:37
| null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
def main(request, response):
values = request.GET.get_list("value")
content = request.GET.first("content", "<b>hi</b>\n")
output = "HTTP/1.1 200 OK\r\n"
output += "X-Content-Type-Options: nosniff\r\n"
if "single_header" in request.GET:
output += "Content-Type: " + ",".join(values) + "\r\n"
else:
for value in values:
output += "Content-Type: " + value + "\r\n"
output += "Content-Length: " + str(len(content)) + "\r\n"
output += "\r\n"
output += content
response.writer.write(output)
response.close_connection = True
|
[
"sunny.nam@samsung.com"
] |
sunny.nam@samsung.com
|
8cfce97e1d57c3a59f1770fd204e848d6bae7536
|
93092ee9f65e872ccb2826291cfdcaf3c3ae72c9
|
/store/views.py
|
3f7dde607142fabbe221fa82813c2e2d4497af20
|
[] |
no_license
|
SergioRodas/Django-ecommerce
|
5893d791a6d8d9c953ed4d3389ce277c23b14001
|
258710dde069ddc9058d9f75d8218e4e14e9899d
|
refs/heads/main
| 2023-08-15T18:09:36.431537
| 2021-10-24T23:52:33
| 2021-10-24T23:52:33
| 403,732,841
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,807
|
py
|
from django.shortcuts import render
from django.http import JsonResponse
import json
import datetime
from .models import *
from .utils import cookieCart, cartData, guestOrder
def store(request):
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
products = Product.objects.all()
featured = products.all()[0:3]
women = products.all()[3:7]
news = products.all()[7:11]
context = {'products':products, 'featured':featured, 'women':women, 'news':news, 'cartItems':cartItems}
return render(request, 'store/store.html', context)
def shop(request):
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
products = Product.objects.all()
context = {'products':products, 'cartItems':cartItems}
return render(request, 'store/shop.html', context)
def cart(request):
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
context = {'items':items, 'order':order, 'cartItems':cartItems}
return render(request, 'store/cart.html', context)
def checkout(request):
data = cartData(request)
cartItems = data['cartItems']
order = data['order']
items = data['items']
context = {'items':items, 'order':order, 'cartItems':cartItems}
return render(request, 'store/checkout.html', context)
def updateItem(request):
data = json.loads(request.body)
productId = data['productId']
action = data['action']
print('Action:', action)
print('Product:', productId)
customer = request.user.customer
product = Product.objects.get(id=productId)
order, created = Order.objects.get_or_create(customer=customer, complete=False)
orderItem, created = OrderItem.objects.get_or_create(order=order, product=product)
if action == 'add':
orderItem.quantity = (orderItem.quantity + 1)
elif action == 'remove':
orderItem.quantity = (orderItem.quantity - 1)
orderItem.save()
if orderItem.quantity <= 0:
orderItem.delete()
return JsonResponse('Item was added', safe=False)
def processOrder(request):
transaction_id = datetime.datetime.now().timestamp()
data = json.loads(request.body)
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(customer=customer, complete=False)
else:
customer, order = guestOrder(request, data)
total = float(data['form']['total'])
order.transaction_id = transaction_id
if total == order.get_cart_total:
order.complete = True
order.save()
if order.shipping == True:
ShippingAddress.objects.create(
customer=customer,
order=order,
address=data['shipping']['address'],
city=data['shipping']['city'],
state=data['shipping']['state'],
zipcode=data['shipping']['zipcode'],
)
return JsonResponse('Payment submitted..', safe=False)
|
[
"sergiorodasbj@hotmail.com"
] |
sergiorodasbj@hotmail.com
|
13f5b7dd1c8917cdf7aade9b6ea9ee975608d31b
|
15978aacf0e44a890e36ff94c305aca5a056e5e8
|
/10day--周六补/04-getpid和getppid.py
|
ec53cc565bba6bd0c33838ef3262a68ca235a100
|
[] |
no_license
|
ittoyou/1805_python_2
|
ffbe613d893208b2454ef4f25cc2b8a9951ff047
|
1d6331a83598863042912bb26205d34417abed73
|
refs/heads/master
| 2020-03-24T13:58:12.276827
| 2018-07-27T07:58:57
| 2018-07-27T07:58:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
import os

pid = os.fork()
if pid == 0:
    print("Lao Wang")
    print("I am the child process; my pid is %d and my parent's pid is %d" % (os.getpid(), os.getppid()))
else:
    print("I am the parent process; my pid is %d and my parent's pid is %d" % (os.getpid(), os.getppid()))
    print("Lao Song")
|
[
"qingyuan@geekniu.com"
] |
qingyuan@geekniu.com
|
d17035de6b5911c40b4cb3b07529449ea464fef3
|
ab5dfb7e6d14996b8340c5202ad49024d1f1ef02
|
/grpclib/__init__.py
|
0a3588e68466cf743000d59ab82de19a532ce2ce
|
[
"BSD-3-Clause"
] |
permissive
|
SupperSpiderMan/grpclib
|
f4352fbbfcfc5a286e2174e964f51d0e7428d2ae
|
89fbfd514f1f377a16d64c5a9732cf71090e0a7a
|
refs/heads/master
| 2021-04-23T15:32:45.354275
| 2020-03-20T12:41:14
| 2020-03-20T13:05:19
| 249,936,393
| 1
| 0
|
BSD-3-Clause
| 2020-03-25T09:27:41
| 2020-03-25T09:27:40
| null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
from .const import Status
from .exceptions import GRPCError
__version__ = '0.3.2rc1'
__all__ = (
'Status',
'GRPCError',
)
|
[
"vladimir@magamedov.com"
] |
vladimir@magamedov.com
|
8ac3a44f6b6bec67bac5320465fb81c6316968c2
|
63768dc92cde5515a96d774a32facb461a3bf6e9
|
/jacket/api/compute/openstack/compute/schemas/migrate_server.py
|
b71561ae9ff7c244124debe5f8a8438ca30924d7
|
[
"Apache-2.0"
] |
permissive
|
ljZM33nd/jacket
|
6fe9156f6f5789e5c24425afa7ce9237c302673d
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
refs/heads/master
| 2023-04-16T11:02:01.153751
| 2016-11-15T02:48:12
| 2016-11-15T02:48:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,752
|
py
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from jacket.api.compute.validation import parameter_types
host = copy.deepcopy(parameter_types.hostname)
host['type'] = ['string', 'null']
migrate_live = {
'type': 'object',
'properties': {
'os-migrateLive': {
'type': 'object',
'properties': {
'block_migration': parameter_types.boolean,
'disk_over_commit': parameter_types.boolean,
'host': host
},
'required': ['block_migration', 'disk_over_commit', 'host'],
'additionalProperties': False,
},
},
'required': ['os-migrateLive'],
'additionalProperties': False,
}
block_migration = copy.deepcopy(parameter_types.boolean)
block_migration['enum'].append('auto')
migrate_live_v2_25 = copy.deepcopy(migrate_live)
del migrate_live_v2_25['properties']['os-migrateLive']['properties'][
'disk_over_commit']
migrate_live_v2_25['properties']['os-migrateLive']['properties'][
'block_migration'] = block_migration
migrate_live_v2_25['properties']['os-migrateLive']['required'] = (
['block_migration', 'host'])
|
[
"nkapotoxin@gmail.com"
] |
nkapotoxin@gmail.com
|
fb5b8741a7f3ab1b0292653c1e456d52c6fbf297
|
27acd9eeb0d2b9b6326cc0477e7dbb84341e265c
|
/test/vraag4/src/warmste-week/44.py
|
d719315393ffa6bef0b3edfc9fb3c08bf9786cf3
|
[] |
no_license
|
VerstraeteBert/algos-ds
|
e0fe35bc3c5b7d8276c07250f56d3719ecc617de
|
d9215f11cdfa1a12a3b19ade3b95fa73848a636c
|
refs/heads/master
| 2021-07-15T13:46:58.790446
| 2021-02-28T23:28:36
| 2021-02-28T23:28:36
| 240,883,220
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
def gift_inschrijven(tup, woordenboek):
    """Register a gift: add the amount tup[1] to the running total for class tup[0] in the dict."""
    klas = tup[0]
    bedrag = tup[1]
    if klas in woordenboek:
        geld = woordenboek[klas]
        geld += bedrag
        woordenboek[klas] = geld
    else:
        woordenboek[klas] = bedrag
    return woordenboek
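A short usage example (class name and amounts are arbitrary): repeated gifts for the same class accumulate.
boek = {}
boek = gift_inschrijven(("3A", 10), boek)
boek = gift_inschrijven(("3A", 5), boek)
print(boek)  # {'3A': 15}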
|
[
"bertverstraete22@gmail.com"
] |
bertverstraete22@gmail.com
|
e5787ad9acf58585924c86ca9994a3c142f6c94b
|
060ce17de7b5cdbd5f7064d1fceb4ded17a23649
|
/fn_cisco_umbrella_inv/fn_cisco_umbrella_inv/components/umbrella_dns_rr_hist.py
|
e69d2201b06fc738b42f948cd0c24e135de5bb51
|
[
"MIT"
] |
permissive
|
ibmresilient/resilient-community-apps
|
74bbd770062a22801cef585d4415c29cbb4d34e2
|
6878c78b94eeca407998a41ce8db2cc00f2b6758
|
refs/heads/main
| 2023-06-26T20:47:15.059297
| 2023-06-23T16:33:58
| 2023-06-23T16:33:58
| 101,410,006
| 81
| 107
|
MIT
| 2023-03-29T20:40:31
| 2017-08-25T14:07:33
|
Python
|
UTF-8
|
Python
| false
| false
| 7,764
|
py
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
""" Resilient functions component to run an Umbrella investigate Query - DNS RR History for an IP Address or domain
against a Cisco Umbrella server """
# Set up:
# Destination: a Queue named "umbrella_investigate".
# Manual Action: Execute a REST query against a Cisco Umbrella server.
import json
import logging
from datetime import datetime
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from fn_cisco_umbrella_inv.util.resilient_inv import ResilientInv
from fn_cisco_umbrella_inv.util.helpers import validate_opts, validate_params, process_params, is_none, \
create_attachment, get_proxies
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'umbrella_dns_rr_hist' of
package fn_cisco_umbrella_inv.
The Function does a Cisco Umbrella Investigate query lookup takes the following parameters:
resource , dns_type
An example of a set of query parameter might look like the following:
umbinv_resource = "1.2.3.4" or resource = "example.com"
umbinv_dns_type = "A"
The Investigate Query will executes a REST call against the Cisco Umbrella Investigate server and returns a result
in JSON format similar to the following.
{'resource_name': 'cosmos.furnipict.com',
'query_execution_time': '2018-05-02 16:03:15',
"dns_rr_history": { "rrs": [ {
"rr": "www.example.com.",
"ttl": 86400,
"class": "IN",
"type": "A",
"name": "93.184.216.119"
}
...
...
],
"features": {
"rr_count": 19,
"ld2_count": 10,
"ld3_count": 14,
"ld2_1_count": 7,
"ld2_2_count": 11,
"div_ld2": 0.5263157894736842,
"div_ld3": 0.7368421052631579,
"div_ld2_1": 0.3684210526315789,
"div_ld2_2": 0.5789473684210527
}
}
}
"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get("fn_cisco_umbrella_inv", {})
validate_opts(self)
self.proxies = get_proxies(opts, self.options)
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("fn_cisco_umbrella_inv", {})
self.proxies = get_proxies(opts, self.options)
@function("umbrella_dns_rr_hist")
def _umbrella_dns_rr_hist_function(self, event, *args, **kwargs):
"""Function: Resilient Function : Cisco Umbrella Investigate for DNS RR History for a IP, Type and Domain Name"""
try:
# Get the function parameters:
umbinv_resource = kwargs.get("umbinv_resource") # text
umbinv_dns_type = self.get_select_param(kwargs.get("umbinv_dns_type")) # select, values: "A", "NS", "MX", "TXT", "CNAME"
incident_id = kwargs.get("incident_id") # number
artifact_type = kwargs.get("artifact_type") # text
log = logging.getLogger(__name__)
log.info("umbinv_resource: %s", umbinv_resource)
log.info("umbinv_dns_type: %s", umbinv_dns_type)
log.info("incident_id: %s", incident_id)
log.info("artifact_type: %s", artifact_type)
if is_none(umbinv_resource):
raise ValueError("Required parameter 'umbinv_resource' not set")
if is_none(umbinv_dns_type):
raise ValueError("Required parameter 'umbinv_dns_type' not set")
if is_none(incident_id):
raise ValueError("Required parameter 'incident_id' not set")
if is_none(artifact_type):
raise ValueError("Required parameter 'artifact_type' not set")
yield StatusMessage("Starting...")
res = None
res_type = None
process_result = {}
func_name = event.name
params = {"resource": umbinv_resource.strip(), "dns_type": umbinv_dns_type, "incident_id": incident_id,
"artifact_type": artifact_type}
validate_params(params)
process_params(params, process_result)
if "_res" not in process_result or "_res_type" not in process_result:
raise ValueError("Parameter 'umbinv_resource' was not processed correctly")
else:
res = process_result.pop("_res")
res_type = process_result.pop("_res_type")
if res_type != "domain_name" and res_type != "ip_address":
raise ValueError("Parameter 'umbinv_resource' was an incorrect type '{}', should be a 'domain name', "
"or an 'ip address'.".format(res_type))
api_token = self.options.get("api_token")
base_url = self.options.get("base_url")
rinv = ResilientInv(api_token, base_url, proxies=self.proxies)
yield StatusMessage("Running Cisco Investigate query...")
rtn = rinv.rr_history(res, query_type=umbinv_dns_type)
query_execution_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if ("rrs" in rtn and len(rtn["rrs"]) == 0) \
or ("rrs_tf" in rtn and len(rtn["rrs_tf"]) == 0):
log.debug(json.dumps(rtn))
yield StatusMessage("No Results returned for resource '{}' with query type '{}'."
.format(res, umbinv_dns_type))
results = {}
elif ("rrs" in rtn and len(rtn["rrs"]) > int(self.options.get("results_limit", "200"))) \
or ("rrs_tf" in rtn and len(rtn["rrs_tf"]) > int(self.options.get("results_limit", "200"))):
att_report = create_attachment(self, func_name, res, params, rtn, query_execution_time)
# Add in "query_execution_time" and "ip_address" to result to facilitate post-processing.
results = {"over_limit": True, "resource_name": res, "att_name": att_report["name"],
"query_execution_time": query_execution_time}
yield StatusMessage("Returning 'dns_rr_history' results for resource '{0}' as attachment: {1} ."
.format(res,att_report["name"]))
else:
# Add in "query_execution_time" and "ip_address" to result to facilitate post-processing.
results = {"dns_rr_history": json.loads(json.dumps(rtn)), "resource_name": res,
"query_execution_time": query_execution_time}
yield StatusMessage("Returning 'dns_rr_history' results for resource '{}'.".format(res))
yield StatusMessage("Done...")
log.debug(json.dumps(results))
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception:
log.exception("Exception in Resilient Function.")
yield FunctionError()
|
[
"travis@example.org"
] |
travis@example.org
|
489bcb93be6187df718facdbb8397383188b4e5d
|
024b434652c3e329bc7740c7b0c1776c7d6da54f
|
/cli/connect.py
|
0c1a53603414dd086019f90c68d2b9f84431d230
|
[
"MIT"
] |
permissive
|
Barski-lab/cwl-airflow-cli
|
3bffa85b519bc7ad645a798327af19fafb570639
|
ede2928fb1cb161ae985d11a365ae2e6da4afa97
|
refs/heads/master
| 2021-01-20T22:05:42.415130
| 2017-09-16T19:38:25
| 2017-09-16T19:38:25
| 101,799,674
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
#! /usr/bin/env python
import MySQLdb
import urlparse
class DbConnect:
"""Class to get access to DB"""
def __init__(self, conf):
self.sql_conn_url=urlparse.urlparse(conf.get('core', 'sql_alchemy_conn'))
self.connect()
def connect(self):
self.conn = MySQLdb.connect (host = self.sql_conn_url.hostname,
user = self.sql_conn_url.username,
passwd = self.sql_conn_url.password,
db = self.sql_conn_url.path.strip('/'),
port = self.sql_conn_url.port)
self.conn.set_character_set('utf8')
self.cursor = self.conn.cursor()
def close(self):
try:
self.conn.close()
except:
pass
|
[
"misha.kotliar@gmail.com"
] |
misha.kotliar@gmail.com
|
0f5cb35930f5208d39ec2592d907071525142e1b
|
08d163710a17497d81e2bc4b53c77b2c787f2baa
|
/src/dataset/gtsrb.py
|
a649dab3498c19c716ce3d6906fe22a5fc8ad9e1
|
[
"MIT"
] |
permissive
|
ngonthier/dti-sprites
|
7a9b80601b26bd760635d371500e34b15f409932
|
7c41b8bf15916f2ac14d6c0de795cd32e4689672
|
refs/heads/main
| 2023-04-30T21:51:01.183767
| 2021-05-14T11:56:14
| 2021-05-14T11:56:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
from abc import ABCMeta
from functools import lru_cache
from PIL import Image
import numpy as np
from torch.utils.data.dataset import Dataset as TorchDataset
from torchvision.transforms import Compose, Resize, ToTensor
from utils import coerce_to_path_and_check_exist, get_files_from_dir, use_seed
from utils.image import IMG_EXTENSIONS
from utils.path import DATASETS_PATH
class GTSRB8Dataset(TorchDataset):
root = DATASETS_PATH
name = 'gtsrb8'
n_channels = 3
n_classes = 8
img_size = (28, 28)
def __init__(self, split, **kwargs):
self.data_path = coerce_to_path_and_check_exist(self.root / 'GTSRB')
self.split = split
input_files = get_files_from_dir(self.data_path / 'train', IMG_EXTENSIONS, sort=True, recursive=True)
labels = [int(f.parent.name) for f in input_files]
self.input_files = np.asarray(input_files)
self.labels = np.asarray(labels)
# We filter the dataset to keep 8 classes
good_labels = {k: i for i, k in enumerate([3, 7, 9, 11, 17, 18, 25, 35])}
mask = np.isin(self.labels, list(good_labels.keys()))
self.input_files = self.input_files[mask]
self.labels = np.asarray([good_labels[l] for l in self.labels[mask]])
N = len(self.input_files)
if split == 'val':
with use_seed(46):
indices = np.random.choice(range(N), 100, replace=False)
self.input_files = self.input_files[indices]
self.labels = self.labels[indices]
def __len__(self):
return len(self.input_files)
def __getitem__(self, idx):
inp = self.transform(Image.open(self.input_files[idx]).convert('RGB'))
return inp, self.labels[idx]
@property
@lru_cache()
def transform(self):
return Compose([Resize(self.img_size), ToTensor()])
|
[
"tom.monnier@hotmail.fr"
] |
tom.monnier@hotmail.fr
|
c70ba4a760d383cbe9f83c83caa406e0a58dbc48
|
cc15af2ccc401b2d1d8fcfd219295121c1cece5d
|
/ROAR/utilities_module/module.py
|
1ca90b65b5f5f492aaf34919218b5ed8d633b876
|
[
"Apache-2.0"
] |
permissive
|
moonwonlee/ROAR
|
755277c5f79df67a78896e2739764eac6b7e0e7e
|
1e189d895ac34197b8c8fc3017970cb706feb3e6
|
refs/heads/main
| 2023-06-15T22:35:29.796573
| 2021-07-02T20:27:39
| 2021-07-02T20:27:39
| 359,383,030
| 1
| 0
|
Apache-2.0
| 2021-07-02T20:27:40
| 2021-04-19T08:23:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
from abc import ABC, abstractmethod
import time
from pathlib import Path
class Module(ABC):
def __init__(self, threaded=False, update_interval: float = 0.5,
should_save: bool = False, name: str = "module", **kwargs):
self.threaded = threaded
self.update_interval = update_interval
self.should_continue_threaded = True
self.should_save = should_save
self.saving_dir_path: Path = Path(f"data/output/{name}")
if should_save and self.saving_dir_path.exists() is False:
self.saving_dir_path.mkdir(exist_ok=True, parents=True)
@abstractmethod
def run_in_series(self, **kwargs):
"""
        This is the non-threaded function. It runs in series!
Args:
**kwargs:
Returns:
"""
pass
def run_in_threaded(self, **kwargs):
"""
This is the threaded function.
Args:
**kwargs:
Returns:
"""
while self.should_continue_threaded:
self.run_in_series()
if self.should_save:
self.save()
time.sleep(self.update_interval)
def shutdown(self):
self.should_continue_threaded = False
@abstractmethod
def save(self, **kwargs):
pass
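A minimal concrete subclass sketch for the Module base class above; the sensor logic and the output file name are hypothetical:
import json

class DummySensorModule(Module):
    def __init__(self, **kwargs):
        super().__init__(name="dummy_sensor", **kwargs)
        self.readings = []

    def run_in_series(self, **kwargs):
        # Hypothetical work step: record a fake reading.
        self.readings.append({"value": 42})

    def save(self, **kwargs):
        # Write the collected readings into the module's saving directory.
        with open(self.saving_dir_path / "readings.json", "w") as f:
            json.dump(self.readings, f)

module = DummySensorModule(threaded=False, should_save=False)
module.run_in_series()
module.shutdown()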
|
[
"wuxiaohua1011@berkeley.edu"
] |
wuxiaohua1011@berkeley.edu
|
b3aa8d40a9d1110d654d7483b38f77c4184c5fe8
|
770e2091d4d571bd4bfeeefbd0755129ef79d2cf
|
/matplotlib/5.py
|
e6dc037d574d73aa6187c890e876aff42ff3390f
|
[] |
no_license
|
youjia4321/data_analysis
|
24d4897562b07fa9f7effb2f4f6add9b87fb5807
|
4f2e4a0389e0bbf67b654b9e9fe12088133cddbe
|
refs/heads/master
| 2020-04-27T10:42:58.664049
| 2019-03-08T02:58:18
| 2019-03-08T02:58:18
| 174,266,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
import numpy as np
import matplotlib.pyplot as plt
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000)
# Histogram of the data
n, bins, patches = plt.hist(x, 50, density=1, facecolor='r', alpha=0.75)
# for i in patches:
# print(i)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title('Histogram of IQ')
plt.text(60, .025, r'$\mu=100, \sigma=15$')
plt.text(40, .01, r'$\alpha_i > \beta_i$')
plt.text(70, .01, r'$\sum_{i=0}^\infty x_i$')
plt.axis([0, 160, 0, 0.03])
plt.grid(True)  # show the grid
plt.show()
|
[
"noreply@github.com"
] |
youjia4321.noreply@github.com
|
fad413cdf0ebf545f27260b7ed0593f679b3b9cf
|
e6bc1f55371786dad70313eb468a3ccf6000edaf
|
/Datasets/the-minion-game/Correct/076.py
|
fa39bfcd0f041cb2eb3e2c8fd566a8a06327f68b
|
[] |
no_license
|
prateksha/Source-Code-Similarity-Measurement
|
9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0
|
fb371b837917794d260a219a1ca09c46a5b15962
|
refs/heads/master
| 2023-01-04T07:49:25.138827
| 2020-10-25T14:43:57
| 2020-10-25T14:43:57
| 285,744,963
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
def minion_game(string):
vowel =['A','E','I','O','U']
S=0
K=0
for i in range(len(string)):
if string[i] in vowel:
K+= len(string)-i
else:
S+=len(string)-i
if S>K:
print("Stuart"+" "+ "%d" % S)
elif K>S:
print("Kevin"+" "+'%d' % K)
else:
print("Draw")
|
[
"pratekshau@gmail.com"
] |
pratekshau@gmail.com
|
93492b704f6fe05dc224243661ed0c2a8066ed38
|
67e817ca139ca039bd9eee5b1b789e5510119e83
|
/Tree/Kth Smallest Element in a BST.py
|
78fa4551d211d33dd4023977b36259e7db2cd425
|
[] |
no_license
|
dstch/my_leetcode
|
0dc41e7a2526c2d85b6b9b6602ac53f7a6ba9273
|
48a8c77e81cd49a75278551048028c492ec62994
|
refs/heads/master
| 2021-07-25T21:30:41.705258
| 2021-06-06T08:58:29
| 2021-06-06T08:58:29
| 164,360,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,634
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: dstch
@license: (C) Copyright 2013-2019, Regulus Tech.
@contact: dstch@163.com
@file: Kth Smallest Element in a BST.py
@time: 2019/9/11 22:08
@desc: Given a binary search tree, write a function kthSmallest to find the kth smallest element in it.
Note:
You may assume k is always valid, 1 ≤ k ≤ BST's total elements.
Example 1:
Input: root = [3,1,4,null,2], k = 1
3
/ \
1 4
\
2
Output: 1
Example 2:
Input: root = [5,3,6,2,4,null,null,1], k = 3
5
/ \
3 6
/ \
2 4
/
1
Output: 3
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# An in-order traversal of a BST visits the values in sorted order
class Solution:
    def kthSmallest(self, root: TreeNode, k: int) -> int:
        # k is 1-indexed, so take element k - 1 of the sorted values
        return self.get_list(root, [])[k - 1]
def get_list(self, root, res):
if root.left is not None:
res = self.get_list(root.left, res)
if root is not None:
res.append(root.val)
if root.right is not None:
res = self.get_list(root.right, res)
return res
# Divide-and-conquer approach
class Solution1:
def kthSmallest(self, root: TreeNode, k: int) -> int:
cnt = self.count(root.left)
if k <= cnt:
return self.kthSmallest(root.left, k)
elif k > cnt + 1:
return self.kthSmallest(root.right, k - cnt - 1)
return root.val
def count(self, root):
if root is None:
return 0
return 1 + self.count(root.left) + self.count(root.right)
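A small sketch that builds the tree from Example 1 by hand and queries both solutions:
root = TreeNode(3)
root.left = TreeNode(1)
root.right = TreeNode(4)
root.left.right = TreeNode(2)

print(Solution().kthSmallest(root, 1))   # 1
print(Solution1().kthSmallest(root, 1))  # 1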
|
[
"dstch@163.com"
] |
dstch@163.com
|
7835d7fea00e4dcad46bf9117f2c76e5d0e341a4
|
65b4522c04c2be071c2d42095956fe950fe1cebe
|
/inversions/inversion11/best_model/predict/analysis/pred_disp_large_scale/plots/post_disp/gen_post_disp.py
|
2d7cabac2102b3f35527307245505ab7abce8bca
|
[] |
no_license
|
geodesy/viscojapan
|
ac0cd93f7a2134cd2651623b94879dcc21c0c46a
|
03e70265b56eb5994e73bcb6066f0be338e42f27
|
refs/heads/master
| 2021-03-03T18:19:07.779601
| 2015-07-16T03:50:49
| 2015-07-16T03:50:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
import numpy as np
import viscojapan as vj
res_file='/home/zy/workspace/viscojapan/inversions/inversion10/iter2/run7/outs/nrough_05_naslip_11.h5'
reader = vj.inv.ResultFileReader(res_file)
disp = reader.get_pred_disp()
post = disp.get_post_at_nth_epoch(-1)
sites = disp.get_sites
tp = np.loadtxt('stations_large_scale.in','4a, f, f')
pos_dic = {ii[0].decode():(ii[1], ii[2]) for ii in tp}
with open('post_disp2','wt') as fid:
for site, y in zip(sites, post):
lon, lat = pos_dic[site.id]
mag = np.sqrt(y[0]**2+y[1]**2)
fid.write('%f %f %f\n'%(lon, lat, mag))
|
[
"zy31415@gmail.com"
] |
zy31415@gmail.com
|
31c1779398b3ce2aad134e0fb09067334f0f0976
|
a9e27f69b8db430252cd29f334f182b9962e22ae
|
/src/collective/deformwidgets/dynatree.py
|
2dcec4d7bb40a7798e2a521b94322c3fd90574f1
|
[] |
no_license
|
collective/collective.deformwidgets
|
d4b09da0ae935f16437f56f77351e52f3ccace81
|
fef9e81e5e448de30fcd654a68f1cfd98e7aeb9d
|
refs/heads/master
| 2023-03-22T11:55:37.251077
| 2012-10-11T21:20:53
| 2012-10-12T06:10:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,384
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from imsvdex.vdex import VDEXManager
from Products.CMFCore.utils import getToolByName
from zope.app.component.hooks import getSite
import colander
import deform
import json
def vdex_to_dynatree(vdex=None):
'''
Convert a vdex manager object
to something understandable by a dynatree widget
'''
retval = []
def convert(key, value):
''' converter '''
retval = {}
retval['title'] = value[0]
retval['key'] = key
if value[1]:
children_keys = value[1].keys()
children_keys.sort()
retval['children'] = [convert(x, value[1][x]) for x in
children_keys]
return retval
vdex_dict = vdex.getVocabularyDict()
keys = vdex_dict.keys()
keys.sort()
for key in keys:
retval.append(convert(key, vdex_dict[key]))
return retval
class DynatreeWidgetContentBrowser(deform.widget.SelectWidget):
'''
Renders a ``dynatree`` widget to select contents of a site
**Attributes/Arguments**
vocabulary
An imsvdex.vdex.VDEXManager object
Can also be provided by the field
null_value
The value which represents the null value. When the null
value is encountered during serialization, the
:attr:`colander.null` sentinel is returned to the caller.
Default: ``\'\'`` (the empty string).
template
The template name used to render the widget. Default:
``dynatree``.
readonly_template
The template name used to render the widget in read-only mode.
Default: ``readonly/dynatree``.
'''
template = 'dynatree_content'
readonly_template = 'readonly/dynatree_content'
null_value = ''
vocabulary = None
requirements = (('jquery.dynatree', None), )
selectMode = '2'
@staticmethod
def convert_cstruct(cstruct):
''' return cstruct jsonified, wrapped in a list if necessary '''
if cstruct in (colander.null, None):
return json.dumps([])
else:
return json.dumps([cstruct])
def get_preselected_values(self, cstruct, readonly):
'''
for the preselected keys, get the values if necessary.
Necessary as in the values aren\'t used in write case.
Since computation is expensive, we return an empty
list if readonly is set to false
'''
if readonly:
retval = []
for key in cstruct or []:
term = self.vocabulary.getTermById(key)
retval.append(self.vocabulary.getTermCaption(term))
return retval
else:
return []
@staticmethod
def get_item_child_name(dummy):
''' Return the name of the item child '''
return 'null'
def titles(self, values):
site = getSite()
catalog = getToolByName(site, 'portal_catalog')
for brain in catalog.searchResults(UID=values):
yield brain.Title
@property
def tree(self):
''' return the tree datastructure as needed by dynatree, jsonified '''
return json.dumps(vdex_to_dynatree(vdex=self.vocabulary))
def serialize(
self,
field,
cstruct,
readonly=False,
):
template = readonly and self.readonly_template or self.template
return field.renderer(
template,
site_url=getSite().absolute_url(),
field=field,
object_provides_filter=getattr(field.schema,
'object_provides_filter', ''),
values=cstruct,
titles=self.titles(cstruct),
dynatree_parameters='',
fieldName=field.name,
)
class SingleSelectDynatreeWidget(deform.widget.SelectWidget):
'''
Renders a ``dynatree`` widget based on a predefined set of values.
**Attributes/Arguments**
vocabulary
An imsvdex.vdex.VDEXManager object
Can also be provided by the field
null_value
The value which represents the null value. When the null
value is encountered during serialization, the
:attr:`colander.null` sentinel is returned to the caller.
Default: ``\'\'`` (the empty string).
template
The template name used to render the widget. Default:
``dynatree``.
readonly_template
The template name used to render the widget in read-only mode.
Default: ``readonly/dynatree``.
'''
template = 'dynatree'
readonly_template = 'readonly/dynatree'
null_value = ''
vocabulary = None
requirements = (('jquery.dynatree', None), )
selectMode = '1'
@staticmethod
def convert_cstruct(cstruct):
''' return cstruct jsonified, wrapped in a list if necessary '''
if cstruct in (colander.null, None):
return json.dumps([])
else:
return json.dumps([cstruct])
def get_preselected_values(self, cstruct, readonly):
'''
for the preselected keys, get the values if necessary.
Necessary as in the values aren\'t used in write case.
Since computation is expensive, we return an empty
list if readonly is set to false
'''
if readonly:
retval = []
for key in cstruct or []:
term = self.vocabulary.getTermById(key)
retval.append(self.vocabulary.getTermCaption(term))
return retval
else:
return []
@staticmethod
def get_item_child_name(dummy):
''' Return the name of the item child '''
return 'null'
@property
def tree(self):
''' return the tree datastructure as needed by dynatree, jsonified '''
return json.dumps(vdex_to_dynatree(vdex=self.vocabulary))
def serialize(
self,
field,
cstruct,
readonly=False,
):
if not self.vocabulary:
self.vocabulary = getattr(field.schema, 'vocabulary',
self.vocabulary)
assert self.vocabulary, 'You must give me a vocabulary'
template = readonly and self.readonly_template or self.template
return field.renderer(
template,
field=field,
preselected=self.convert_cstruct(cstruct),
preselected_values=self.get_preselected_values(cstruct,
readonly),
tree=self.tree,
select_mode=self.selectMode,
item_name=field.name,
item_child_name=self.get_item_child_name(field),
)
class MultiSelectDynatreeWidget(SingleSelectDynatreeWidget):
''' Dynatree widget for sequence fields '''
selectMode = '2'
@staticmethod
def convert_cstruct(cstruct):
''' return cstruct jsonified, wrapped in a list if necessary '''
if cstruct in (colander.null, None):
return json.dumps([])
else:
return json.dumps(cstruct)
@staticmethod
def get_item_child_name(field):
''' Return the name of the item child '''
return field.children[0].name
class MultiSelectMode3DynatreeWidget(MultiSelectDynatreeWidget):
''' Dynatree widget for sequence fields mode 2 '''
selectMode = 3
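A hedged illustration of what vdex_to_dynatree produces. The fake vocabulary below mimics the key -> (caption, children) shape that the convert helper expects from VDEXManager.getVocabularyDict(); a real VDEXManager would be built from a VDEX XML file instead.
class FakeVdex(object):
    """Stand-in for VDEXManager; only getVocabularyDict is needed here."""

    def getVocabularyDict(self):
        return {
            'colors': ('Colors', {
                'blue': ('Blue', {}),
                'red': ('Red', {}),
            }),
        }

print(vdex_to_dynatree(vdex=FakeVdex()))
# [{'title': 'Colors', 'key': 'colors',
#   'children': [{'title': 'Blue', 'key': 'blue'},
#                {'title': 'Red', 'key': 'red'}]}]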
|
[
"do3cc@patrick-gerken.de"
] |
do3cc@patrick-gerken.de
|
0002d483929a2213dc78b3654a3d16d500e71d6f
|
c80ec1805a7e6cb1bd3f4b3e383ef4f4cf164765
|
/gen/filters/rules/family/_hastwins.py
|
c2e37d04b70c8905222c5247ba01261e08eced18
|
[] |
no_license
|
balrok/gramps_addon
|
57c8e976c47ea3c1d1298d3fd4406c13909ac933
|
0c79561bed7ff42c88714edbc85197fa9235e188
|
refs/heads/master
| 2020-04-16T03:58:27.818732
| 2015-02-01T14:17:44
| 2015-02-01T14:17:44
| 30,111,898
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,365
|
py
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2013 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from ....lib.childreftype import ChildRefType
#-------------------------------------------------------------------------
#
# HasTwins
#
#-------------------------------------------------------------------------
class HasTwins(Rule):
"""Rule that checks for a family with twins"""
name = _('Families with twins')
description = _("Matches families with twins")
category = _('Child filters')
def apply(self, db, family):
date_list = []
for childref in family.get_child_ref_list():
if int(childref.get_mother_relation()) == ChildRefType.BIRTH:
child = db.get_person_from_handle(childref.ref)
birthref = child.get_birth_ref()
if birthref:
birth = db.get_event_from_handle(birthref.ref)
sortval = birth.get_date_object().get_sort_value()
if sortval != 0:
if sortval in date_list:
return True
else:
date_list.append(sortval)
return False
|
[
"carl.schoenbach@gmail.com"
] |
carl.schoenbach@gmail.com
|
7b8b1e3bf65134928bbed9628517cb616c9a5cb4
|
9d58c796906c6687241125ee6602cc612cb28735
|
/FileRstproj/FileRstproj/wsgi.py
|
18849c0de1d7746d732056d98c0d2fce323e9eb0
|
[] |
no_license
|
uday99/RestFrameworkUploadfile
|
29de0929ad7c2af2d4e4a856801467b05ce53829
|
6b92f0d55893e435f4383a4691a79642be28923f
|
refs/heads/main
| 2023-07-10T18:40:27.225900
| 2021-08-19T15:21:09
| 2021-08-19T15:21:09
| 397,986,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for FileRstproj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FileRstproj.settings')
application = get_wsgi_application()
|
[
"udaykumarandolu@gmail.com"
] |
udaykumarandolu@gmail.com
|
f193b93fe7ed2fb7532ba5e080503c1e75e790b1
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codewars/8kyu/count-of-positives-slash-sum-of-negatives/Python/test.py
|
61c68d640b924eca1c079ae4f9c87625028e4368
|
[
"MIT"
] |
permissive
|
RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081
| 2019-07-05T09:42:40
| 2019-07-05T09:42:40
| 88,911,035
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
# Python - 3.4.3
Test.describe("Basic tests")
Test.assert_equals(count_positives_sum_negatives([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, -11, -12, -13, -14, -15]),[10,-65])
Test.assert_equals(count_positives_sum_negatives([0, 2, 3, 0, 5, 6, 7, 8, 9, 10, -11, -12, -13, -14]),[8,-50])
Test.assert_equals(count_positives_sum_negatives([1]),[1,0])
Test.assert_equals(count_positives_sum_negatives([-1]),[0,-1])
Test.assert_equals(count_positives_sum_negatives([0,0,0,0,0,0,0,0,0]),[0,0])
Test.assert_equals(count_positives_sum_negatives([]),[])
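A candidate implementation (not part of the original kata file) that satisfies all of the assertions above:
def count_positives_sum_negatives(lst):
    # The empty input maps to an empty result, per the last test case.
    if not lst:
        return []
    positives = sum(1 for x in lst if x > 0)
    negatives = sum(x for x in lst if x < 0)
    return [positives, negatives]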
|
[
"d79523@hotmail.com"
] |
d79523@hotmail.com
|
3580008934feddc279f08ed0aa8247a1544c764e
|
a519c248ccac7cfb4c934b5ad1159f4937117fba
|
/More_String_Manipulation/correct_password.py
|
c97737688bac4ff84491704a13c359e3ebae10d0
|
[] |
no_license
|
kaci65/Nichola_Lacey_Python_By_Example_BOOK
|
1ae5654b82c01e320eaf8d1e41fb03804c0786fc
|
2cc18d2d8351a990cee31e253c4bcb298ba4c266
|
refs/heads/main
| 2023-04-12T18:16:03.663399
| 2021-05-02T14:34:00
| 2021-05-02T14:34:00
| 346,003,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
#!/usr/bin/python3
"""check if user input is correct the secondtime round"""
passwrd = input("Please enter password: ")
passwrd2 = input("Please confirm password: ")
if passwrd == passwrd2:
print("Thank you")
elif passwrd.islower() != passwrd2.islower():
print("They must be in the same case")
else:
print("Incorrect")
|
[
"wanjikukarugi@gmail.com"
] |
wanjikukarugi@gmail.com
|
cc1c4654fe4f9af3d5dcd665eaad982c2c01d813
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02900/s932480273.py
|
879d6d669a5cb836b2df581b153b4b2ff7c9129f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
a,b=map(int,input().split())
def factorization(n):
a=[]
while n%2==0:
a.append(2)
n//=2
f=3
while f*f<=n:
if n%f==0:
a.append(f)
n//=f
else:
f+=2
if n!=1:
a.append(n)
return a
s_a=set(factorization(a))
s_b=set(factorization(b))
ans_arr=s_a&s_b
print(len(ans_arr)+1)
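A worked example of the same logic without stdin (values chosen for illustration):
a, b = 12, 18
s_a = set(factorization(a))  # {2, 3}
s_b = set(factorization(b))  # {2, 3}
print(len(s_a & s_b) + 1)    # 3 -- the shared primes 2 and 3, plus the divisor 1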
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b363e8058c2887db3c204ca9ca827bc385baa3b7
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/pPyAgyeNEvQsBytaR_19.py
|
4aff1636b0a9b914bbbca2ca40fe25b9d7d42fe7
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
"""
Write a function that calculates the **factorial** of a number
**recursively**.
### Examples
factorial(5) ➞ 120
factorial(3) ➞ 6
factorial(1) ➞ 1
factorial(0) ➞ 1
### Notes
N/A
"""
def factorial(n):
counter = n
counter2 = n
if n == 0:
return 1
for x in range (counter2):
counter = counter - 1
if x < n and counter > 0:
n = n * counter
else:
n = n
return n
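The prompt asks for a recursive solution, while the version above is iterative; a minimal recursive sketch that returns the same values:
def factorial_recursive(n):
    # Base cases: 0! = 1! = 1
    if n <= 1:
        return 1
    return n * factorial_recursive(n - 1)

assert factorial_recursive(5) == 120
assert factorial_recursive(3) == 6
assert factorial_recursive(0) == 1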
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
af549c5f28809aa0ff4c12ecd2d66aad2f41e951
|
2b167e29ba07e9f577c20c54cb943861d0ccfa69
|
/numerical_analysis_backup/large-scale-multiobj2/core-arch5-guard0-beta0-hebbe/pareto25.py
|
24ae69bfb9cc96c1eae6bea2eb789a9b5259fd40
|
[] |
no_license
|
LiYan1988/kthOld_OFC
|
17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f
|
b1237577ea68ad735a65981bf29584ebd889132b
|
refs/heads/master
| 2021-01-11T17:27:25.574431
| 2017-01-23T05:32:35
| 2017-01-23T05:32:35
| 79,773,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,486
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch5_decomposition_new import Arch5_decompose
np.random.seed(2010)
num_cores=10
num_slots=320
i = 5
filename = 'traffic_matrix_pod250_load50_'+str(i)+'.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
row = [float(u) for u in row]
tm.append(row)
tm = np.array(tm)
#%% arch2
corev = [6, 8]
connection_ub = []
throughput_ub = []
obj_ub = []
connection_lb = []
throughput_lb = []
obj_lb = []
connection_he = []
throughput_he = []
obj_he = []
for c in corev:
m = Arch5_decompose(tm, num_slots=num_slots, num_cores=c,
alpha=1,beta=0)
m.create_model_routing(mipfocus=1,timelimit=36000,mipgap=0.01, method=3,
threads=20)
connection_ub.append(m.connection_ub_)
throughput_ub.append(m.throughput_ub_)
obj_ub.append(m.obj_ub_)
np.save('core_usagex_i%d_c%d.npy'%(i,c), m.core_usagex)
# m.create_model_sa(mipfocus=1,timelimit=26000,mipgap=0.01, method=2,
# SubMIPNodes=2000, heuristics=0.8, threads=4, presolve=2)
# connection_lb.append(m.connection_lb_)
# throughput_lb.append(m.throughput_lb_)
# obj_lb.append(m.obj_lb_)
# m.write_result_csv('cnklist_lb_%d_%d.csv'%(i,c), m.cnklist_lb)
connection_lb.append(0)
throughput_lb.append(0)
obj_lb.append(0)
# m.heuristic()
# connection_he.append(m.obj_heuristic_connection_)
# throughput_he.append(m.obj_heuristic_throughput_)
# obj_he.append(m.obj_heuristic_)
# m.write_result_csv('cnklist_heuristic_%d_%d.csv'%(i,c),
# m.cnklist_heuristic_)
connection_he.append(0)
throughput_he.append(0)
obj_he.append(0)
result = np.array([corev,
connection_ub,throughput_ub,obj_ub,
connection_lb,throughput_lb,obj_lb,
connection_he,throughput_he,obj_he]).T
file_name = "result_pareto_arch5_old_2_{}.csv".format(i)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['beta', 'connection_ub', 'throughput_ub',
'obj_ub', 'connection_lb', 'throughput_lb', 'obj_lb',
'connection_he', 'throughput_he', 'obj_he'])
writer.writerows(result)
|
[
"li.yan.ly414@gmail.com"
] |
li.yan.ly414@gmail.com
|
38043e53324b35022507d8c661ba877f03024257
|
a92db55b2a21ac3e4191e22800c66f8155c39f18
|
/backendrooms/Chat/urls.py
|
c850fd056c1be7ea8dd3ff01e4d9ad9ab67f554c
|
[] |
no_license
|
thaopanda/EasyAccomd
|
046fc4520d0234e66ab6b49c2da2c20db0d38d1c
|
f9e68833ac1a57ca1cdd3e5db37fd15ecdef3ef1
|
refs/heads/master
| 2023-03-26T07:48:16.038758
| 2021-03-25T05:03:45
| 2021-03-25T05:03:45
| 351,314,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from django.contrib import admin
from django.urls import path, include
from Chat import views
urlpatterns = [
path('thread/', views.GetThread.as_view()),
path('chat/<int:pk>/<int:begin>/<int:end>/', views.GetChat.as_view()),
path('threadAdmin/<str:username>/', views.GetThreadAdmin.as_view()),
]
|
[
"thaoa4vuive@gmail.com"
] |
thaoa4vuive@gmail.com
|
89e123beb4ec1f95633bb5373c04ca769c7422d5
|
904b71e153361110dad46a9bf204011adfeab429
|
/realtime_accl_graph.py
|
c4a718928dd84407bff9e3aa0c663816caf5d302
|
[] |
no_license
|
MiyabiTane/BDM
|
dbe4c4de0910fa7519fdc2d4967c996e01462a04
|
841bed04bbaaae80b91dfd73259a74677f25820c
|
refs/heads/master
| 2020-09-06T07:39:50.374633
| 2019-12-20T14:50:53
| 2019-12-20T14:50:53
| 220,366,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
# -*- coding: utf-8 -*-
"""
matplotlibでリアルタイムプロットする例
無限にsin関数をplotし続ける
"""
from __future__ import unicode_literals, print_function
import numpy as np
import matplotlib.pyplot as plt
import smbus
import time
def pause_plot():
plt.figure(figsize=(10,8))
plt.subplots_adjust(hspace=1)
ax1 = plt.subplot(311)
ax2 = plt.subplot(312)
ax3 = plt.subplot(313)
x = [-0.9,-0.8,-0.7,-0.6,-0.5,-0.4,-0.3,-0.2,-0.1,0] #np.arange(-1, 1, 0.1)
y_x = [0,0,0,0,0,0,0,0,0,0] #np.sin(x)
y_y = [0,0,0,0,0,0,0,0,0,0]
y_z = [0,0,0,0,0,0,0,0,0,0]
ax1.set_xlim(-1, 0)
ax1.set_ylim(-1, 1)
ax2.set_xlim(-1, 0)
ax2.set_ylim(-1, 1)
ax3.set_xlim(-1, 0)
ax2.set_ylim(-1, 1)
    # We have to call plot() once as initialization
    # and keep the objects returned by that plot() call.
    # Note that plot() returns a list.
lines1, = ax1.plot(x, y_x, color="red")
ax1.set_title("acceleration x-axis")
lines2, = ax2.plot(x, y_y, color="blue")
ax2.set_title("acceleration y-axis")
lines3, = ax3.plot(x, y_z, color="green")
ax3.set_title("acceleration z-axis")
I2C_ADDR=0x1d
# Get I2C bus
bus = smbus.SMBus(1)
# Select Control register, 0x2A(42)
# 0x00(00) StandBy mode
bus.write_byte_data(I2C_ADDR, 0x2A, 0x00)
# Select Control register, 0x2A(42)
# 0x01(01) Active mode
bus.write_byte_data(I2C_ADDR, 0x2A, 0x01)
# Select Configuration register, 0x0E(14)
# 0x00(00) Set range to +/- 2g
bus.write_byte_data(I2C_ADDR, 0x0E, 0x00)
time.sleep(0.5)
    # From here on, keep plotting indefinitely
while True:
        # Update the plot data
data = bus.read_i2c_block_data(I2C_ADDR, 0x00, 7)
xAccl = (data[1] * 256 + data[2]) / 16
if xAccl > 2047 :
xAccl -= 4096
yAccl = (data[3] * 256 + data[4]) / 16
if yAccl > 2047 :
yAccl -= 4096
zAccl = (data[5] * 256 + data[6]) / 16
if zAccl > 2047 :
zAccl -= 4096
        x = [p + 0.1 for p in x]  # keep x a list (map() returns an iterator on Python 3)
y_x.pop(0)
y_x.append(xAccl)
lines1.set_data(x, y_x)
ax1.set_xlim(min(x), max(x))
ax1.set_ylim(min(y_x)-10, max(y_x)+10)
y_y.pop(0)
y_y.append(yAccl)
lines2.set_data(x, y_y)
ax2.set_xlim(min(x), max(x))
ax2.set_ylim(min(y_y)-10, max(y_y)+10)
y_z.pop(0)
y_z.append(zAccl)
lines3.set_data(x, y_z)
ax3.set_xlim(min(x), max(x))
ax3.set_ylim(min(y_z)-10, max(y_z)+10)
        # With set_data() the axis limits do not seem to be updated automatically,
        # so in this example the sin curve would quickly drift out of the drawing range.
        # Therefore the x-axis range needs to be adjusted as appropriate.
        #ax.set_xlim(x-1, x)
        # The key point:
        # - plt.show() blocks, so it cannot draw in real time
        # - plt.ion() + plt.draw() is unusable because the figure window freezes and the program stalls
        # ----> use plt.pause(interval)!!! The argument is the sleep time in seconds
time.sleep(0.1)
plt.pause(.01)
if __name__ == "__main__":
pause_plot()
|
[
"miyabitane@yahoo.co.jp"
] |
miyabitane@yahoo.co.jp
|
74d9d462e821b53877b54326c9b3fcdc6186e434
|
c4702d1a06640555829b367852138cc93ba4a161
|
/dealer_sale_order/report/dealer_sale_order_dp_report.py
|
c8e2704dea9d21e634595e4ff5b1bf37fe55262f
|
[] |
no_license
|
Rizalimami/dym
|
0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26
|
af1bcf7b77a3212bc8a8a0e41e6042a134587ed4
|
refs/heads/master
| 2020-04-08T10:56:43.605698
| 2018-11-27T06:44:08
| 2018-11-27T06:44:08
| 159,287,876
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,945
|
py
|
import time
from datetime import datetime
from openerp.report import report_sxw
from openerp.osv import osv
from openerp import pooler
import fungsi_terbilang
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
import pytz
from openerp.tools.translate import _
import base64
class dealer_sale_order(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context=None):
super(dealer_sale_order, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'no_urut': self.no_urut,
'terbilang': self.terbilang,
'invoice_id': self.invoice_id,
'waktu_local': self.waktu_local,
})
self.no = 0
def no_urut(self):
self.no+=1
return self.no
def terbilang(self,amount):
hasil = fungsi_terbilang.terbilang(amount, "idr", 'id')
return hasil
def invoice_id(self):
invoice = self.pool.get('dealer.sale.order').browse(self.cr, self.uid, self.ids).name
invoice2 = self.pool.get('account.invoice').search(self.cr, self.uid,[ ('origin','ilike',invoice),('tipe','=','customer') ])
no_invoice = self.pool.get('account.invoice').browse(self.cr, self.uid,invoice2).number
return no_invoice
def waktu_local(self):
tanggal = datetime.now().strftime('%y%m%d')
menit = datetime.now()
user = self.pool.get('res.users').browse(self.cr, self.uid, self.uid)
tz = pytz.timezone(user.tz) if user.tz else pytz.utc
start = pytz.utc.localize(menit).astimezone(tz)
start_date = start.strftime("%d-%m-%Y %H:%M")
return start_date
report_sxw.report_sxw('report.rml.dealer.sale.order.dp.po', 'dealer.sale.order', 'addons/dealer_sale_order/report/dealer_sale_order_dp_report.rml', parser = dealer_sale_order, header = False)
|
[
"rizal@portcities.net"
] |
rizal@portcities.net
|
0c38900605baafbc66468be8b179b55caad9d3e2
|
e03e59d67c96c1afa0a1c76e62235a3e3f639976
|
/django_test7sangpum_exam/django_test7sangpum_exam/urls.py
|
0df11edfa6c35162ba1bb27fad7cc5209cac88cd
|
[] |
no_license
|
kangmihee/EX_python
|
10a63484802e6ff5454f12f7ade7e277dbf3df97
|
0a8dafe667f188cd89ef7f021823f6b4a9033dc0
|
refs/heads/master
| 2020-07-02T00:23:05.465127
| 2019-09-03T07:49:46
| 2019-09-03T07:49:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
"""django_test7sangpum_exam URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from examapp import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.Main),
path('list', views.list),
path('list/insert', views.insert),
path('list/insertok', views.insertok),
]
|
[
"acorn@acorn-PC"
] |
acorn@acorn-PC
|