blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
824d0c03e344d4cfbe4f90368738c039cebdc5ad | 75dcb56e318688499bdab789262839e7f58bd4f6 | /Cracking Coding Interviews - Mastering Algorithms/template/Section 2 Arrays and Strings/2sum.py | e6e304637b4ab114bf1c20cf266eef1315d241f2 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 571 | py | # # Given an array of integers, return indices of the two numbers such that
# # they add up to a specific target.
#
#
# # Example 1
# # nums = [2, 7, 11, 15], target = 9
# # return [0, 1]
#
# # 7 => 0
#
# # Example 2
# # nums = [2, 7, 11, 15, 4, 23, 19, 5], target = 19
# # return [3, 4]
#
# # Brute force O(n^2)
# # O(n) + O(n)
#
# ___ twosum nums, target
# dic _ # dict
#
# ___ i __ r.. l.. ?
# __ ? ? __ ?.k..
# r_ ? ? ?, ?
# ____
# ?|? - ? ? _ ?
#
# r_ # list
#
# print(twosum([2, 7, 11, 15], 9))
# print(twosum([2, 7, 11, 15, 4, 23, 19, 5], 19))
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
3051eb436bb6c5dedc87ae2f07d7ced426980dd1 | ce990be34e8759efb96b890d9676da313fd2d9b4 | /python/tvm/runtime/executor/aot_executor.py | 9ef0d1dee8947d0abfae04e7694a4b353d91c70e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | tmoreau89/tvm | 291c0b1beb13503e18b1e45f135aaf334660b68d | 8136173a631bf6c7274d26285349225fcf6e495f | refs/heads/master | 2022-11-23T08:36:24.853648 | 2022-11-21T07:36:57 | 2022-11-21T07:36:57 | 119,757,672 | 5 | 1 | Apache-2.0 | 2019-03-22T23:06:53 | 2018-01-31T23:41:33 | Python | UTF-8 | Python | false | false | 5,411 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A Python wrapper for the Module-based Model Runtime Interface for Ahead-of-Time compilation."""
import numpy as np
class AotModule(object):
"""Wraps the AOT executor runtime.Module.
This is a thin wrapper of the underlying TVM module.
you can also directly call set_input, run, and get_output
of underlying module functions
Parameters
----------
module : tvm.runtime.Module
The internal tvm module that holds the implemented model functions.
Attributes
----------
module : tvm.runtime.Module
The internal tvm module that holds the implemented model functions.
Examples
--------
.. code-block:: python
import tvm
from tvm import relay
from tvm.contrib import graph_executor
# build the library using graph executor
lib = relay.build(...)
lib.export_library("compiled_lib.so")
# load it back as a runtime
lib: tvm.runtime.Module = tvm.runtime.load_module("compiled_lib.so")
# Call the library factory function for default and create
# a new runtime.Module, wrap with aot module.
gmod = tvm.runtime.executor.AotModule(lib["default"](dev))
# use the aot module.
gmod.set_input("x", data)
gmod.run()
"""
def __init__(self, module):
self.module = module
self._set_input = module["set_input"]
self._run = module["run"]
self._get_output = module["get_output"]
self._get_input = module["get_input"]
self._get_num_outputs = module["get_num_outputs"]
self._get_input_index = module["get_input_index"]
self._get_num_inputs = module["get_num_inputs"]
def set_input(self, key=None, value=None, **params):
"""Set inputs to the module via kwargs
Parameters
----------
key : int or str
The input key
value : the input value.
The input key
params : dict of str to NDArray
Additional arguments
"""
if key is not None:
v = self._get_input(key)
if v is None:
raise RuntimeError("Could not find '%s' in model's inputs" % key)
v.copyfrom(value)
if params:
# upload big arrays first to avoid memory issue in rpc mode
keys = list(params.keys())
keys.sort(key=lambda x: -np.prod(params[x].shape))
for k in keys:
# TODO(zhiics) Skip the weights for submodule in a better way.
# We should use MetadataModule for initialization and remove
# params from set_input
val = self._get_input(k)
if val:
self._get_input(k).copyfrom(params[k])
def run(self, **input_dict):
"""Run forward execution of the model
Parameters
----------
input_dict: dict of str to NDArray
List of input values to be feed to
"""
if input_dict:
self.set_input(**input_dict)
self._run()
def get_num_outputs(self):
"""Get the number of outputs from the model
Returns
-------
count : int
The number of outputs.
"""
return self._get_num_outputs()
def get_num_inputs(self):
"""Get the number of inputs to the model
Returns
-------
count : int
The number of inputs.
"""
return self._get_num_inputs()
def get_input(self, index, out=None):
"""Get index-th input to out
Parameters
----------
index : int
The input index
out : NDArray
The output array container
"""
if out:
self._get_input(index).copyto(out)
return out
return self._get_input(index)
def get_input_index(self, name):
"""Get inputs index via input name.
Parameters
----------
name : str
The input key name
Returns
-------
index: int
The input index. -1 will be returned if the given input name is not found.
"""
return self._get_input_index(name)
def get_output(self, index, out=None):
"""Get index-th output to out
Parameters
----------
index : int
The output index
out : NDArray
The output array container
"""
if out:
self._get_output(index, out)
return out
return self._get_output(index)
| [
"noreply@github.com"
] | tmoreau89.noreply@github.com |
244e45fa4d06df710a07eca2f04f51aa99aee203 | 720ea7ad783325f006e39c7e7d6a49ec0d31f4ef | /myconnectome/taskfmri/map_feats_to_surface.py | 3d1a72e8e763fb407483285e65da2cc5f7943a60 | [
"MIT"
] | permissive | poldrack/myconnectome | 2c364de97813ab845ab2d2263f9be1a51fd599bc | 8c0217196f28b8f785dabe62f772b4e50a729f1b | refs/heads/master | 2023-09-02T17:50:48.296363 | 2023-08-12T17:05:28 | 2023-08-12T17:05:28 | 21,547,922 | 31 | 10 | null | 2017-03-02T16:56:13 | 2014-07-06T20:22:28 | Jupyter Notebook | UTF-8 | Python | false | false | 3,410 | py | """
map example func and stats images to surface
"""
import os,sys,glob
from run_shell_cmd import run_shell_cmd
import tempfile
sys.path.insert(0,'/home1/01329/poldrack/code/nipype')
sys.path.append('/corral-repl/utexas/poldracklab/code/poldrack/selftracking')
scriptdir='/corral-repl/utexas/poldracklab/data/selftracking/code'
base_dir=os.path.abspath('/corral-repl/utexas/poldracklab/data/selftracking/')
try:
subcode=sys.argv[1]
taskcode=int(sys.argv[2])
runcode=int(sys.argv[3])
assert os.path.exists(os.path.join(base_dir,subcode,'model/model%03d/task%03d_run%03d.feat'%(taskcode,taskcode,runcode)))
except:
subcode='sub091'
taskcode=2
runcode=1
import nipype.interfaces.fsl as fsl
featdir=os.path.join(base_dir,subcode,'model/model%03d/task%03d_run%03d_333.feat'%(taskcode,taskcode,runcode))
exfunc=os.path.join(featdir,'example_func.nii.gz')
surfaces={}
surfaces['midthickness']={'L':'/corral-repl/utexas/poldracklab/data/selftracking/FREESURFER_fs_LR/7112b_fs_LR/fsaverage_LR32k/sub013.L.midthickness.32k_fs_LR.surf.gii','R':'/corral-repl/utexas/poldracklab/data/selftracking/FREESURFER_fs_LR/7112b_fs_LR/fsaverage_LR32k/sub013.R.midthickness.32k_fs_LR.surf.gii'}
surfaces['white']={'L':'/corral-repl/utexas/poldracklab/data/selftracking/FREESURFER_fs_LR/7112b_fs_LR/fsaverage_LR32k/sub013.L.white.32k_fs_LR.surf.gii','R':'/corral-repl/utexas/poldracklab/data/selftracking/FREESURFER_fs_LR/7112b_fs_LR/fsaverage_LR32k/sub013.R.white.32k_fs_LR.surf.gii'}
surfaces['pial']={'L':'/corral-repl/utexas/poldracklab/data/selftracking/FREESURFER_fs_LR/7112b_fs_LR/fsaverage_LR32k/sub013.L.pial.32k_fs_LR.surf.gii','R':'/corral-repl/utexas/poldracklab/data/selftracking/FREESURFER_fs_LR/7112b_fs_LR/fsaverage_LR32k/sub013.R.pial.32k_fs_LR.surf.gii'}
tasknames={1:{1:'nback',2:'dotmotion',3:'faceloc1',4:'superloc',5:'grid'},2:{1:'nback',2:'dotmotion',3:'faceloc2',4:'superloc',5:'grid'}}
newstatsdir=os.path.join(featdir,'stats_pipeline')
if not os.path.exists(newstatsdir):
os.mkdir(newstatsdir)
for filetype in ['cope','varcope','zstat']:
copefiles=glob.glob(os.path.join(featdir,'stats/%s*.nii.gz'%filetype))
for c in copefiles:
copenum=int(c.split('/')[-1].split('.')[0].replace(filetype,''))
for hemis in ['L','R']:
outfile=os.path.join(newstatsdir,'%s%03d.%s.func.gii'%(filetype,copenum,hemis))
goodvoxmask=os.path.join('/corral-repl/utexas/poldracklab/data/selftracking/volume_goodvoxels','%s_%s_goodvoxels_333.nii.gz'%(subcode,tasknames[runcode][taskcode]))
assert os.path.exists(goodvoxmask)
wb_command='wb_command -volume-to-surface-mapping %s %s %s -ribbon-constrained %s %s -volume-roi %s'%(c,surfaces['midthickness'][hemis],outfile,surfaces['white'][hemis],surfaces['pial'][hemis],goodvoxmask)
if not os.path.exists(outfile):
print wb_command
run_shell_cmd(wb_command)
if hemis=='L':
structure='CORTEX_LEFT'
else:
structure='CORTEX_RIGHT'
wb_command='wb_command -set-structure %s %s'%(outfile,structure)
print wb_command
run_shell_cmd(wb_command)
wb_command= 'wb_command -metric-smoothing %s %s 1.5 %s'%(surfaces['midthickness'][hemis],outfile,outfile.replace('func.','smoothed.func.'))
print wb_command
run_shell_cmd(wb_command)
| [
"poldrack@gmail.com"
] | poldrack@gmail.com |
90c3f4703a1dea484873bba36f03b842b97aaf14 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/classification/Resnet50_Cifar_for_PyTorch/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py | 4c2a96b8354675ba93e3e98fa6e97d0ac77ea7da | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,787 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = [
'../_base_/models/efficientnet_b5.py',
'../_base_/datasets/imagenet_bs32.py',
'../_base_/schedules/imagenet_bs256.py',
'../_base_/default_runtime.py',
]
# dataset settings
dataset_type = 'ImageNet'
img_norm_cfg = dict(
mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='RandomResizedCrop',
size=456,
efficientnet_style=True,
interpolation='bicubic'),
dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='Collect', keys=['img', 'gt_label'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='CenterCrop',
crop_size=456,
efficientnet_style=True,
interpolation='bicubic'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
c2eb1b0fc3e91de90f7efc1f3ed6b62c9ed91f9e | ee8855f9405bd96bb77e07cf852d6800a6fa07c6 | /testing/cross_language/mac_test.py | 27f6f188ec98c74c107bdc69cd2559a39d5a5782 | [
"Apache-2.0"
] | permissive | RAJSINGH-Bitian/tink | f286d40ef452f15984e748e042c0b55784c764de | b3fa4e47216fcb2aeaf952d059132f8c39451c2c | refs/heads/master | 2023-07-16T20:14:08.650806 | 2021-08-31T10:19:27 | 2021-08-31T10:19:27 | 401,657,836 | 0 | 0 | Apache-2.0 | 2021-08-31T10:11:13 | 2021-08-31T10:11:13 | null | UTF-8 | Python | false | false | 7,108 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cross-language tests for the MAC primitive."""
# Placeholder for import for type annotations
from typing import Iterable, Text, Tuple
from absl.testing import absltest
from absl.testing import parameterized
import tink
from tink import mac
from tink.proto import tink_pb2
from tink.testing import keyset_builder
from util import supported_key_types
from util import testing_servers
SUPPORTED_LANGUAGES = testing_servers.SUPPORTED_LANGUAGES_BY_PRIMITIVE['mac']
def setUpModule():
mac.register()
testing_servers.start('mac')
def tearDownModule():
testing_servers.stop()
def mac_key_templates() -> Iterable[Tuple[Text, tink_pb2.KeyTemplate]]:
"""Yields (mac_key_template_name, mac_key_template) tuples."""
for key_type in supported_key_types.MAC_KEY_TYPES:
for name in supported_key_types.KEY_TEMPLATE_NAMES[key_type]:
template = supported_key_types.KEY_TEMPLATE[name]
yield (name, template)
yield (name + '-raw', keyset_builder.raw_template(template))
yield (name + '-legacy', keyset_builder.legacy_template(template))
class MacTest(parameterized.TestCase):
@parameterized.parameters(mac_key_templates())
def test_compute_verify_mac(self, key_template_name, key_template):
key_type = supported_key_types.KEY_TYPE_FROM_URL[key_template.type_url]
supported_langs = supported_key_types.SUPPORTED_LANGUAGES[key_type]
self.assertNotEmpty(supported_langs)
# Take the first supported language to generate the keyset.
keyset = testing_servers.new_keyset(supported_langs[0], key_template)
supported_macs = [
testing_servers.mac(lang, keyset) for lang in supported_langs
]
unsupported_macs = [
testing_servers.mac(lang, keyset)
for lang in SUPPORTED_LANGUAGES
if lang not in supported_langs
]
for p in supported_macs:
data = (
b'This is some data to be authenticated using key_template '
b'%s in %s.' % (key_template_name.encode('utf8'),
p.lang.encode('utf8')))
mac_value = p.compute_mac(data)
for p2 in supported_macs:
self.assertIsNone(p2.verify_mac(mac_value, data))
for p2 in unsupported_macs:
with self.assertRaises(
tink.TinkError,
msg='Language %s supports verify_mac with %s unexpectedly' %
(p2.lang, key_template_name)):
p2.verify_mac(mac_value, data)
for p in unsupported_macs:
with self.assertRaises(
tink.TinkError,
msg='Language %s supports compute_mac with %s unexpectedly' %
(p.lang, key_template_name)):
p.compute_mac(data)
# If the implementations work fine for keysets with single keys, then key
# rotation should work if the primitive wrapper is implemented correctly.
# These wrappers do not depend on the key type, so it should be fine to always
# test with the same key type. The wrapper needs to treat keys with output
# prefix RAW and LEGACY differently, so we also test templates with these
# prefixes.
KEY_ROTATION_TEMPLATES = [
mac.mac_key_templates.HMAC_SHA512_512BITTAG,
keyset_builder.raw_template(mac.mac_key_templates.HMAC_SHA512_512BITTAG),
keyset_builder.legacy_template(mac.mac_key_templates.HMAC_SHA512_512BITTAG)
]
def key_rotation_test_cases(
) -> Iterable[Tuple[Text, Text, tink_pb2.KeyTemplate, tink_pb2.KeyTemplate]]:
for compute_lang in SUPPORTED_LANGUAGES:
for verify_lang in SUPPORTED_LANGUAGES:
for old_key_tmpl in KEY_ROTATION_TEMPLATES:
for new_key_tmpl in KEY_ROTATION_TEMPLATES:
yield (compute_lang, verify_lang, old_key_tmpl, new_key_tmpl)
class MacKeyRotationTest(parameterized.TestCase):
@parameterized.parameters(key_rotation_test_cases())
def test_key_rotation(
self, compute_lang, verify_lang, old_key_tmpl, new_key_tmpl):
# Do a key rotation from an old key generated from old_key_tmpl to a new
# key generated from new_key_tmpl. MAC computation and verification are done
# in languages compute_lang and verify_lang.
builder = keyset_builder.new_keyset_builder()
older_key_id = builder.add_new_key(old_key_tmpl)
builder.set_primary_key(older_key_id)
compute_mac1 = testing_servers.mac(compute_lang, builder.keyset())
verify_mac1 = testing_servers.mac(verify_lang, builder.keyset())
newer_key_id = builder.add_new_key(new_key_tmpl)
compute_mac2 = testing_servers.mac(compute_lang, builder.keyset())
verify_mac2 = testing_servers.mac(verify_lang, builder.keyset())
builder.set_primary_key(newer_key_id)
compute_mac3 = testing_servers.mac(compute_lang, builder.keyset())
verify_mac3 = testing_servers.mac(verify_lang, builder.keyset())
builder.disable_key(older_key_id)
compute_mac4 = testing_servers.mac(compute_lang, builder.keyset())
verify_mac4 = testing_servers.mac(verify_lang, builder.keyset())
self.assertNotEqual(older_key_id, newer_key_id)
# 1 uses the older key. So 1, 2 and 3 can verify the mac, but not 4.
mac_value1 = compute_mac1.compute_mac(b'plaintext')
verify_mac1.verify_mac(mac_value1, b'plaintext')
verify_mac2.verify_mac(mac_value1, b'plaintext')
verify_mac3.verify_mac(mac_value1, b'plaintext')
with self.assertRaises(tink.TinkError):
verify_mac4.verify_mac(mac_value1, b'plaintext')
# 2 uses the older key. So 1, 2 and 3 can verify the mac, but not 4.
mac_value2 = compute_mac2.compute_mac(b'plaintext')
verify_mac1.verify_mac(mac_value2, b'plaintext')
verify_mac2.verify_mac(mac_value2, b'plaintext')
verify_mac3.verify_mac(mac_value2, b'plaintext')
with self.assertRaises(tink.TinkError):
verify_mac4.verify_mac(mac_value2, b'plaintext')
# 3 uses the newer key. So 2, 3 and 4 can verify the mac, but not 1.
mac_value3 = compute_mac3.compute_mac(b'plaintext')
with self.assertRaises(tink.TinkError):
verify_mac1.verify_mac(mac_value3, b'plaintext')
verify_mac2.verify_mac(mac_value3, b'plaintext')
verify_mac3.verify_mac(mac_value3, b'plaintext')
verify_mac4.verify_mac(mac_value3, b'plaintext')
# 4 uses the newer key. So 2, 3 and 4 can verify the mac, but not 1.
mac_value4 = compute_mac4.compute_mac(b'plaintext')
with self.assertRaises(tink.TinkError):
verify_mac1.verify_mac(mac_value4, b'plaintext')
verify_mac2.verify_mac(mac_value4, b'plaintext')
verify_mac3.verify_mac(mac_value4, b'plaintext')
verify_mac4.verify_mac(mac_value4, b'plaintext')
if __name__ == '__main__':
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
724eff4f3b472a292d51c465bff7567539d80d2d | eeee5415490822b06d4d75061f30b46b0ea7086d | /basic/c6/ajax.py | aae3ab7b72e09fd7f35e5897231733db4d2d3c64 | [] | no_license | nearxu/python-exercise | 891784380abfc0af16555f1a8f8bc3efc60be5e9 | 8d11a25cfbbd271e593846c18265aaaafead62fe | refs/heads/master | 2020-03-28T10:42:38.878811 | 2019-05-07T08:20:49 | 2019-05-07T08:20:49 | 148,136,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,138 | py | #!/usr/bin/env python
# coding=utf-8
import requests
from time import sleep
from urllib.parse import urlencode
from bs4 import BeautifulSoup as bs
# from pyquery import PyQuery as pq
import json
# 模拟ajax请求, 打开开发者模式,查看xhr类型的请求, 就是ajax的,
#然后分析, 用正常的请求方式即可
# 程序说明,当page=1的情况下,cards的第二条返回的是本人follow的用户以及其他的信息
BASE_URL = 'https://m.weibo.cn/api/container/getIndex?'
# type=uid&value=2830678474&containerid=1076032830678474&page=1
HEADERS = {
'Host': 'm.weibo.cn',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Referer': 'https://m.weibo.cn/u/2830678474',
'X-Requested-With': 'XMLHttpRequest',
}
def get_page(page=1):
params = {
'type': 'uid',
'value': '2830678474',
'containerid': '1076032830678474',
'page': str(page),
}
url = BASE_URL+urlencode(params)
try:
resp = requests.get(url, headers=HEADERS)
sleep(1)
if resp.status_code == 200:
return resp.text, page
except requests.ConnectionError as e:
print('Error: {}'.format(e.args))
def parse_page(json, page):
'''
用request请求德 json数据
'''
if json:
items = json.get('data').get('cards')
for index, item in enumerate(items):
if page == 1 and index == 1:
continue
else:
item = item.get('mblog')
weibo_data = {}
weibo_data['id'] = item.get('id')
# weibo_data['text'] = pq(item.get('text')).text()
weibo_data['attitudes'] = item.get('attitudes_count')
weibo_data['comments'] = item.get('comments_count')
weibo_data['reposts'] = item.get('reposts_count')
yield weibo_data
if __name__ == '__main__':
for page in range(1, 11):
json = get_page(page)
results = parse_page(*json)
for result in results:
print(result)
| [
"2448895924@qq.com"
] | 2448895924@qq.com |
9c3f2b0c2a48c90f999930139d86b16e300de59f | c37dd3634d2ed0781e4f9db9dc0eb7f9eef5d13d | /tests/concat/test_automapper_add_array.py | c916380ef4050b209c631eb512d2336351ee52b0 | [
"Apache-2.0"
] | permissive | jd-webb/SparkAutoMapper | d1f65c9465b2b2cc4bf222faaca683fc00a32a4e | ae119ef65361724fab964383e0926901578d41b8 | refs/heads/main | 2023-06-20T23:21:43.092630 | 2021-07-20T23:17:15 | 2021-07-20T23:17:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,647 | py | from pathlib import Path
from typing import Dict
from pyspark.sql import SparkSession, DataFrame, Column
from tests.conftest import clean_spark_session
from spark_auto_mapper.automappers.automapper import AutoMapper
from spark_auto_mapper.helpers.automapper_helpers import AutoMapperHelpers as A
from pyspark.sql.functions import lit, concat, array, col
def test_automapper_add_array(spark_session: SparkSession) -> None:
clean_spark_session(spark_session)
data_dir: Path = Path(__file__).parent.joinpath("./")
data_json_file: Path = data_dir.joinpath("data.json")
source_df: DataFrame = spark_session.read.json(str(data_json_file), multiLine=True)
source_df.createOrReplaceTempView("patients")
source_df.show(truncate=False)
# Act
mapper = AutoMapper(
view="members", source_view="patients", drop_key_columns=False
).columns(age=A.column("identifier") + A.text("foo").to_array())
assert isinstance(mapper, AutoMapper)
sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
for column_name, sql_expression in sql_expressions.items():
print(f"{column_name}: {sql_expression}")
assert str(sql_expressions["age"]) == str(
concat(col("b.identifier"), array(lit("foo").cast("string"))).alias("age")
)
result_df: DataFrame = mapper.transform(df=source_df)
result_df.show(truncate=False)
assert result_df.where("id == 1730325416").select("age").collect()[0][0] == [
"bar",
"foo",
]
assert result_df.where("id == 1467734301").select("age").collect()[0][0] == [
"John",
"foo",
]
| [
"imranq2@hotmail.com"
] | imranq2@hotmail.com |
671f88ebf90a3007a6c9833f539f34fa029d9dff | 9e41adf86b2c166a219f0b6d9371089c5f2d7d93 | /Exerciciospython/Laços e repeticoes/e053.py | a87da6d24498a36550e91ffd0339e194a93bb82b | [] | no_license | Nadirlene/Exercicios-python | 1aaead61dd0efcb5303f6294e765e9e1d54506cc | 3fe82e166003922ef749756a249840ed1fe940b0 | refs/heads/main | 2022-12-25T21:35:06.172839 | 2020-09-28T15:08:37 | 2020-09-28T15:08:37 | 299,343,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | frase = str(input('Digite uma frase: ')).strip()
fraseJunta = frase.upper().split()
fraseSemEspaço = ''.join(fraseJunta)
#inversoFrase = ''
'''for posiçãoLetra in range(len(fraseSemEspaço)-1, -1, -1):
inversoFrase += fraseSemEspaço[posiçãoLetra]'''
inversoFrase = fraseSemEspaço[::-1]
if inversoFrase == fraseSemEspaço:
print('Temos um palíndromo!')
else:
print('Não temos um palíndromo')
print(fraseSemEspaço, inversoFrase)
| [
"nadirleneoliveira@yahoo.com"
] | nadirleneoliveira@yahoo.com |
5fdcde97501266ab80d0dbd780757cd190dbe0a5 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5690574640250880_1/Python/ronnodas/mine.py | 862c553df5b605c868a619a3e1ecc6a0018df8e7 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,594 | py | T = int(input())
def solve(R,C,M):
# print('solving',R,C,M)
if R>C:
flipboard = solve(C,R,M)
if flipboard:
return [[flipboard[j][i] for j in range(C)] for i in range(R)]
else:
return
if M==0:
board = [['.']*C for i in range(R)]
board[-1][-1] = 'c'
return board
if R == 1:
board = ['*' if i<M else '.' for i in range(R*C)]
board[-1] = 'c'
return [board]
if R == 2:
if R*C==M+1:
board = [['*']*C for i in range(R)]
board[-1][-1] = 'c'
return board
if (M%2) or (M+2)==(R*C):
return
board = [['*' if i<(M/2) else '.' for i in range(C)] for j in range(R)]
board[-1][-1] = 'c'
return board
if M>=R:
subboard = solve(R,C-1,M-R)
if subboard:
return [['*']+r for r in subboard]
return
if (R,C,M) == (3,3,2):
return
k = min(M,C-2)
board = [['*']*k+['.']*(C-k)]
for i in range(M-k):
board.append(['*']+['.']*(C-1))
while len(board)<R:
board.append(['.']*(C))
board[-1][-1] = 'c'
return board
for case in range(1,T+1):
print("Case #",case,": ",sep='')
R,C,M = (int(x) for x in input().split())
ans = solve(R,C,M)
if ans:
for r in ans:
print(''.join(r))
else:
print('Impossible')
# for M in range(36):
# ans = solve(6,6,M)
# if ans:
# for r in ans:
# print(''.join(r))
# else:
# print('Impossible')
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
cb01cae9745f9e60e06c0e4649f9c96139e99244 | 2115f43b77978edc2e0e32833e6437dc98e6a202 | /evaluation.py | dd99f5c5bac972eff1aa40f23f9fb697d031ae28 | [] | no_license | zshwuhan/Graph_Embedding | 0f41ad1b5e64ab37ff8ef6bf335be766bba95b50 | 6a1b106a78209ae366e9c540337380a0e47fca06 | refs/heads/master | 2021-01-12T06:11:47.209280 | 2016-09-20T01:57:24 | 2016-09-20T01:57:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,521 | py | from __future__ import division
__author__ = 'Music'
# Given the model, evaluate the Accuracy@N defined in the paper
from collections import defaultdict
import cPickle
import bisect
import theano
import theano.tensor as T
import numpy as np
import lasagne
import lasagne.nonlinearities as nonlinearities
def train_test_split(checkin_file, training_percentage = 0.8):
""" In the paper, we choose data before 80% time for each user as training data
, remaining 20% as test data
:return:
"""
# All data: {user:[venue,venue,venue...]}
all_data = defaultdict(list)
count = 0
f = open(checkin_file, 'r')
for line in f:
if count == 0:
count += 1
continue
count += 1
userID, time, venueID, venue_name, venueLocation, venueCategory = line.strip().split('\t')
all_data[userID].append(venueID)
for userID in all_data:
all_data[userID].reverse() # Now it's in forward time order
test_count = 0
training_data = {}
test_data = {}
for userID in all_data:
cutoff = int(len(all_data[userID]) * training_percentage)
training_data[userID] = all_data[userID][:cutoff]
test_data[userID] = all_data[userID][cutoff:]
test_count += len(test_data[userID])
print '#test %i' % test_count
return training_data, test_data
def read_mappings(data_path):
venueID_index = {}
with open(data_path+"/venueID_index.txt", "r") as f:
for line in f:
parts = line.strip().split(',')
venueID_index[parts[0]] = int(parts[1])
userID_index = {}
with open(data_path+"/userID_index.txt", "r") as f:
for line in f:
parts = line.strip().split(',')
userID_index[parts[0]] = int(parts[1])
return venueID_index, userID_index
def build_predictor(snapshot_filename):
embeddings, all_param_values, dist_nn_sizes, dropout, _ = restore(snapshot_filename)
# initialize embeddings
print('Restoring embeddings ...')
node_vecs = theano.shared(value=embeddings[0], name='node_vecs')
node_vecs_c = theano.shared(value=embeddings[1], name='node_vecs_c')
type_vecs = theano.shared(value=embeddings[2], name='type_vecs')
type_vecs_c = theano.shared(value=embeddings[3], name='type_vecs_c')
node_vecs_dim = embeddings[0].shape[1]
type_vecs_dim = embeddings[2].shape[1]
print("#nodes %i\t dims %i" % (embeddings[0].shape[0], node_vecs_dim))
print("#types %i\t dims %i" % (embeddings[2].shape[0], type_vecs_dim))
# build the distance neural network
print('Restoring distance NN ...')
x = T.imatrix('x')
x_type_0 = T.iscalar('x_type_0')
x_type_1 = T.iscalar('x_type_1')
node_vec_input = T.concatenate(
[node_vecs[x[:,0]].reshape((x.shape[0], node_vecs.shape[1])),
node_vecs_c[x[:,1]].reshape((x.shape[0], node_vecs_c.shape[1]))],
axis=1)
type_vec_input = T.concatenate([type_vecs[T.cast(x_type_0,dtype='int32')],
type_vecs_c[T.cast(x_type_1,dtype='int32')]], axis=0)
type_vec_input = T.extra_ops.repeat(type_vec_input.dimshuffle('x', 0), x.shape[0], axis=0)
vecs_input = T.concatenate([node_vec_input, type_vec_input], axis=1)
vecs_input_dim = (node_vecs_dim+type_vecs_dim)*2
input_layer = lasagne.layers.InputLayer(shape=(None,vecs_input_dim), input_var=vecs_input, name='input_layer')
prev_layer = input_layer
for lid,size in enumerate(dist_nn_sizes):
if dropout[lid] == 1:
prev_layer = lasagne.layers.DropoutLayer(prev_layer, p=0.5)
layer = lasagne.layers.DenseLayer(prev_layer, num_units=size,
nonlinearity=nonlinearities.rectify, name='dense_layer_%d'%lid)
prev_layer = layer
output_layer = lasagne.layers.DenseLayer(prev_layer, num_units=1, nonlinearity=nonlinearities.sigmoid)
nn_output = lasagne.layers.get_output(output_layer, deterministic=True)
# restore dist_nn weights
all_params = lasagne.layers.get_all_params(output_layer)
for p,v in zip(all_params, all_param_values):
p.set_value(v)
# compile theano functions
predict_fn = theano.function([x, x_type_0, x_type_1], nn_output,
allow_input_downcast=True)
return predict_fn
def accuracy_at_N(test_data, location_circle, Ns=[1],
                  snapshot_filename="", data_path="data/", batch_size=1000):
    """Evaluate top-N retrieval accuracy of the restored distance NN.

    Python 2 code (print statements, dict.iteritems, xrange).

    test_data: dict mapping userID -> iterable of ground-truth venueIDs.
    location_circle: dict mapping venueID -> candidate venueIDs nearby.
    Ns: accuracy cutoffs to report.
        NOTE(review): mutable default ``Ns=[1]`` is shared across calls;
        it is only read here, but ``Ns=None`` would be safer.
    Returns a numpy float32 array with one accuracy value per entry of Ns.
    """
    predictor = build_predictor(snapshot_filename)
    venueID_index, userID_index = read_mappings(data_path)
    src_type = 0 #user
    dst_type = 1 #venue
    # predict
    total_num = 0
    total_correct = np.array([0]*len(Ns),dtype='float32')
    for userID, venueIDs in test_data.iteritems():
        for venueID in venueIDs:
            total_num += 1
            possible_venues = location_circle[venueID]
            # transform data
            venue_indexes = [venueID_index[venueID]] # the 0-th element is the target
            venue_indexes += [venueID_index[e] for e in possible_venues]
            venue_indexes = np.expand_dims(np.array(venue_indexes,dtype='int32'), axis=1)
            # Pair every candidate venue with this user's index.
            x_0 = np.ones([batch_size,1])*userID_index[userID]
            # predict by minibatches
            results = np.array([])
            nbatches = int(venue_indexes.shape[0]/batch_size)
            for index in xrange(nbatches):
                pred = predictor(np.concatenate([x_0,venue_indexes[index*batch_size:(index+1)*batch_size]],axis=1),
                                 src_type, dst_type)
                #pred = np.random.randn(batch_size,1)
                results = np.append(results, pred)
            # the last batch (remainder smaller than batch_size)
            if nbatches*batch_size < venue_indexes.shape[0]:
                size = venue_indexes.shape[0] - nbatches*batch_size
                x_0 = np.ones([size,1])*userID_index[userID]
                pred = predictor(np.concatenate([x_0,venue_indexes[-size:]],axis=1),
                                 src_type, dst_type)
                #pred = np.random.randn(size,1)
                results = np.append(results, pred)
            # rank: the target venue was placed at position 0 above
            ranklist = np.argsort(results)[::-1] # descending
            rank = np.where(ranklist==0)[0][0]
            for i,N in enumerate(Ns):
                if rank <= N:
                    total_correct[i] += 1
            # Progress report every 100 test check-ins.
            if total_num % 100 == 0:
                print "%i\t" % total_num,
                print total_correct/total_num
    return total_correct/total_num
def restore(snapshot_filename):
    """Load a pickled training snapshot (Python 2: print statement, cPickle).

    Returns (embeddings, all_param_values, dist_nn_sizes, dropout, epoch).
    NOTE(review): the file object passed to cPickle.load is never closed.
    """
    print 'restore from %s' % snapshot_filename
    x = cPickle.load(open(snapshot_filename,"rb"))
    embeddings, all_param_values, dist_nn_sizes, dropout, epoch = x[0],x[1],x[2],x[3],x[4]
    return embeddings, all_param_values, dist_nn_sizes, dropout, epoch
if __name__ == "__main__":
    # Driver: evaluate a trained snapshot on the Foursquare check-in split.
    training_data, test_data = train_test_split(checkin_file='data/foursquare/checkin_CA_venues.txt')
    # venueID -> precomputed nearby candidate venues.
    location_circle = cPickle.load(open("data/foursquare/location_circle.p", "rb"))
    snapshot_filename="output/foursquare_n30_t30_dnn200-50_drp10_b1000_lr0.010000-100-0.2_e200.model"
    #TODO: rule out unvisited venue in training set
    accuracy = accuracy_at_N(test_data, location_circle, Ns=[1,5,10,15,20],
                             snapshot_filename=snapshot_filename,
                             data_path="data/foursquare/",
                             batch_size=5000)
    print accuracy
| [
"635716260@qq.com"
] | 635716260@qq.com |
38ae6faed0f151dcd8d3a29aefc08ed61761d777 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_3/gjdsuv001/question4.py | 78f7f1a77441e529da82ea6ccecdddea408ea58d | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | start = eval(input("Enter the starting point N:\n"))
# NOTE(review): `start` is read on the (truncated) line above this span.
# eval(input()) on user input is unsafe; int(input()) would suffice.
end = eval(input("Enter the ending point M:\n"))
print("The palindromic primes are:")
# Scan the open interval (start, end) for primes whose decimal digits read
# the same forwards and backwards.
for num in range(start+1, end):
    if start ==end:
        break
    # Trial-division primality test (vacuously true for num < 2).
    if all(num%i!=0 for i in range(2,num)):
        num = str(num)
        if num=="1":
            continue  # 1 is not prime
        # Palindrome check on the decimal representation.
        if num[::1] == num[::-1]:
            print(num)
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
37b1bae998cd3d605b4e9b48538f005d86a4f353 | 3f60b999ea7bda83c9586f75f52463dc20337f24 | /sensitive_user_portrait/search/views.py | c864449a9169ba5e240772013e08d264af773cf6 | [] | no_license | jianjian0dandan/sensitive_user_portrait | 629e49ce71db92b50634bac9c828811cdb5381e9 | cacc30267ebc0e621b1d48d4f1206277a0f48123 | refs/heads/master | 2021-01-20T23:18:07.138057 | 2016-05-22T12:09:40 | 2016-05-22T12:09:40 | 42,869,287 | 0 | 0 | null | 2015-09-21T13:55:12 | 2015-09-21T13:55:11 | null | UTF-8 | Python | false | false | 681 | py | # -*- coding:utf-8 -*-
import os
import json
from flask import Blueprint, url_for, render_template, request, abort, flash, session, redirect
from sensitive_user_portrait.global_utils import es_sensitive_user_portrait as es
from utils import full_text_search
#mod = Blueprint('search', __name__, url_prefix='/search')
@mod.route('/full_text_search/')
def ajax_full_text_search():
    """Full-text search endpoint.

    Reads a comma-separated ``words_list`` query parameter and returns the
    JSON-encoded results of ``full_text_search``; returns the string '0'
    when the parameter is missing or empty.
    NOTE(review): the ``mod`` Blueprint used by this decorator is commented
    out above, so importing this module raises NameError until it is restored.
    """
    words_string = request.args.get('words_list', '') #','seperate, 'a,b,c'
    if not words_string:
        return '0'
    words_list = words_string.split(',')
    results = full_text_search(words_list)
    if results:
        return json.dumps(results)
    else:
        return json.dumps([])
| [
"1257819385@qq.com"
] | 1257819385@qq.com |
9a34b4a2a3bf8f44a4cc2130982e75080d178f23 | 1ff02c7b754a49820ee046769d21cbc61d0a22a7 | /bookstore/migrations/0001_initial.py | 3a3ace0a3818aa163093d96be8a00cb23c77d0b7 | [] | no_license | guajiropa/bobos_used_books | 98a91e456e1568759a83cb3fc08f563a6e39549f | 696ce7c4910f61816dc448fe8d636a3a734987fe | refs/heads/master | 2022-12-10T00:18:35.994289 | 2018-11-22T14:55:24 | 2018-11-22T14:55:24 | 156,604,032 | 0 | 1 | null | 2022-12-09T14:55:57 | 2018-11-07T20:27:06 | Python | UTF-8 | Python | false | false | 1,774 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the bookstore app: Author, Book and Publisher.

    Auto-generated by Django. Book gets a many-to-many link to Author at
    creation time; its foreign key to Publisher is added as a separate
    operation after the Publisher model exists.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=40)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
                ('title', models.CharField(max_length=100)),
                ('publication_date', models.DateField()),
                ('authors', models.ManyToManyField(to='bookstore.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Publisher',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
                ('name', models.CharField(max_length=30)),
                ('address', models.CharField(max_length=50)),
                ('city', models.CharField(max_length=60)),
                ('state_province', models.CharField(max_length=30)),
                ('country', models.CharField(max_length=50)),
                ('website', models.URLField()),
            ],
        ),
        migrations.AddField(
            model_name='book',
            name='publisher',
            field=models.ForeignKey(to='bookstore.Publisher'),
        ),
    ]
| [
"guajiropa@hotmail.com"
] | guajiropa@hotmail.com |
da15fd457f1825eaab4a40ad9e0265901731d7b7 | f757570cd6a43ad658927a31a723e03b2d592b6c | /blog/urls.py | f49a799522bb737c54aaee5e95d3fe376c11e841 | [] | no_license | kaito071831/my-first-blog | 447bd3f4547f4206a6f0d82b5152f88b4232478b | 32bf2c6cfddc72958cf5578773f1344dcebd5e20 | refs/heads/master | 2022-11-16T21:14:13.198184 | 2020-07-14T05:30:59 | 2020-07-14T05:30:59 | 274,639,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | from django.urls import path
from . import views
# URL routes for the blog app: post CRUD, draft workflow, publishing and
# comment moderation. Route names are referenced from templates via {% url %}.
urlpatterns = [
    path('', views.post_list, name='post_list'),
    path('post/<int:pk>/', views.post_detail, name='post_detail'),
    path('post/new/', views.post_new, name='post_new'),
    path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
    path('drafts/', views.post_draft_list, name='post_draft_list'),
    path('post/<int:pk>/publish/', views.post_publish, name='post_publish'),
    path('post/<int:pk>/remove/', views.post_remove, name='post_remove'),
    path('post/<int:pk>/comment/', views.add_comment_to_post, name='add_comment_to_post'),
    path('comment/<int:pk>/approve/', views.comment_approve, name='comment_approve'),
    path('comment/<int:pk>/remove/', views.comment_remove, name='comment_remove'),
]
"you@example.com"
] | you@example.com |
feb710bb46a11b74276e7d8a148ff56e74294017 | f83e6935c5b61c8d5232d3a327a932f4794433f5 | /apps/performance/api/models/HCSSYS_SystemConfig.py | 4a86bc7affe1ce91513f0581244f829b610dd787 | [] | no_license | nttlong2018/hr-python | 2965cf9858fee356558dce2e2482b41109c6c774 | 7916b812f6f9759a27395f7e78c30c07200f8502 | refs/heads/master | 2020-03-07T14:01:12.550849 | 2018-07-20T11:08:22 | 2018-07-20T11:08:22 | 127,516,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,306 | py | from config import database, helpers, db_context
import datetime
import base
import threading
# Module-level guard so the model schema is only registered once per process.
_hasCreated=False
def HCSSYS_SystemConfig():
    """Lazily register the HCSSYS_SystemConfig model and return its collection.

    On first call, extends the base model with the password-policy, session
    and locale fields below; later calls skip straight to the collection.
    NOTE(review): despite the ``threading`` import, this check-then-set is
    not thread-safe - two threads may both observe _hasCreated == False.
    """
    global _hasCreated
    if not _hasCreated:
        helpers.extent_model(
            "HCSSYS_SystemConfig",
            "base",
            [],
            is_has_number=helpers.create_field("bool"),
            num_of_number=helpers.create_field("numeric"),
            is_has_upper_char=helpers.create_field("bool"),
            num_of_upper=helpers.create_field("numeric"),
            is_has_lower_char=helpers.create_field("bool"),
            num_of_lower=helpers.create_field("numeric"),
            is_has_symbols=helpers.create_field("bool"),
            num_of_symbol=helpers.create_field("numeric"),
            is_ad_aut=helpers.create_field("bool"),
            session_timeOut=helpers.create_field("numeric"),
            time_out_expand=helpers.create_field("numeric"),
            minimum_age=helpers.create_field("numeric"),
            password_expiration=helpers.create_field("numeric"),
            will_expire=helpers.create_field("bool"),
            change_after=helpers.create_field("numeric"),
            apply_minimum_age=helpers.create_field("bool"),
            apply_history=helpers.create_field("bool"),
            history=helpers.create_field("numeric"),
            apply_minLength=helpers.create_field("bool"),
            min_len=helpers.create_field("numeric"),
            apply_maxLength=helpers.create_field("bool"),
            max_len=helpers.create_field("numeric"),
            lock_on_failed=helpers.create_field("bool"),
            threshold_to_lock=helpers.create_field("numeric"),
            time_lock=helpers.create_field("numeric"),
            alert_before=helpers.create_field("numeric"),
            is_first_change=helpers.create_field("bool"),
            not_user_in_password=helpers.create_field("bool"),
            date_format=helpers.create_field("text"),
            dec_place_separator=helpers.create_field("text"),
            dec_place_currency=helpers.create_field("numeric"),
            default_language=helpers.create_field("text")
        )
        _hasCreated=True
    ret = db_context.collection("HCSSYS_SystemConfig")
    return ret
"zugeliang2000@gmail.com"
] | zugeliang2000@gmail.com |
d4f5e276ecfbb6a8059926899ef504905acf70f6 | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/scipy/py2/scipy/sparse/setup.py | 61480e3001834ae8002a4f0866715197030f1584 | [
"Python-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Qhull",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 2,185 | py | from __future__ import division, print_function, absolute_import
import os
import sys
import subprocess
def configuration(parent_package='',top_path=None):
    """numpy.distutils build configuration for scipy.sparse.

    Registers the linalg/csgraph subpackages and the _csparsetools and
    _sparsetools extensions. The sparsetools C++ sources are (re)generated
    lazily at build time by generate_sparsetools.py.
    """
    from numpy.distutils.misc_util import Configuration

    config = Configuration('sparse',parent_package,top_path)

    config.add_data_dir('tests')

    config.add_subpackage('linalg')
    config.add_subpackage('csgraph')

    config.add_extension('_csparsetools',
                         sources=['_csparsetools.c'])

    def get_sparsetools_sources(ext, build_dir):
        # Defer generation of source files until the build actually runs.
        subprocess.check_call([sys.executable,
                               os.path.join(os.path.dirname(__file__),
                                            'generate_sparsetools.py'),
                               '--no-force'])
        return []

    depends = ['sparsetools_impl.h',
               'bsr_impl.h',
               'csc_impl.h',
               'csr_impl.h',
               'other_impl.h',
               'bool_ops.h',
               'bsr.h',
               'complex_ops.h',
               'coo.h',
               'csc.h',
               'csgraph.h',
               'csr.h',
               'dense.h',
               'dia.h',
               'py3k.h',
               'sparsetools.h',
               'util.h']
    # BUG FIX: a stray trailing comma previously turned this assignment into
    # a 1-tuple ([...],), so add_extension received a tuple-wrapped depends
    # list instead of the list of header paths itself.
    depends = [os.path.join('sparsetools', hdr) for hdr in depends]

    config.add_extension('_sparsetools',
                         define_macros=[('__STDC_FORMAT_MACROS', 1)],
                         depends=depends,
                         include_dirs=['sparsetools'],
                         sources=[os.path.join('sparsetools', 'sparsetools.cxx'),
                                  os.path.join('sparsetools', 'csr.cxx'),
                                  os.path.join('sparsetools', 'csc.cxx'),
                                  os.path.join('sparsetools', 'bsr.cxx'),
                                  os.path.join('sparsetools', 'other.cxx'),
                                  get_sparsetools_sources]
                         )

    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone: python setup.py build
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| [
"arcadia-devtools@yandex-team.ru"
] | arcadia-devtools@yandex-team.ru |
e840420124556ad2ba4321a7678ae7a8a7d76a79 | cae1a61c2eb6701762320b3d5163d6621e02eb46 | /20210420 Daily Algorithm Coding.py | b0c0738c50d9096682f3c25b55ee30643f399d25 | [] | no_license | LeeWoojin-99/Algorithm | b8f08756120378ced1e3214f0647037ae21decc9 | 680994a761042ed9a307fcad727dc5d1807ef832 | refs/heads/main | 2023-07-10T06:47:30.089557 | 2021-08-13T14:48:07 | 2021-08-13T14:48:07 | 342,619,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | '''
2021. 04. 20.
Daily Algorithm Coding
Dijkstra Algorithm
'''
import heapq
INF = 999  # "infinity" sentinel; assumes every real path cost stays below 999
# n = number of nodes, e = number of directed edges
n, e = map(int, input().split())
start = int(input())
graph = [[] for _ in range(n+1)]  # adjacency list, nodes numbered from 1
distance = [INF]*(n+1)            # best known distance from `start` per node
for i in range(e):
    # Directed edge x -> y with weight dist.
    x, y, dist = map(int,input().split())
    graph[x].append((y, dist))
def dijkstra(start):
    """Single-source shortest paths from `start` using a binary heap.

    Fills the module-level `distance` list in place and reads the global
    adjacency list `graph`. Unreachable nodes keep the INF sentinel.
    The computed distances are identical to the original implementation;
    the stale-entry skip below only avoids redundant reprocessing.
    """
    queue = []
    heapq.heappush(queue, (0, start))
    distance[start] = 0
    while queue:
        dist, now = heapq.heappop(queue)
        # Skip stale heap entries: a shorter path to `now` has already been
        # recorded, so expanding this entry could only repeat work.
        if dist > distance[now]:
            continue
        for nxt, weight in graph[now]:
            cost = dist + weight
            if cost < distance[nxt]:
                distance[nxt] = cost
                heapq.heappush(queue, (cost, nxt))
# Run the search and print the distance table (index 0 is unused padding).
dijkstra(start)
print(distance)
''' dijkstra input data
6 11
1
1 2 2
1 3 5
1 4 1
2 3 3
2 4 2
3 2 3
3 6 5
4 3 3
4 5 1
5 3 1
5 6 2
node edge
start node
edge data
edge data : start node, end node, distance
''' | [
"lwj1316@naver.com"
] | lwj1316@naver.com |
99f6db30dfb33b926b226b4d8fc8828336608c94 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_74/1255.py | f3ccb3a3fdf20995d6520a24c1ecce8756a14654 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | class Robot(object):
buttons = []
pos = 1
current = 0
def __init__(self, buttons):
self.buttons = [i[1] for i in buttons]
def move(self):
try:
i=self.buttons[self.current]
j=1 if self.pos < i else -1 if self.pos > i else 0
self.pos += j
except:
j=0
return j
def try_press(self):
try:
if self.pos == self.buttons[self.current]:
self.current+=1
return True
else:
return False
except:
return False
class TestChamber(object):
    """Simulates both robots pressing buttons in the required global order.

    One call to move_robots() is one second of simulated time: each robot
    may move one step, and at most one robot presses a button. A robot may
    press only when it did not move this second and the next button in the
    global sequence belongs to it; orange has priority over blue.
    """
    buttons = []            # full (colour, position) sequence, in press order
    orange, blue = None, None
    time = 0                # seconds elapsed
    def __init__(self, buttons):
        self.buttons = buttons
        # Split the (colour, position) sequence into each robot's targets.
        s=lambda x,y:filter(lambda z:z[0]==y,x)
        self.orange = Robot(s(buttons,'O'))
        self.blue = Robot(s(buttons,'B'))
    def move_robots(self):
        # Advance the clock by one second.
        self.time+=1
        orange_move = self.orange.move()
        blue_move = self.blue.move()
        orange_press, blue_press = False, False
        # Orange may press first; blue presses only if orange did not.
        if not orange_move and self.next_button('O'):
            orange_press = self.orange.try_press()
        if not blue_move and not orange_press and self.next_button('B'):
            blue_press = self.blue.try_press()
        return self.orange.pos, orange_press, self.blue.pos, blue_press
    def get_time(self):
        # Seconds elapsed so far.
        return self.time
    def is_complete(self):
        # True once every button in the global sequence has been pressed.
        return len(self.buttons) == self.orange.current + self.blue.current
    def next_button(self, robot_type):
        # Does the next unpressed button belong to `robot_type` ('O' / 'B')?
        return robot_type == self.buttons[self.orange.current+self.blue.current][0]
def read_cases(cases):
    """Parse Code Jam input text into per-case button sequences.

    The first line is the case count; each case line looks like
    "N O 1 B 2 ..." and is parsed into a list of (colour, position) pairs
    (the leading pair count N itself is discarded).
    """
    lines = cases.split('\n')
    count = int(lines[0])
    parsed = []
    for line in lines[1:count + 1]:
        tokens = line.split(" ")[1:]
        pairs = []
        for idx in range(0, len(tokens) - 1, 2):
            pairs.append((tokens[idx], int(tokens[idx + 1])))
        parsed.append(pairs)
    return parsed
def test_robots(case):
    """Return the number of seconds both robots need to press every button."""
    chamber = TestChamber(case)
    while not chamber.is_complete():
        chamber.move_robots()
    return chamber.get_time()
def main():
    """Read the Code Jam input file, solve each case and write the answers.

    Fix: both files are now closed via context managers - the input file
    handle was previously opened inline and never closed.
    """
    with open('/Users/MiLaN/gcj1l.txt', 'r') as source:
        cases = read_cases(source.read())
    with open('/Users/MiLaN/gcj1lo.txt', 'w') as out:
        for i in range(len(cases)):
            out.write("Case #%d: %d\n" % (i + 1, test_robots(cases[i])))
main() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
4da403fef4a69e64696f80e8c147881127ceef94 | f06cf7cf7fe12a50f45edbd56b20acfc28606a44 | /String/rotated_string.py | 19584cba79f59e0c0c2f455697122164a9a2a1bd | [] | no_license | prashant97sikarwar/data-structure-and-algorithms | 53e016e50cf2ba26d2504f5a5f1ba39ca35f13f4 | 3ebe367c96a7bd82a427fc2beb7c8edd37247de7 | refs/heads/master | 2023-01-18T16:15:44.935431 | 2020-11-22T14:59:54 | 2020-11-22T14:59:54 | 257,667,068 | 0 | 0 | null | 2020-10-02T09:59:27 | 2020-04-21T17:32:43 | Python | UTF-8 | Python | false | false | 349 | py | t = int(input())
# For each test case: decide whether s appears in p rotated two places
# in either direction. Prints 1 on a match, 0 otherwise.
while t > 0:
    s = input()
    p = input()
    n = len(p)
    flag = 0
    yup = 0
    # c = p rotated right by 2 (last two characters moved to the front),
    # truncated back to the original length.
    c = p[n-2] + p[n-1] + p
    c = c[0:n]
    if s in c:
        flag = 1
    else:
        # d = p rotated left by 2 (first two characters moved to the end).
        d = p + p[0] + p[1]
        d = d[2:]
        if s in d:
            yup = 1
    if (flag or yup):
        print(1)
    else:
        print(0)
    t -= 1
"prashant97sikarwar@gmail.com"
] | prashant97sikarwar@gmail.com |
1b1a9e9ccf75d8a124818c9743e2cc838376384e | ee7d3a72770c3ab3dc2ad44ac1d004e68e22f7d6 | /apps/users/serializers.py | a311afb37f791e86d3cc61bf041fef444cec2c69 | [] | no_license | wzw5566/manytomany | 7ad6382013a49b17daa158cd311074443e78b82c | 5930ab67001ac31cf6f4f09a8c72f0e97657749e | refs/heads/master | 2020-04-19T07:28:22.837793 | 2019-01-30T15:24:37 | 2019-01-30T15:24:37 | 168,047,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | # -*- coding: UTF-8 -*-
from django.contrib.auth import get_user_model
from apps.users.models import Role
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
User = get_user_model()
class RoleSerializer(serializers.ModelSerializer):
    """Serializes Role rows: name, description and parent-role id."""
    class Meta:
        model = Role
        fields = ("role_name", "role_desc", "role_parent_id")
class UserSerializer(serializers.ModelSerializer):
    """User detail serializer.

    ``roles`` is handled manually in :meth:`create` so the incoming role
    objects are re-resolved by name before being attached to the new user.
    """

    class Meta:
        model = User
        fields = ("username", "password", "avatar", "roles")

    def create(self, validated_data):
        """Create a user with a hashed password and attach its roles.

        Fixes over the original: the raw password is no longer written to
        the database first (the user is saved once, already hashed),
        leftover debug prints and commented-out code were removed, the
        redundant per-role saves were dropped (M2M ``add`` persists on its
        own), and a role name that no longer exists is skipped instead of
        crashing on ``add(None)``.
        """
        roles_list = validated_data.pop('roles')
        password = validated_data.pop('password')
        user = User(**validated_data)
        user.set_password(password)
        user.save()
        for role in roles_list:
            # Re-resolve by name; filter().first() is None when missing.
            new_role = Role.objects.filter(role_name=role.role_name).first()
            if new_role is not None:
                user.roles.add(new_role)
        return user
| [
"wzw33874@qq.com"
] | wzw33874@qq.com |
956a5837b7b05ae87a8597372fcc922222b8fb3b | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW06_20210715225832.py | 2502de0d6e2658396a9bf4418d1d531e3315c113 | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,387 | py | """
Georgia Institute of Technology - CS1301
HW06 - Text Files & CSV
Collaboration Statement:
"""
#########################################
"""
Function Name: findCuisine()
Parameters: filename (str), cuisine (str)
Returns: list of restaurants (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def findCuisine(filename, cuisine):
    """Return the names of all restaurants whose cuisine matches `cuisine`.

    The file stores 4-line records; a restaurant's cuisine is the line
    directly below its name, so a match on line i yields the stripped
    name on line i - 1.
    """
    with open(filename) as source:
        lines = [line.strip() for line in source]
    return [lines[index - 1] for index, line in enumerate(lines) if line == cuisine]
"""
Function Name: restaurantFilter()
Parameters: filename (str)
Returns: dictionary that maps cuisine type (str)
to a list of restaurants of the same cuisine type (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantFilter(filename):
    """Return a dict mapping each cuisine type to its list of restaurants.

    The file stores 4-line records (name, cuisine, group, extra). Keys
    appear in first-encountered order and each value keeps file order.

    Fixes: no longer shadows the builtin ``dict``, the file is closed via
    a context manager, and the two scanning passes collapse into one using
    ``setdefault``.
    """
    cuisine_map = {}
    with open(filename) as source:
        lines = [line.strip() for line in source]
    for i in range(0, len(lines), 4):
        # lines[i] is the restaurant name; lines[i + 1] its cuisine.
        cuisine_map.setdefault(lines[i + 1], []).append(lines[i])
    return cuisine_map
"""
Function Name: createDirectory()
Parameters: filename (str), output filename (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def createDirectory(filename, outputFilename):
    """Write a numbered restaurant directory grouped into Fast Food / Sit-down.

    Reads 4-line records (name, cuisine, group, extra) from `filename` and
    writes to `outputFilename`:

        Restaurant Directory
        Fast Food
        1. <name> - <cuisine>
        ...
        Sit-down
        1. <name> - <cuisine>

    The last sit-down entry has no trailing newline, matching the original
    output exactly. Fixes: both file handles are now closed (neither was
    before) and the per-group counters come from the list lengths.
    """
    fastfood = []
    sitdown = []
    with open(filename) as source:
        content = source.readlines()
    for i in range(2, len(content), 4):
        restaurant = content[i-2].strip()
        cuisine = content[i-1].strip()
        group = content[i].strip()
        if group == 'Fast Food':
            fastfood.append(str(len(fastfood) + 1) + '. ' + restaurant + ' - ' + cuisine + '\n')
        else:
            sitdown.append(str(len(sitdown) + 1) + '. ' + restaurant + ' - ' + cuisine)
    with open(outputFilename, 'w') as target:
        target.write('Restaurant Directory' + '\n')
        target.write('Fast Food' + '\n')
        target.writelines(fastfood)
        target.write('Sit-down' + '\n')
        # join() newline-separates entries, leaving the last one bare.
        target.write('\n'.join(sitdown))
"""
Function Name: extraHours()
Parameters: filename (str), hour (int)
Returns: list of (person, extra money) tuples (tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def extraHours(filename, hour):
    """Return (name, overtime pay) tuples for employees working over `hour` hours.

    The CSV columns are: name, age, wage, year hired, hours worked, with a
    header row first. Overtime pay = (hours worked - hour) * hourly wage.

    Fix: the file is now closed via a context manager (it was leaked).
    """
    overtime = []
    with open(filename) as source:
        source.readline()  # skip the header row
        for raw in source:
            fields = raw.strip().split(',')
            name = fields[0]
            wage = int(fields[2])
            hours_worked = int(fields[4])
            if hours_worked > hour:
                overtime.append((name, (hours_worked - hour) * wage))
    return overtime
"""
Function Name: seniorStaffAverage()
Parameters: filename (str), year (int)
Returns: average age of senior staff members (float)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def seniorStaffAverage(filename, year):
    """Return the average age, rounded to 2 places, of staff hired before `year`.

    CSV columns: name, age, wage, year hired, hours worked (header row first).

    Fixes: the file is closed via a context manager, and when nobody was
    hired before `year` the function returns 0.0 instead of raising
    ZeroDivisionError.
    """
    total_age = 0.0
    senior_count = 0
    with open(filename) as source:
        source.readline()  # skip the header row
        for raw in source:
            fields = raw.strip().split(',')
            age = int(fields[1])
            year_hired = int(fields[3])
            if year_hired < year:
                total_age += age
                senior_count += 1
    if senior_count == 0:
        # No senior staff: define the average as 0.0 rather than dividing by 0.
        return 0.0
    return round(total_age / senior_count, 2)
"""
Function Name: ageDict()
Parameters: filename (str), list of age ranges represented by strings (list)
Returns: dictionary (dict) that maps each age range (str) to a list of employees (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def ageDict(filename, ageRangeList):
    """Map each "lo-hi" age-range string to the names of employees in that range.

    CSV columns: name, age, wage, year hired, hours worked (header row
    first). An employee is listed under every range whose inclusive bounds
    contain their age.

    Fix: the original body was an unfinished draft with a syntax error
    (``for i in ageRange`` with no colon or body) and never closed the
    file; this completes the intended behaviour described in the module
    header and exercised by the driver call below.
    """
    employee_age_dictionary = {age_range: [] for age_range in ageRangeList}
    # Pre-parse the "lo-hi" strings once instead of per employee.
    bounds = {}
    for age_range in ageRangeList:
        low, high = age_range.split('-')
        bounds[age_range] = (int(low), int(high))
    with open(filename) as source:
        source.readline()  # skip the header row
        for raw in source:
            fields = raw.strip().split(',')
            name = fields[0]
            age = int(fields[1])
            for age_range in ageRangeList:
                low, high = bounds[age_range]
                if low <= age <= high:
                    employee_age_dictionary[age_range].append(name)
    return employee_age_dictionary
# print(findCuisine('restaurants.txt', 'Mexican'))
# print(restaurantFilter('restaurants.txt'))
# print(createDirectory('restaurants.txt','output.txt'))
# print(extraHours('employees.csv', 40))
# print(seniorStaffAverage('employees.csv', 2019))
# Ad-hoc manual check for ageDict (the other checks above are commented out).
rangeList = ["18-22", "23-29", "30-39", "40-49"]
print(ageDict('employees.csv', rangeList))
| [
"sanjay.mamidipaka@gmail.com"
] | sanjay.mamidipaka@gmail.com |
990297985c3ec2ac181400cad39471fb61c13a90 | f50c5b559fc1ffcbb62caf52a093e255bf7147cd | /RemoveSubstring.py | d3748086e1ee7edfbefa88fc9d5fe473c0ee020d | [] | no_license | anish531213/Interesting-Python-problems | 38da1b30ccbca357cb44e66ef9ef09321600cd1d | b0d8fa97f260c1863e0c852a710ce5f3b5cecfc3 | refs/heads/master | 2021-01-10T13:50:30.869113 | 2016-02-17T17:03:32 | 2016-02-17T17:03:32 | 49,538,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | def RemoveSub(string, sub):
new = ''
i = 0
while i < len(string):
if string[i] == sub[0]:
if string[i: (i+len(sub))] == sub:
i += len(sub)
else:
new += string[i]
i += 1
else:
new += string[i]
i += 1
return new
print(RemoveSub('aniashanan', 'an'))
| [
"anish531213@gmail.com"
] | anish531213@gmail.com |
9560cd84f72c84c38cad3124051ddcb9c9485fc0 | 845d4102771a547dbc447f1d837b89a538f977b7 | /listaExercicios/Lista fabio 1/F1_Q15_TRINAGULO.py | 3c74ab0930715cd569f52e0c1ddad72de7a654cc | [] | no_license | TemistoclesZwang/Algoritmo_IFPI_2020 | 16e92d6f3e5e3f15ad573819cbd0171c5a5e3f5d | cc24657864985c3894ab738692807a01eab8d377 | refs/heads/main | 2023-08-23T02:57:58.838585 | 2021-10-05T16:18:14 | 2021-10-05T16:18:14 | 310,669,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | #Entrada
# Input: read the base and height as integers (prompts are in Portuguese).
base = int(input('Insira o valor da base:'))
altura = int(input('Insira o valor da altura:'))
# Processing: area of a triangle = base * height / 2
formula = (base * altura) / 2
# Output
print(f'A área de seu triângulo é {formula}')
| [
"temis2st@gmail.com"
] | temis2st@gmail.com |
32554565e60575688cea4eba15671a6e3eaef58a | 6ceba5434cb0dd59e4a2b64051005867b24ee2ba | /FacialRigTool/UI/Edit_UI.py | 3259bf9bf6b9fec5c0cf5f282e7c4bfd82602287 | [] | no_license | lefan2016/FacialRigTool | 8ce1e6c44e7548f3e368ef15efecc4b22d622ec2 | 9ea46068d8d77644c5f560fb68388de4389cfe4b | refs/heads/master | 2020-09-05T05:02:26.253629 | 2018-11-12T03:04:18 | 2018-11-12T03:04:18 | 219,990,056 | 1 | 0 | null | 2019-11-06T12:19:43 | 2019-11-06T12:19:42 | null | UTF-8 | Python | false | false | 6,665 | py | from PySide2 import QtWidgets
import maya.cmds as cmds
import logging
import inspect
from functools import partial
import Splitter_UI
class EditWidget(QtWidgets.QDialog):
    """Modeless dialog for editing a rig widget's rigArgs inside Maya.

    Builds one editable row per parameter of `functionName` (introspected
    via inspect) plus a filterable joint/locator list, and writes the
    edited values back into `instance.rigArgs` on OK.
    """

    def __init__(self, instance, uiName, functionName):
        """Create (or re-create) the dialog named `uiName`.

        instance: the rig widget whose rigArgs dict is edited.
        functionName: the rig function whose signature drives the form.
        """
        logging.basicConfig()
        logger = logging.getLogger('%s' % uiName)
        logger.setLevel(logging.INFO)
        try:
            cmds.deleteUI('%s' % uiName)
        except Exception:
            # FIX: was a bare `except:`; deleteUI fails when no previous
            # dialog with this name exists, which is fine to ignore.
            logger.info('No %s exists!' % uiName)
        super(EditWidget, self).__init__(parent=instance)
        self.setObjectName('%s' % uiName)
        self.setWindowTitle('%s' % uiName)
        self.setModal(False)
        self.instance = instance
        self.functionName = functionName
        # Turn on track selection order for selecting vertices and lines 1 by 1
        self.TSO_Type = cmds.selectPref(q=1, tso=1)
        cmds.selectPref(tso=1)
        self.buildUI()
        self.populate()
        self.show()
        self.refreshListWidget()

    def buildUI(self):
        """
        Build the Edit UI
        :return: None
        """
        # Main layout
        self.mainLayout = QtWidgets.QGridLayout()
        self.setLayout(self.mainLayout)
        # parameters part
        self.paramSplitterWidget = Splitter_UI.Splitter('Parameters')
        self.formWidget = QtWidgets.QFrame()
        self.formWidget.setFrameStyle(QtWidgets.QFrame.StyledPanel)
        self.formWidget.setFrameShadow(QtWidgets.QFrame.Plain)
        self.formLayout = QtWidgets.QFormLayout()
        self.formWidget.setLayout(self.formLayout)
        # Selection part
        self.selSplitterWidget = Splitter_UI.Splitter('Check & Select')
        selectionWidget = QtWidgets.QFrame()
        selectionWidget.setFrameStyle(QtWidgets.QFrame.StyledPanel)
        selectionWidget.setFrameShadow(QtWidgets.QFrame.Plain)
        selectionLayout = QtWidgets.QVBoxLayout()
        selectionWidget.setLayout(selectionLayout)
        # filter part
        filterLayout = QtWidgets.QHBoxLayout()
        filterLabel = QtWidgets.QLabel('Filter: ')
        self.jointCheck = QtWidgets.QCheckBox('joint')
        self.locatorCheck = QtWidgets.QCheckBox('locator')
        filterLayout.addWidget(filterLabel)
        filterLayout.addWidget(self.jointCheck)
        filterLayout.addWidget(self.locatorCheck)
        self.jointCheck.stateChanged.connect(self.refreshListWidget)
        self.locatorCheck.stateChanged.connect(self.refreshListWidget)
        # arrangement
        self.mainLayout.addWidget(self.paramSplitterWidget, 0, 0, 1, 1)
        self.mainLayout.addWidget(self.formWidget, 1, 0, 1, 1)
        self.mainLayout.addWidget(self.selSplitterWidget, 0, 1, 1, 1)
        self.mainLayout.addWidget(selectionWidget, 1, 1, 1, 1)
        self.listWidget = QtWidgets.QListWidget()
        self.listWidget.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.rowItem = {}
        # One form row (line edit + "<<<" grab button) per rig-function arg.
        # NOTE(review): inspect.getargspec is deprecated and removed in
        # Python 3.11; inspect.getfullargspec is the drop-in replacement
        # here (index [0] is still the positional-arg name list).
        self.tupe = inspect.getargspec(func=self.functionName)
        for i in self.tupe[0]:
            layout = QtWidgets.QHBoxLayout()
            self.rowItem[i] = QtWidgets.QLineEdit()
            button = QtWidgets.QPushButton('<<<')
            layout.addWidget(self.rowItem[i])
            layout.addWidget(button)
            button.clicked.connect(partial(self.setEditLine, self.rowItem[i]))
            self.formLayout.addRow(i, layout)
        selectionLayout.addLayout(filterLayout)
        selectionLayout.addWidget(self.listWidget)
        # selectionWidget.setMaximumHeight(self.formWidget.height())
        self.createGeneralButton(self.mainLayout)

    def setEditLine(self, editLine):
        """
        set specified editLine text
        :param editLine: specified editLine
        :return: None
        """
        # listWidget selected items
        listItems = self.listWidget.selectedItems()
        itemStr = []
        for i in listItems:
            itemStr.append(self.listWidget.item(self.listWidget.row(i)).text())
        # vertices or lines currently selected in the Maya viewport
        selList = cmds.ls(os=1)
        finalList = itemStr + selList
        if finalList:
            # A single name is written bare; several as a list literal.
            if len(finalList) < 2:
                editLine.setText(finalList[0])
            else:
                editLine.setText(str(finalList))

    def saveData(self):
        """
        Save the args info to the specified rig widget's rigArgs dictionary
        :return: None
        """
        tupe = inspect.getargspec(self.functionName)
        for i in tupe[0]:
            self.instance.rigArgs[i] = self.rowItem[i].text()

    def setData(self):
        """
        Save the rigArgs info and close the rigArgs dialog
        :return: None
        """
        self.saveData()
        self.close()
        # Restore the user's original track-selection-order preference.
        cmds.selectPref(tso=self.TSO_Type)

    def cancel(self):
        """
        Cancel button action -> close the rigArgs dialog
        :return: None
        """
        self.close()
        cmds.selectPref(tso=self.TSO_Type)

    def populate(self):
        """
        Refresh and populate the rigArgs info for each arg
        :return: None
        """
        for arg in self.instance.rigArgs.keys():
            if arg in self.rowItem.keys():
                self.rowItem[arg].setText(str(self.instance.rigArgs[arg]))
            else:
                raise RuntimeWarning('No specified properties!')

    def createGeneralButton(self, layout):
        """
        Create the Cancel and OK button for each widget
        :param layout: the edit window main widget
        :return: None
        """
        btnWidget = QtWidgets.QWidget()
        btnLayout = QtWidgets.QHBoxLayout(btnWidget)
        layout.addWidget(btnWidget, 2, 0, 1, 2)
        cancel_Btn = QtWidgets.QPushButton('Cancel')
        OK_Btn = QtWidgets.QPushButton('OK')
        btnLayout.addWidget(cancel_Btn)
        btnLayout.addWidget(OK_Btn)
        OK_Btn.clicked.connect(self.setData)
        cancel_Btn.clicked.connect(self.cancel)

    def refreshListWidget(self):
        """
        refresh listWidget with specified checked
        :return: None
        """
        self.listWidget.clear()
        joints = []
        locators = []
        if self.jointCheck.isChecked():
            joints = cmds.ls(type='joint')
        locaterShapes = []
        if self.locatorCheck.isChecked():
            locaterShapes = cmds.ls(type='locator')
            # Show the locator transforms, not their shape nodes.
            for loc in locaterShapes:
                locators.append(cmds.listRelatives(loc, p=1)[0])
        returnList = joints + locators
        if returnList:
            if len(returnList) > 1:
                self.listWidget.addItems(returnList)
            else:
                self.listWidget.addItem(returnList[0])
"328665042@qq.com"
] | 328665042@qq.com |
157b04c4ca6cb4a3339a31e2372f3e9659c9c763 | c5602a1b2c1183783a89c9e5002ac727ac7d0b37 | /Server/Application/User/Login/Refresh/method.py | 2be0cba00b569342956e544a5d7affdb10a3283f | [] | no_license | parkjinhong03/Backend-for-Recycle | 57a98035921a83290ea8d5708d41e4903f026b56 | add997b8f76ccba1a8836c85d4b00cf290f0090c | refs/heads/master | 2020-07-28T18:33:10.320361 | 2019-10-04T11:59:40 | 2019-10-04T11:59:40 | 209,494,597 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from flask_jwt_extended import get_jwt_identity, create_access_token
def post():
current_id = get_jwt_identity()
access_token = create_access_token(identity=current_id)
return {"message": "access_token 재발급 완료", "access_token": access_token} | [
"jinhong0719@naver.com"
] | jinhong0719@naver.com |
b0a4e08b3f9fa770ffe2513d434bd4c65ffc9aab | e89f44632effe9ba82b940c7721cad19a32b8a94 | /text2shorthand/shorthand/waseda/kin.py | 6c08f72d99136f0d73a724d9a713f3b680df8465 | [] | no_license | Wyess/text2shorthand | 3bcdb708f1d7eeb17f9ae3181c4dd70c65c8986e | 5ba361c716178fc3b7e68ab1ae724a57cf3a5d0b | refs/heads/master | 2020-05-17T14:52:11.369058 | 2019-08-20T12:50:00 | 2019-08-20T12:50:00 | 183,776,467 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | from ..waseda.char import WasedaChar
from ..waseda.ki import CharKi
class CharKin(WasedaChar):
    """Waseda-system shorthand character for the kana 'きん' (kin)."""

    def __init__(self, name='kin', kana='きん',
                 model='E8CL1E1F', head_type='E', tail_type='EF'):
        super().__init__(name, kana, model, head_type, tail_type)

    def get_paths(self):
        # 'kin' reuses the ECLEF stroke path defined on CharKi.
        return [CharKi.path_ECLEF()]
"diyhacker@mail.goo.ne.jp"
] | diyhacker@mail.goo.ne.jp |
6370e076e01a24e53ee37f6da63de3194df30fd9 | af077539faf803bc3db8dcf20b8b31aa7b693027 | /roomapp/decorators.py | 5b2c8ec15c3d5d4066a0b6e7d9ea87d3aa30c708 | [] | no_license | matt700395/tema_1 | be4b47f081bff966ecbfc9a4a3220589db58ecdb | 58d4e77f75ef4c4893ebb3bee48945c16f39216a | refs/heads/master | 2023-08-30T01:14:22.892288 | 2021-11-16T10:33:00 | 2021-11-16T10:33:00 | 428,611,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | from django.contrib.auth.mixins import AccessMixin
from django.contrib.auth.models import User
from django.http import HttpResponseForbidden
from django.shortcuts import render
from roomapp.models import Room
def room_ownership_required(func):
    """View decorator that allows access only to the owner of the Room.

    Expects the wrapped view to receive the room primary key as the
    keyword argument ``pk``.  Returns HTTP 403 when the requesting user
    is not the room's owner; otherwise delegates to the wrapped view.
    """
    @wraps(func)  # preserve the wrapped view's name/docstring for introspection
    def decorated(request, *args, **kwargs):
        room = Room.objects.get(pk=kwargs['pk'])
        if room.user != request.user:
            return HttpResponseForbidden()
        return func(request, *args, **kwargs)
    return decorated
| [
"matt7003@korea.ac.kr"
] | matt7003@korea.ac.kr |
934f9596adb18bc21b481a85ae3578507f131216 | bc17d1b3c8774b80f5e2a703d36dd8407f0513f1 | /manejo_archivos.py | 9217796eae65e61181bb7497d494fda0c8c57def | [] | no_license | RAFASANT29/repositorio2 | 3a2f510bd26eca1c51c4bb2db772112c44307158 | 7f94765a5e5e0af46da9b6b940e47aff4a1d3efd | refs/heads/master | 2023-01-24T03:03:03.771341 | 2020-12-09T19:46:56 | 2020-12-09T19:46:56 | 314,951,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | try:
archivo = open("prueba.txt","w")
archivo.write("Agregamos info al archivo\n")
archivo.write("Adios")
except Exception as e:
print(e)
finally:
archivo.close()
#Despues de close ya no se puede usar el archivo
#archivo.write("hola")
| [
"you@example.com"
] | you@example.com |
dfef87f5fb962c67ff4bd55d6791f4dac5982f07 | fcad9a3a7c552d5346ec4bf9d47bb381189a5db5 | /0721.py | 2f6f024a2fcefd61740e6b38ade9b194164d97cd | [] | no_license | GINK03/yukicoder-solvers | dac51345991c416791aa39fc92bc340cb9590a74 | 69cc0c410a5f8c59aaf41c4d84f64895457d1567 | refs/heads/master | 2021-07-22T23:38:29.297257 | 2021-07-02T12:59:33 | 2021-07-02T12:59:33 | 13,683,298 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | import datetime
# Read a date in YYYY/MM/DD form from stdin and print the date two days later.
DATE_FMT = '%Y/%m/%d'  # renamed: `format` shadowed the builtin of the same name
delta = datetime.timedelta(days=2)
parsed = datetime.datetime.strptime(input(), DATE_FMT)  # `time` shadowed the stdlib module name
print((parsed + delta).strftime(DATE_FMT))
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
fd82b440e45c181301407acb82097af9b26711e0 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4001/codes/1592_1803.py | 35c76cb859568a674a06983f5572d265c4105a56 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | num = float(input("Sequencia: "))
# Split the four-digit sequence (read into `num` above) into decimal places.
thousands = num // 1000
remainder = num % 1000
hundreds = remainder // 100
remainder = remainder % 100
tens = remainder // 10
units = remainder % 10

# Weighted sum (units*2 + tens*3 + hundreds*4 + thousands*5) mod 11
# yields the check digit.
check_digit = (units * 2 + tens * 3 + hundreds * 4 + thousands * 5) % 11
print(int(check_digit))
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
2ec4a8a1b8dc049c7b2712b60750cf829dcf2168 | 9afbb6993450d1e0c3bae68e86844bd06d4419ee | /Django_Project/mysite/polls/views.py | 9b986f818596842f5ac79ceb465c1ad4269ba54a | [] | no_license | Jigar710/Python_Programs | 6f331caac30878655d4cca4ad97d4214c0262088 | 714a6306487eb6712f32ccb51b6a2407a81873fa | refs/heads/main | 2023-02-25T12:24:44.874199 | 2021-01-28T15:43:24 | 2021-01-28T15:43:24 | 332,869,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | from django.shortcuts import render
from django.http import HttpResponse
from polls.models import Question
from django.template import loader
# Create your views here.
def index(request):
    """Plain-text placeholder for the site root."""
    body = "This is index url."
    return HttpResponse(body)
def pollsindex(request):
    """Plain-text placeholder for the polls index."""
    body = "This is polls index url."
    return HttpResponse(body)
def pollsindex2(request):
    """Plain-text placeholder for the secondary polls index."""
    body = "This is polls index url2."
    return HttpResponse(body)
def detail(request, question_id):
    """Placeholder detail page for a single question."""
    message = "You're looking at question %s." % question_id
    return HttpResponse(message)
def results(request, question_id):
    """Placeholder results page for a single question."""
    template = "You're looking at the results of question %s."
    return HttpResponse(template % question_id)
def vote(request, question_id):
    """Placeholder vote handler for a single question."""
    message = "You're voting on question %s." % question_id
    return HttpResponse(message)
def display(request):
    """Dump every Question row in a bare HttpResponse.

    Fixes the misspelled parameter ``requst`` -> ``request``; Django
    passes the request positionally, so the rename is safe for callers.
    HttpResponse iterates the queryset, concatenating each Question's
    string form with no separators.
    """
    questions = Question.objects.all()
    return HttpResponse(questions)
'''
def dis(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
template = loader.get_template('polls/index.html')
context = {
'latest_question_list': latest_question_list,
}
return HttpResponse(template.render(context, request))
'''
def dis(request):
    """Render the five most recent questions with the polls index template."""
    recent = Question.objects.order_by('-pub_date')[:5]
    return render(request, 'polls/index.html',
                  {'latest_question_list': recent})
"jigar.shekhat.777@gmail.com"
] | jigar.shekhat.777@gmail.com |
dca0c291c98daa59d7fdc57d5cae4903c32d1f2c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03472/s998787684.py | 4c4feff5518f1548df2e74af62ed36049b02416a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | import heapq
import math
# Read item count N and the target total H.
N, H = map(int, input().split())
# a_ls[i]: reusable per-use value of item i.
# b_ls[i]: one-shot value of item i, stored negated so heapq's min-heap
# pops the largest one-shot value first.
# (Looks like AtCoder ABC085D "Katana Thrower": a = wield damage,
# b = throw damage, H = monster health -- TODO confirm.)
a_ls = [0] * N
b_ls = [0] * N
for i in range(N):
    a, b = map(int, input().split())
    a_ls[i] = a
    b_ls[i] = -b
# Best reusable value available.
a_max = max(a_ls)
heapq.heapify(b_ls)
total = 0
count = 0
# Greedy: consume one-shot values in decreasing order while a single
# one-shot still beats the best reusable value; once the reusable value
# is at least as good, finish the remainder with repeated reuses.
for i in range(len(b_ls)):
    b_max = -heapq.heappop(b_ls)
    if a_max >= b_max:
        count += math.ceil((H - total) / a_max)
        total = H
        break
    total += b_max
    count += 1
    if total >= H:
        break
# If every one-shot was spent and the target wasn't reached, cover the
# rest with the reusable value (adds 0 when total already == H).
count += math.ceil(max(0, (H - total)) / a_max)
print(count)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
693a4dc084d4e4d1163d6f701ed19f474b48b90c | 0af33c10d3c6cb817f5067743f06c71ad99bca0b | /done/coci08c4p1.py | c5f2d8611eac508615679a5abdfb90783ff46a54 | [] | no_license | nathanlo99/dmoj_archive | 6d4f84b58d963109c73a52281a4aa06dd79bc1da | b137f8fa5894542a39e168c1f6485d622a119f42 | refs/heads/master | 2020-04-15T12:47:18.181614 | 2019-07-21T03:03:21 | 2019-07-21T03:03:21 | 62,849,549 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | import sys
# Fast input: rebind input() to raw stdin reads.
input = sys.stdin.readline

nums = list(map(int, input().split()))
# Four bubble passes over the first five slots (fully sorts 5 numbers).
for _ in range(4):
    for idx in range(4):
        if nums[idx] > nums[idx + 1]:
            nums[idx], nums[idx + 1] = nums[idx + 1], nums[idx]
print(" ".join(map(str, nums)))
"nathanlo99@hotmail.ca"
] | nathanlo99@hotmail.ca |
406830cfaec7cee3a78554a16fd8b93d30887569 | 21f77fc058058271a77252dccafc1a33f0ba554c | /caid/graphics/colorbar.py | 5fe630cf8c93ab93ed789c738b31fff79023b3c6 | [
"MIT"
] | permissive | sommaric/caid | ea238f850e08bcc43f9799778d40a8fa2b53ddf0 | 213821e3178dbf15482b042bfc6ffa8b1f9f04d8 | refs/heads/master | 2021-01-15T14:37:14.627294 | 2015-04-06T07:35:16 | 2015-04-06T07:35:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | # -*- coding: UTF-8 -*-
import sys
import numpy as np
from numpy import array, linspace, zeros, zeros_like
from caid.cad_geometry import cad_geometry
#from igakit.graphics import glFreeType
try:
from OpenGL.arrays import vbo
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
except:
print('''ERROR: PyOpenGL not installed properly.''')
from caid.field import field
class colorbar(field):
def __init__(self, C, xmax=0., xmin=0., ymax=0., ymin=0., zmax=0., zmin=0., colormap=None, side='right'):
self._colormap = colormap
self._C = C
self._side = side
print("> create colorbar with ", len(C), " side ", side)
if side == 'right':
scale = [0.25, 2.]
displ = [xmax+2.,0.]
n = [0,len(C)-2]
if side == 'left':
scale = [0.25, 2.]
displ = [xmin-1.,0.]
n = [0,len(C)-2]
if side == 'bottom':
scale = [2., 0.25]
displ = [0.,ymin-2.]
n = [len(C)-2,0]
if side == 'top':
scale = [2., 0.25]
displ = [0.,ymax+2.]
n = [len(C)-2,0]
from caid.cad_geometry import square
geometry = square()
values = square(n=n)
for geo in [geometry, values]:
for axis in range(0, 2):
geo.scale(scale[axis], axis=axis)
geo.translate(displ)
patch_id = 0
nrb = values[patch_id]
P = zeros_like(nrb.points)
if side == 'right':
P[0,:,0] = C
P[1,:,0] = C
if side == 'left':
P[0,:,0] = C
P[1,:,0] = C
if side == 'bottom':
P[:,0,0] = C
P[:,1,0] = C
if side == 'top':
P[:,0,0] = C
P[:,1,0] = C
nrb.set_points(P)
field.__init__(self, geometry=geometry, values=values, type='scalar')
| [
"ratnaniahmed@gmail.com"
] | ratnaniahmed@gmail.com |
3a2dff0b711b79895235dc55a8de818d2c5a163b | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/pol/consumer.py | 66fab4ae63cd62879a3d286c5627a8e5c33ac536 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,796 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Consumer(Mo):
    """Auto-generated ACI managed-object model ``polConsumer``.

    Consumer for a policy pulled into the shard.  Declarative metadata
    only -- the original generator notes: Mo doc not defined in techpub.
    Do not hand-edit beyond comments; this file is machine-generated.
    """

    # Class-level metadata describing the MO within the ACI object tree.
    meta = ClassMeta("cobra.model.pol.Consumer")

    meta.moClassName = "polConsumer"
    meta.rnFormat = "polcons-[%(oDn)s]"
    meta.category = MoCategory.REGULAR
    meta.label = "Consumer for the policy pulled in the shard"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    meta.childClasses.add("cobra.model.fault.Delegate")

    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))

    meta.parentClasses.add("cobra.model.pol.RsPrToL3extProvLblDef")
    meta.parentClasses.add("cobra.model.pol.RsPrToBDSubnetHolder")

    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")

    meta.rnPrefixes = [
        ('polcons-', True),
    ]

    # --- property metadata (one PropMeta per MO attribute) ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)

    # oDn is the naming property: it appears in the RN ("polcons-[oDn]").
    prop = PropMeta("str", "oDn", "oDn", 20287, PropCategory.REGULAR)
    prop.label = "None"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("oDn", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    meta.namingProps.append(getattr(meta.props, "oDn"))

    getattr(meta.props, "oDn").needDelimiter = True

    def __init__(self, parentMoOrDn, oDn, markDirty=True, **creationProps):
        # oDn is the single naming value required by the RN format.
        namingVals = [oDn]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
3d1cc9ec8d10c76a60a667667a6d0fdabd5902a0 | 1edc4d50047fc5af03e58ce9ec0146e1131d0254 | /practice/aboutstring.py | b17fbebc121ae946b3e746adab018ce180342dbb | [] | no_license | zhaisa/myproject01 | 7b0fdc17353db0bc5251a7ab4216156f2e7b782a | f159114732b1aabf89fda60682f35fa0eea06196 | refs/heads/master | 2020-04-12T07:56:54.525747 | 2018-12-19T03:52:48 | 2018-12-19T03:53:01 | 162,376,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,531 | py | aa="abc\tdefAAaabbbbbbbcCccccccg"
# Demonstration of common str methods on `aa` (defined above).
cc = aa.capitalize()  # uppercase the first character, lowercase the rest
print(cc)

dd = aa.center(50)  # center the string, space-padded to width 50
print(dd)

ee = aa.count("b", 0, len(aa))  # occurrences of "b" within [beg, end)
print(ee)

# FIX: the error-handler name must be "strict" (or "ignore"/"replace");
# the original "static" is not a registered handler and would raise
# LookupError the first time an encode/decode error actually occurred.
ff = aa.encode(encoding="UTF-8", errors="strict")  # encode str -> bytes
print("编码为:{0}".format(ff))
print(type(ff))

gg = ff.decode(encoding="UTF-8", errors="strict")  # decode bytes -> str
print("解码为:{0}".format(gg))
print(type(gg))

hh = aa.endswith('g', 0, len(aa))  # True if the (sub)string ends with 'g'
print(hh)

ii = aa.expandtabs(tabsize=8)  # replace each tab with spaces (tab stop = 8)
print(ii)

jj = aa.find('ef', 0, len(aa))  # index of the first 'ef', or -1 if absent
print(jj)

kk = aa.format()  # no placeholders, so this returns the string unchanged
print(kk)

ll = aa.index('ef', 0, len(aa))  # like find(), but raises ValueError if absent
print("查找结果为:{0}".format(ll))

mm = aa.isalnum()  # True if non-empty and every char is a letter or digit
print("至少有一个字符并且所有字符都是字母或数字:{}".format(mm))

nn = aa.isalpha()  # True if non-empty and every char is a letter
print("至少有一个字符并且所有字符都是字母:{}".format(nn))

oo = aa.isdecimal()  # True if the string contains only decimal digits
print("只包含十进制数字:{}".format(oo))

pp = aa.isdigit()  # True if the string contains only digits
print("只包含数字:{}".format(pp))

qq = ','.join(aa)  # join every character of aa with ',' separators
print("{}".format(qq))

print("最大值为:{0},最小值为:{1},aa的类型为:{2}".format(max(aa), min(aa), type(aa)))
"whatever"
] | whatever |
e137e657cfe2c20b7845d3caf20fc1bb6af1faba | c70f570f25fd4f9839e351433c1f0db57eccd09a | /project_name/sitemaps.py | f5a9c80a87be9345133b0e82f95a5268b991dd32 | [] | no_license | nigma/django-modern-template | 1e4f34039a293b1df56fa659ad887186dd99b607 | d07332b4833fa2371e5238fcfada28312b0cb3c4 | refs/heads/master | 2020-05-19T16:06:44.453500 | 2013-03-18T09:56:24 | 2013-03-18T09:56:24 | 8,131,354 | 14 | 6 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.sitemaps import GenericSitemap
from about.sitemaps import AboutSitemap
#class SampleSitemap(GenericSitemap):
# info_dict = {"queryset": Model.objects.all(), "date_field": "modified_at"}
# changefreq = "week"
# priority = 0.6
#
# def __init__(self):
# super(SampleSitemap, self).__init__(
# self.info_dict, self.priority, self.changefreq)
# Site-wide sitemap registry: section name -> Sitemap instance.
# Passed as the `sitemaps` argument to django.contrib.sitemaps views.
sitemaps = {
    "about": AboutSitemap(),
}
| [
"en@ig.ma"
] | en@ig.ma |
6dfa82dfc007752e434b0ef5a458206ed37322d3 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_18932.py | 6316b092571e2a8d6e30ac4c754fb7905e320781 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | # SHA256 encoding in javascript
var signature = CryptoJS.HmacSHA256(message,API_SECRET).toString(CryptoJS.enc.Hex).toUpperCase();
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
05d396829b764106bd6d6176a512e5fe93f7cba2 | 388081b4b22f62920b4f33f8ee6a13ca389dc7c2 | /backend/app/__init__.py | a556152e74dc73f0f3b737d95d053ba6fd459c7e | [] | no_license | NishanthMHegde/CompleteBlockchain | 44e28a3f342539d11c8ac14861bf05fb6397d0aa | bba3c1ff3b4c10abc09e0233b9184b308450e9d5 | refs/heads/master | 2022-12-05T01:06:39.386223 | 2020-08-29T17:37:43 | 2020-08-29T17:37:43 | 281,154,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,988 | py | import os
import random
import requests
from flask import Flask, jsonify, request
from flask_cors import CORS
from backend.blockchain.blockchain import Blockchain
from backend.blockchain.block import Block
from backend.pubsub import PubSub
from backend.wallet.wallet import Wallet
from backend.wallet.transactions import Transactions
from backend.wallet.transaction_pool import TransactionPool
app = Flask(__name__)
# Allow the local React dev server (port 3000) to call every API route.
CORS(app, resources = {r'/*': {'origins': 'http://localhost:3000'}})

# Module-level singletons shared by every request handler below.
blockchain = Blockchain()
wallet = Wallet(blockchain)
transaction_pool = TransactionPool()
pubsub = PubSub(blockchain, transaction_pool)
@app.route('/')
def welcome_blockchain():
    """Root endpoint: plain greeting with an explicit 200 status."""
    greeting = "Welcome to the Blockchain"
    return greeting, 200
@app.route('/blockchain')
def get_blockchain():
    """Serialize the full local chain as JSON with an explicit 200 status."""
    chain_json = blockchain.to_json()
    return jsonify(chain_json), 200
@app.route('/blockchain/mine')
def mine_block():
    """Mine a new block from pending transactions and broadcast it."""
    # Snapshot the pending transactions for inclusion in the new block.
    transaction_data = transaction_pool.transaction_data()
    # Append the miner's reward transaction to the block payload.
    transaction_reward = Transactions.transaction_reward(wallet).to_json()
    transaction_data.append(transaction_reward)
    blockchain.add_block(transaction_data)
    block = blockchain.chain[-1]
    # Publish the freshly mined block to all subscribed peers.
    pubsub.broadcast_block(block)
    # The mined block's transactions are now on-chain; drop them from
    # the local pool so they are not mined twice.
    transaction_pool.clear_transaction(blockchain)
    return block.to_json(), 200
@app.route('/wallet/transaction', methods=['POST'])
def make_transaction():
    """Create or update this node's pending transaction from POSTed JSON.

    Expects a JSON body with 'recipient' and 'amount'.  If the wallet
    already has a pending transaction in the pool it is updated in
    place; otherwise a new one is created.  The transaction is then
    broadcast to peers and echoed back as JSON.
    """
    payload = request.get_json()
    recipient = payload['recipient']
    amount = payload['amount']

    transaction = transaction_pool.existing_transaction(wallet.address)
    if transaction:
        transaction.update_transaction(wallet, recipient, amount)
    else:
        transaction = Transactions(wallet, recipient, amount)

    pubsub.broadcast_transaction(transaction)
    return jsonify(transaction.to_json())
@app.route('/wallet/info')
def get_wallet_info():
    """Expose this node's wallet address and current balance as JSON."""
    info = {"address": wallet.address, "balance": wallet.balance}
    return jsonify(info)
#code to get a range from blockchain
@app.route('/blockchain/range')
def get_blockchain_range():
    """Return a slice of the chain, newest block first.

    Query params 'start' and 'end' index into the reversed chain.
    """
    lo = int(request.args.get('start'))
    hi = int(request.args.get('end'))
    newest_first = blockchain.to_json()[::-1]
    return jsonify(newest_first[lo:hi])
@app.route('/blockchain/length')
def get_blockchain_length():
    """Return the number of blocks in the local chain as JSON."""
    length = len(blockchain.chain)
    return jsonify(length)
@app.route('/transactions/history')
def get_historical_recipiets():
    """Collect every address that appears in any transaction output.

    The genesis block (index 0) carries no real transactions and is
    skipped.
    """
    addresses = set()
    for mined_block in blockchain.chain[1:]:
        for transaction in mined_block.data:
            addresses.update(transaction['output'].keys())
    return jsonify(list(addresses))
@app.route('/transactions')
def get_all_transactions():
    """Return every pending transaction currently in the pool."""
    return jsonify(transaction_pool.transaction_data())
# The first node runs on ROOT_PORT; peers pick a random high port and
# sync their chain from the root node before serving.
ROOT_PORT = 5000
PORT = ROOT_PORT

if os.getenv('PEER'):
    PORT = random.randint(5001, 6000)
    # Pull the root node's chain and try to adopt it locally.
    result = requests.get("http://localhost:%s/blockchain" % (ROOT_PORT))
    result_blockchain = Blockchain.from_json(result.json())
    try:
        blockchain.replace_chain(result_blockchain)
        print("Chain replacement with root node was successful")
    except Exception as e:
        # Best-effort sync: keep the local chain if replacement fails.
        print("Chain replacement was not successful: %s" % (e))

#Seed a few data into the backend to help us evaluate the frontend design
if os.getenv('SEED'):
    for i in range(0,10):
        blockchain.add_block([Transactions(Wallet(), Wallet().address, i).to_json()])
    for i in range(0, 10):
        transaction_pool.set_transaction(Transactions(Wallet(), Wallet().address, i))

app.run(port=PORT)
| [
"="
] | = |
3672da256a51dd04dafc9658126eedaece7df6df | 43edf3962d8a76f1d656bd08a82211baf4bd63cf | /classification/algorithm/2-dynamic programming/2-linear model/6-152. Maximum Product Subarray.py | 3c78c531e979556966ec78f29d07da07d91cd937 | [] | no_license | jungleQi/leetcode-sections | 2fea929a9b9fcc99675fba22081c6b90b4f57781 | c3110302443fbeb7d4e84e23f3b8b80f3aaadb3c | refs/heads/master | 2022-08-31T09:13:10.026105 | 2022-08-10T02:20:44 | 2022-08-10T02:20:44 | 205,525,119 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | '''
Given an integer array nums, find the contiguous subarray
within an array (containing at least one number) which has the largest product.
Example 1:
Input: [2,3,-2,4]
Output: 6
Explanation: [2,3] has the largest product 6.
'''
def maxProduct(nums):
minProd, maxProd, maxTotal = nums[0], nums[0], nums[0]
for num in nums[1:]:
minProd, maxProd = min(minProd * num, maxProd * num, num), max(num, minProd * num, maxProd * num)
maxTotal = max(minProd, maxProd, maxTotal)
return maxTotal
nums = [2,3,-2,4]
print maxProduct(nums) | [
"85937023@qq.com"
] | 85937023@qq.com |
eabff6238cf14fbd966ed05b210da99389dd6c4a | 59107025a7f9afe0f94d194d547d0354e11ff6e7 | /Python3-WorldIII/ex106.py | df16384865ec069b2eede307f087277bb1538eef | [
"MIT"
] | permissive | samirsaravia/Python_101 | 083856643a5ca132f7126bb9a6b51b3805ba6bbe | 0c45f11d74a356514a0c436ade6af4c0f67c56b7 | refs/heads/master | 2022-12-19T16:12:20.751592 | 2020-10-19T12:30:18 | 2020-10-19T12:30:18 | 251,749,435 | 0 | 0 | MIT | 2020-10-19T12:45:40 | 2020-03-31T21:42:36 | Python | UTF-8 | Python | false | false | 1,973 | py | from time import sleep
def cor(lb_ffuc=False, lp_fa=False, lp_fb=False, lb_fvm=False, fim_c=False):
"""
-> Cores (Opcionais)
:param lb_ffuc: letra branca, fundo fuccia
:param lp_fa: letra preta, fundo azul
:param lp_fb: letra preta, fundo branco
:param lb_fvm: letra branca, fundo vermelho
:param fim_c: fim da cor
:return: código das cores escolhidas.
"""
if lb_ffuc:
return '\033[1;45m'
if lp_fa:
return '\033[1;30;44m'
if lp_fb:
return '\033[7;97m'
if lb_fvm:
return '\033[1;97;41m'
if fim_c:
return '\033[m'
def lin(lugar_linha):
"""
-> Linha que se adapta a frase
:param lugar_linha: calcula quantas palavras tem a frase (com +2 espaços adicionados/final)
"""
print(f'{"~" * (len(lugar_linha) + 2)}')
def py_help():
"""
-> Ajuda interativa usando o help()
:return: resultado da pesquisa ,dentro do help() e usa cores de funções.
"""
bibl = {'title': ' Sistema de ajuda Py_help', 'acesso': ' Acessando ao comando ', 'fim': ' Até Logo'}
busca = ' '
while busca != 'fim':
sleep(1.6)
print(f'{cor(fim_c=True)}{cor(lb_ffuc=True)}', end='')
lin(bibl['title'])
print(bibl['title'])
lin(bibl['title'])
busca = input(f'{cor(fim_c=True)}{"Função ou Biblioteca >>> "}').lower().strip()
if busca == 'fim':
sleep(1.3)
print(cor(lb_fvm=True), end='')
lin(bibl['fim'])
print(bibl["fim"])
lin(bibl['fim'])
break
else:
sleep(2)
print(cor(lp_fa=True), end='')
lin(bibl['acesso'] + busca)
print(f'{bibl["acesso"]}{busca}')
lin(bibl['acesso'] + busca)
sleep(3)
print(f'{cor(fim_c=True)}{cor(lp_fb=True)}', end='')
help(busca)
return help
# programa principal
py_help()
| [
"samir.saravia.10@gmail.com"
] | samir.saravia.10@gmail.com |
fdcdcd9219447b29e843fce9320eb4fade2222ee | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Detectron/lib/modeling/retinanet_heads.py | 638b58a49e2a8478d32a0144e2cb1b2578cbe5c7 | [
"MIT",
"Apache-2.0"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:46c17769887b4cd659082149a6e8c284b34b8d5a5385409f49f3d84ddde51cbd
size 11910
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
cec687423cdc29c8b41e252903ff99b1fd63f5a1 | d5fd936e7346844a1b7c5ea81dfa9adf5bb647d0 | /models/model_config.py | 6b5524cb0eadcef61b271749110a41abef29ba79 | [] | no_license | isaachenrion/graphs | 098e7098a894a3d1d9d18cf0ce1054e5910afa15 | 2ba6d50a7f61233fa8cc92ba03256691abb889de | refs/heads/master | 2021-01-02T09:10:49.686240 | 2017-09-11T19:52:48 | 2017-09-11T19:52:48 | 99,154,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from collections import namedtuple
from .mpnn import make_mpnn, get_mpnn_config
from .flat import make_flat, get_flat_config
def get_model_generator(model_str):
    """Return the model factory for the named family (None if unknown)."""
    mpnn_family = ('mpnn', 'mpnn_set', 'vcn', 'dtnn')
    if model_str == 'flat':
        return make_flat
    if model_str in mpnn_family:
        return make_mpnn
    return None
def get_model_config(model_str, args, dataset):
    """Build the config object for the named model family (None if unknown)."""
    mpnn_family = ('mpnn', 'mpnn_set', 'vcn', 'dtnn')
    if model_str == 'flat':
        return get_flat_config(args, dataset)
    if model_str in mpnn_family:
        return get_mpnn_config(args, dataset)
    return None
| [
"isaachenrion@gmail.com"
] | isaachenrion@gmail.com |
b3f923b791a7f6316391a5ac13ce4fc941bd94a0 | 9e0103b7507c6f9798ac366396f54a1e68bfaea8 | /Arrays/MinStartValue.py | 721ea81e45965a1d180d931664436aef23e220d6 | [] | no_license | Swagat-Kumar/Competitive_Codes_Python | 0dcdeb76cab50fde4041165835b8a57e083dbc24 | b3e64438082d9bce4b840a9253d42adc5de07dd3 | refs/heads/master | 2023-06-15T08:39:30.612215 | 2021-07-20T10:07:20 | 2021-07-20T10:07:20 | 300,352,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | class Solution:
def minStartValue(self, nums) -> int:
minn = 10*6
aux = 0
for n in nums:
aux += n
if aux < minn:
minn = aux
if minn >= 1:
return 1
return abs(minn)+1
| [
"swagat.kumar.code@gmail.com"
] | swagat.kumar.code@gmail.com |
ca81ac945337655fcc3df93e4d9ba66940e3b384 | 51d738d8905e274887fc2505c0e5b351a549c8f4 | /snippets/models/validators.py | 0568cb0df6e3949424a288c03bf2ec25ad4ec366 | [
"MIT"
] | permissive | wizzzet/github_backend | 5688d8e12a71050a579bf68f8715170ccf174b62 | 9e4b5d3273e850e4ac0f425d22911987be7a7eff | refs/heads/main | 2023-01-03T17:30:53.346526 | 2020-10-28T01:55:12 | 2020-10-28T01:55:12 | 307,731,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | def validate_svg_file_extension(value):
import os
from django.core.exceptions import ValidationError
ext = os.path.splitext(value.name)[1]
valid_extensions = ['.svg']
if not ext.lower() in valid_extensions:
raise ValidationError('Файл не в формате SVG')
| [
"wizzzet@gmail.com"
] | wizzzet@gmail.com |
c4846cad1369e6efeb3ecf4c058fc0611d2f595a | a4a2e0f31b55d1d4f1c56eb2d8056a465fa84d87 | /clastic/middleware/core.py | 9a57cda1100960529575a33c400af70341cac803 | [
"BSD-3-Clause"
] | permissive | kurtbrose/clastic | 45d03bd8c97b80556b209f487fa3f787c09aefdf | 0ef3700dee56c06bfd3efc20cdb9135bb320de06 | refs/heads/master | 2023-08-18T08:50:27.985916 | 2014-04-04T19:03:53 | 2014-04-04T19:03:53 | 10,716,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,300 | py | # -*- coding: utf-8 -*-
import itertools
from collections import defaultdict
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseResponse # TODO: remove dependency
from ..sinter import make_chain, get_arg_names, _VERBOSE
class Middleware(object):
    """Base class for clastic middlewares.

    Subclasses may define any of the hook methods ``request``,
    ``endpoint``, and ``render`` (each must accept ``next`` as its first
    argument) and advertise the argument names they inject through the
    ``provides``/``endpoint_provides``/``render_provides`` tuples.
    """
    # When True, at most one middleware of this type may appear in a
    # merged stack (see merge_middlewares below).
    unique = True
    # When True, a duplicate of a unique middleware is silently dropped
    # during merging instead of raising ValueError.
    reorderable = True
    # Argument names this middleware injects for the request, endpoint,
    # and render phases, respectively.
    provides = ()
    endpoint_provides = ()
    render_provides = ()
    # Hook slots; None means "this middleware does not hook that phase".
    request = None
    endpoint = None
    render = None

    @property
    def name(self):
        return self.__class__.__name__

    def __eq__(self, other):
        # Middlewares compare equal per *type*, not per instance.
        return type(self) == type(other)

    def __ne__(self, other):
        return type(self) != type(other)

    @cached_property
    def requires(self):
        # Union of argument names across all defined hooks, minus the
        # middleware-only 'next' argument.
        # NOTE(review): get_arg_names(func, True) presumably restricts
        # to required (defaultless) arguments -- confirm against sinter.
        reqs = []
        for func_name in ('request', 'endpoint', 'render'):
            func = getattr(self, func_name, None)
            if func:
                reqs.extend(get_arg_names(func, True))
        unique_reqs = set(reqs)
        unique_reqs.discard('next')
        return list(unique_reqs)

    @cached_property
    def arguments(self):
        # Every argument name (including 'next' and defaulted ones)
        # across the defined hooks.
        args = []
        for func_name in ('request', 'endpoint', 'render'):
            func = getattr(self, func_name, None)
            if func:
                args.extend(get_arg_names(func))
        return set(args)
def check_middleware(mw):
    """Validate one middleware: each defined hook must be callable and
    must take 'next' as its first argument.

    Raises TypeError on the first hook that violates the contract.
    """
    for hook_name in ('request', 'endpoint', 'render'):
        hook = getattr(mw, hook_name, None)
        if not hook:
            continue
        if not callable(hook):
            raise TypeError('expected %s.%s to be a function'
                            % (mw.name, hook_name))
        if not get_arg_names(hook)[0] == 'next':
            raise TypeError("middleware functions must take argument"
                            " 'next' as the first parameter (%s.%s)"
                            % (mw.name, hook_name))
def check_middlewares(middlewares, args_dict=None):
    """Validate each middleware and ensure no argument name is provided
    by more than one source.

    `args_dict` optionally maps extra sources to the argument names
    they provide.  Raises NameError on conflicting provides; returns
    True when everything checks out.
    """
    providers = defaultdict(list)
    for source, names in (args_dict or {}).items():
        for name in names:
            providers[name].append(source)
    for mw in middlewares:
        check_middleware(mw)
        for attr in ('provides', 'endpoint_provides', 'render_provides'):
            for name in getattr(mw, attr):
                providers[name].append(mw)
    conflicts = [(n, tuple(ps)) for (n, ps) in
                 providers.items() if len(ps) > 1]
    if conflicts:
        raise NameError('found conflicting provides: %r' % conflicts)
    return True
def merge_middlewares(old, new):
    """Combine two middleware lists (`new` first), dropping reorderable
    duplicates of unique middlewares coming from `old`.

    Raises ValueError for a duplicated unique middleware that is not
    reorderable.

    TODO: since duplicate provides aren't allowed, an error needs to be
    raised if a middleware is set to non-unique and has provides params.
    """
    merged = list(new)
    for mw in list(old):
        is_duplicate = mw.unique and mw in merged
        if not is_duplicate:
            merged.append(mw)
        elif not mw.reorderable:
            raise ValueError('multiple inclusion of unique '
                             'middleware %r' % mw.name)
    return merged
class DummyMiddleware(Middleware):
    """No-op middleware that optionally traces request handling to stdout.

    Useful in tests and examples to observe middleware call order.
    (Note: uses Python 2 print statements, as does the rest of this file.)
    """
    def __init__(self, verbose=False):
        self.verbose = verbose

    def request(self, next, request):
        # Identify this instance in the trace output.
        name = '%s (%s)' % (self.__class__.__name__, id(self))
        if self.verbose:
            print name, '- handling', id(request)
        try:
            ret = next()
        except Exception as e:
            if self.verbose:
                print name, '- uhoh:', repr(e)
            raise  # re-raise unchanged: tracing must never swallow errors
        if self.verbose:
            print name, '- hooray:', repr(ret)
        return ret
def make_middleware_chain(middlewares, endpoint, render, preprovided):
    """
    Compile middlewares + endpoint + render into one request callable.

    Expects de-duplicated and conflict-free middleware/endpoint/render
    functions.

    # TODO: better name to differentiate a compiled/chained stack from
    # the core functions themselves (endpoint/render)
    """
    # 'next' is injected by the chain machinery itself, so the core
    # endpoint/render functions must not claim it.
    _next_exc_msg = "argument 'next' reserved for middleware use only (%r)"
    if 'next' in get_arg_names(endpoint):
        raise NameError(_next_exc_msg % endpoint)
    if 'next' in get_arg_names(render):
        raise NameError(_next_exc_msg % render)
    # Arguments available to request-phase middlewares; 'next'/'context'
    # are chain-internal and never injected from outside.
    req_avail = set(preprovided) - set(['next', 'context'])
    req_sigs = [(mw.request, mw.provides)
                for mw in middlewares if mw.request]
    # NOTE(review): `zip(*req_sigs) or ((), ())` relies on Python 2's zip
    # returning a (possibly empty, falsy) list; under Python 3 zip returns
    # a truthy iterator -- confirm before porting.
    req_funcs, req_provides = zip(*req_sigs) or ((), ())
    req_all_provides = set(itertools.chain.from_iterable(req_provides))
    # Endpoint phase sees everything the request phase can provide.
    ep_avail = req_avail | req_all_provides
    ep_sigs = [(mw.endpoint, mw.endpoint_provides)
               for mw in middlewares if mw.endpoint]
    ep_funcs, ep_provides = zip(*ep_sigs) or ((), ())
    ep_chain, ep_args, ep_unres = make_chain(ep_funcs,
                                             ep_provides,
                                             endpoint,
                                             ep_avail)
    if ep_unres:
        raise NameError("unresolved endpoint middleware arguments: %r"
                        % list(ep_unres))
    # Render phase additionally receives the endpoint's return ('context').
    rn_avail = ep_avail | set(['context'])
    rn_sigs = [(mw.render, mw.render_provides)
               for mw in middlewares if mw.render]
    rn_funcs, rn_provides = zip(*rn_sigs) or ((), ())
    rn_chain, rn_args, rn_unres = make_chain(rn_funcs,
                                             rn_provides,
                                             render,
                                             rn_avail)
    if rn_unres:
        raise NameError("unresolved render middleware arguments: %r"
                        % list(rn_unres))
    # The innermost request function forwards whatever the endpoint and
    # render sub-chains need ('context' is produced internally).
    req_args = (ep_args | rn_args) - set(['context'])
    req_func = _create_request_inner(ep_chain,
                                     rn_chain,
                                     req_args,
                                     ep_args,
                                     rn_args)
    req_chain, req_chain_args, req_unres = make_chain(req_funcs,
                                                      req_provides,
                                                      req_func,
                                                      req_avail)
    if req_unres:
        raise NameError("unresolved request middleware arguments: %r"
                        % list(req_unres))
    return req_chain
_REQ_INNER_TMPL = \
'''
def process_request({all_args}):
context = endpoint({endpoint_args})
if isinstance(context, BaseResponse):
resp = context
else:
resp = render({render_args})
return resp
'''
def _named_arg_str(args):
return ', '.join([a + '=' + a for a in args])
def _create_request_inner(endpoint, render, all_args,
                          endpoint_args, render_args,
                          verbose=_VERBOSE):
    """Generate the innermost process_request function.

    Fills _REQ_INNER_TMPL with the argument lists, then compiles and
    executes it (Python 2 exec-in-dict) with the endpoint/render callables
    bound, returning the resulting function.
    """
    all_args_str = ','.join(all_args)
    # name=name pairs so each function only receives the args it declared.
    ep_args_str = _named_arg_str(endpoint_args)
    rn_args_str = _named_arg_str(render_args)
    code_str = _REQ_INNER_TMPL.format(all_args=all_args_str,
                                      endpoint_args=ep_args_str,
                                      render_args=rn_args_str)
    if verbose:
        print code_str # pragma: nocover
    # Namespace the generated code runs in; doubles as its global scope.
    d = {'endpoint': endpoint, 'render': render, 'BaseResponse': BaseResponse}
    exec compile(code_str, '<string>', 'single') in d
    return d['process_request']
| [
"mahmoudrhashemi@gmail.com"
] | mahmoudrhashemi@gmail.com |
eb1d07588582ac984936dd9a07b01a8fec94db97 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/127/usersdata/194/30378/submittedfiles/ex11.py | 846a69c86ba74cbdc026020668fb9f7c79d6d3a8 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # -*- coding: utf-8 -*-
# ex11: read two dates (day/month/year entered as separate integers) and
# report which one is later ('Data1'/'Data2') or whether they are equal.
# Fixes over the original: `intt` typo, missing colons on several
# if/elif lines, an unclosed parenthesis, and inconsistent output strings.
a1 = int(input('Digite a1:'))
a2 = int(input('Digite a2:'))
m1 = int(input('Digite m1:'))
m2 = int(input('Digite m2:'))
d1 = int(input('Digite d1:'))
d2 = int(input('Digite d2:'))
# Compare the most-significant field first: year, then month, then day.
if a1 > a2:
    print('Data1')
elif a1 < a2:
    print('Data2')
else:
    if m1 > m2:
        print('Data1')
    elif m1 < m2:
        print('Data2')
    else:
        if d1 > d2:
            print('Data1')
        elif d1 < d2:
            print('Data2')
        else:
            print('Datas iguais')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
0e735a2a29cf2062f9c96b513ed2bed8d6c1c35c | ffa21e4415ead5106f7f846bc24b0d308ace90b5 | /test/test_transfer_challenge_response.py | 0dbcbbf2b2f2eb7e8c7082dfd25b53ceb7dc95e7 | [] | no_license | steini58/swagger-client | fa7b6f077e5a1b01e42c4420b214b19e1d364e4e | e5fd7bf28f8529746e18bdd799c86ad78310ffd5 | refs/heads/master | 2020-03-29T09:14:26.644065 | 2018-09-20T13:29:14 | 2018-09-20T13:29:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | # coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.transfer_challenge_response import TransferChallengeResponse # noqa: E501
from swagger_client.rest import ApiException
class TestTransferChallengeResponse(unittest.TestCase):
    """Generated unit test stubs for the TransferChallengeResponse model."""
    def setUp(self):
        # No fixtures are required for these generated stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testTransferChallengeResponse(self):
        """Test TransferChallengeResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.transfer_challenge_response.TransferChallengeResponse() # noqa: E501
        pass
# Allow invoking this generated test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"peter.steinberg@natur-und-genuss.de"
] | peter.steinberg@natur-und-genuss.de |
7ad7c162a36a31a46ea76459a79ebd354b4d3be3 | e6af116d506d0cc4439c119b0c57545522ad766e | /src/ltdconveyor/fastly.py | dac6dbf207da5a81ef5bc0937d715ea92fe35dc1 | [
"MIT"
] | permissive | lsst-sqre/ltd-conveyor | 8369339389afb4a9f3e70509b63bddbd23748da7 | caa017b56308a8bf3d8480d78a7c2e50b22b492b | refs/heads/main | 2023-07-05T12:45:21.855746 | 2023-01-05T23:57:59 | 2023-01-05T23:57:59 | 79,280,673 | 0 | 0 | MIT | 2023-09-11T12:33:25 | 2017-01-17T22:50:51 | Python | UTF-8 | Python | false | false | 1,529 | py | """Management of Fastly CDN caching.
See https://docs.fastly.com/api for background.
"""
import logging
import requests
from ltdconveyor.exceptions import ConveyorError
__all__ = ["purge_key", "FastlyError"]
def purge_key(surrogate_key: str, service_id: str, api_key: str) -> None:
    """Instant purge URLs with a given surrogate key from the Fastly caches.

    Parameters
    ----------
    surrogate_key : `str`
        Surrogate key header (``x-amz-meta-surrogate-key``) value of objects
        to purge from the Fastly cache.
    service_id : `str`
        Fastly service ID.
    api_key : `str`
        Fastly API key.

    Raises
    ------
    FastlyError
        Error with the Fastly API usage.

    Notes
    -----
    This function uses Fastly's ``/service/{service}/purge/{key}`` endpoint.
    See the `Fastly Purge documentation <http://ls.st/jxg>`_ for more
    information.

    For other Fastly APIs, consider using `fastly-py
    <https://github.com/fastly/fastly-py>`_.
    """
    logger = logging.getLogger(__name__)

    api_root = "https://api.fastly.com"
    path = "/service/{service}/purge/{surrogate_key}".format(
        service=service_id, surrogate_key=surrogate_key
    )
    logger.info("Fastly purge {0}".format(path))

    r = requests.post(
        api_root + path,
        headers={"Fastly-Key": api_key, "Accept": "application/json"},
    )
    if r.status_code != 200:
        # Bug fix: the original raised FastlyError(r.json), passing the
        # bound method object instead of the decoded response body.
        try:
            detail = r.json()
        except ValueError:
            # Body was not valid JSON; fall back to the raw text.
            detail = r.text
        raise FastlyError(detail)
class FastlyError(ConveyorError):
    """Error related to Fastly API usage.

    Raised by :func:`purge_key` when the Fastly API responds with a
    non-200 status; the exception argument carries details from the
    failed response.
    """
"jsick@lsst.org"
] | jsick@lsst.org |
acdf023bbea2a0c641fda84b5856d26b057dc70c | 70f78169207fa9aef8df390d6c3d77124c265c25 | /azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/packet_capture_result.py | ab813c92a6402c8e3dc07802c399388846aedfbc | [
"MIT"
] | permissive | the1bit/azure-sdk-for-python | c89ad1d41a42c7e8ad0f6274cda09364be2d8d11 | 572208cdaa7d917281f6482c67725f514925c9b0 | refs/heads/master | 2021-05-12T13:53:16.194983 | 2018-01-10T01:16:08 | 2018-01-10T01:16:08 | 116,941,214 | 1 | 0 | null | 2018-01-10T10:16:00 | 2018-01-10T10:15:59 | null | UTF-8 | Python | false | false | 3,746 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PacketCaptureResult(Model):
    """Information about packet capture session.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: Name of the packet capture session.
    :vartype name: str
    :ivar id: ID of the packet capture operation.
    :vartype id: str
    :param etag: Default value: "A unique read-only string that changes
     whenever the resource is updated." .
    :type etag: str
    :param target: The ID of the targeted resource, only VM is currently
     supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet,
     the remaining bytes are truncated. Default value: 0 .
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
     Default value: 1073741824 .
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in
     seconds. Default value: 18000 .
    :type time_limit_in_seconds: int
    :param storage_location:
    :type storage_location:
     ~azure.mgmt.network.v2017_11_01.models.PacketCaptureStorageLocation
    :param filters:
    :type filters:
     list[~azure.mgmt.network.v2017_11_01.models.PacketCaptureFilter]
    :param provisioning_state: The provisioning state of the packet capture
     session. Possible values include: 'Succeeded', 'Updating', 'Deleting',
     'Failed'
    :type provisioning_state: str or
     ~azure.mgmt.network.v2017_11_01.models.ProvisioningState
    """

    # Server-populated fields are read-only; target and storage_location
    # must be supplied by the caller.
    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
        'target': {'required': True},
        'storage_location': {'required': True},
    }

    # Maps Python attribute names to the REST payload's (nested) JSON keys.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'target': {'key': 'properties.target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'properties.bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'properties.totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'properties.timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'properties.storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'properties.filters', 'type': '[PacketCaptureFilter]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, target, storage_location, etag="A unique read-only string that changes whenever the resource is updated.", bytes_to_capture_per_packet=0, total_bytes_per_session=1073741824, time_limit_in_seconds=18000, filters=None, provisioning_state=None):
        """Initialize the model; name and id are read-only (server-set)."""
        self.name = None
        self.id = None
        self.etag = etag
        self.target = target
        self.bytes_to_capture_per_packet = bytes_to_capture_per_packet
        self.total_bytes_per_session = total_bytes_per_session
        self.time_limit_in_seconds = time_limit_in_seconds
        self.storage_location = storage_location
        self.filters = filters
        self.provisioning_state = provisioning_state
| [
"laurent.mazuel@gmail.com"
] | laurent.mazuel@gmail.com |
cf598447235ef2a8bb6ccb9ee8eb37937c52c1c0 | 08f484c61bf303ee2ec78aff9960f4812fe1e839 | /coldtype/blender/livepreview.py | eb33dc2e861a335079e24d66bc6bb48d6a757afd | [
"Apache-2.0"
] | permissive | rohernandezz/coldtype | 02bee08e021be8dfe45328076c512f06ea8f13ae | 724234fce454699a469d17b6c78ae50fa8138169 | refs/heads/main | 2023-07-27T16:09:10.696755 | 2021-09-11T21:17:55 | 2021-09-11T21:17:55 | 405,537,609 | 0 | 0 | Apache-2.0 | 2021-09-12T03:34:29 | 2021-09-12T03:34:28 | null | UTF-8 | Python | false | false | 1,971 | py | # some code that doesn't really work
import bpy
import blf
import bgl
# import skia
# from OpenGL import GL
# context = skia.GrDirectContext.MakeGL()
# backend = skia.GrBackendRenderTarget(1080, 1080, 0, 0,
# skia.GrGLFramebufferInfo(0, GL.GL_RGBA8))
# surface = skia.Surface.MakeFromBackendRenderTarget(
# context, backend,
# skia.kBottomLeft_GrSurfaceOrigin,
# skia.kRGBA_8888_ColorType,
# skia.ColorSpace.MakeSRGB())
def get_fac():
    """Return the preview render scale factor (1 unless the proxy render
    size is taken from the scene resolution percentage)."""
    use_scene_size = bpy.context.space_data.proxy_render_size == 'SCENE'
    if not use_scene_size:
        return 1
    return bpy.context.scene.render.resolution_percentage / 100
def view_zoom_preview():
    """Return the current preview-region zoom, normalized by get_fac()."""
    region = bpy.context.region
    w, h = region.width, region.height
    lo = region.view2d.region_to_view(0, 0)
    hi = region.view2d.region_to_view(w - 1, h - 1)
    view_span = hi[0] - lo[0]
    return (1 / (w / view_span)) / get_fac()
class DrawingClass:
    """Registers a draw callback on the sequencer preview region.

    The actual drawing is currently commented out (blf/GL/skia
    experiments), so the callback only computes a region point.
    """
    def __init__(self, msg):
        self.msg = msg
        # Keep the handler reference so remove_handle() can unregister it.
        self.handle = bpy.types.SpaceSequenceEditor.draw_handler_add(
            self.draw_text_callback, (), 'PREVIEW', 'POST_PIXEL')
    def draw_text_callback(self):
        #print(">>>", view_zoom_preview())
        # NOTE(review): pt is currently unused because all drawing below
        # is commented out.
        pt = bpy.context.region.view2d.view_to_region(-540,-540,clip=False)
        # font_id = 0 # XXX, need to find out how best to get this.
        # blf.position(font_id, pt[0], pt[1], 0)
        # blf.size(font_id, 66, 72)
        # blf.draw(font_id, "%s" % (self.msg))
        #GL.glClear(GL.GL_COLOR_BUFFER_BIT)
        #with surface as context:
        #    context.clear(skia.Color4f(1, 0.3, 0.1, 1))
        #surface.flushAndSubmit()
    def remove_handle(self):
        # Unregister the callback added in __init__.
        bpy.types.SpaceSequenceEditor.draw_handler_remove(self.handle, 'PREVIEW')
widgets = {}
def register():
    # Create the demo overlay widget and keep a reference so it can be
    # removed again via unregister().
    widgets["Test"] = DrawingClass("Test2")
def unregister():
    """Remove the draw handler of every registered widget."""
    # Iterate values directly: the dict keys are not used.
    for dc in widgets.values():
        dc.remove_handle()
if __name__ == "__main__":
register() | [
"rob.stenson@gmail.com"
] | rob.stenson@gmail.com |
581c141ee0cfb3374ff41617241e7f7aee092b96 | d14a5a024966bd54c0555b2f23ecfbf816dd8be4 | /scripts/pixel_shuflle_example.py | ff3b4fb92aea60572f8fefd6c1624a51f283d4f8 | [
"MIT"
] | permissive | Nico-Curti/PhDthesis | ee3f8887c7e3b12a6132f93b239508e9f66043fb | 234b38234eb15870056f71c4f33946d8aed05aae | refs/heads/master | 2022-04-10T04:42:45.284383 | 2020-03-17T09:05:51 | 2020-03-17T09:05:51 | 200,654,056 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 7,219 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import numpy as np
__author__ = ['Nico Curti']
__email__ = ['nico.curti2@unibo.it']
__package__ = 'PixelShuffle example'
class Shuffler_layer(object):
    # NumPy implementation of the "pixel shuffle" (sub-pixel convolution)
    # rearrangement used by super-resolution networks.
    def __init__(self, scale):
        '''
        Shuffler Layer, performs a Pixel Shuffle.
        input shape (batch, w, h, c) -> (batch, w * scale, h * scale, c // scale**2) out shape
        Parameters:
            scale : int, scale of the shuffler.
        '''
        self.scale = scale
        self.scale_step = scale * scale
        # Input dimensions, recorded by forward().
        self.batch, self.w, self.h, self.c = (0, 0, 0, 0)
        self.output, self.delta = (None, None)
    def __str__(self):
        # Summarize input -> output shapes for logging.
        batch, out_width, out_height, out_channels = self.out_shape()
        return 'Shuffler x {:3d} {:>4d} x{:>4d} x{:>4d} x{:>4d} -> {:>4d} x{:>4d} x{:>4d} x{:>4d}'.format(
            self.scale,
            batch, self.w, self.h, self.c,
            batch, out_width, out_height, out_channels)
    def out_shape(self):
        # Spatial dims scale up, channel count divides by scale**2.
        return (self.batch, self.w * self.scale, self.h * self.scale, self.c // (self.scale_step))
    def _phase_shift(self, inpt, scale):
        '''
        Shuffles of the pixel in a given input
        Parameters:
            inpt : the input of this function is not the entire batch of images, but only
                a N channels at a time taken from every image, where N = out_c // scale**2
            scale : int, scale factor of the layer
        '''
        b, w, h, c = inpt.shape
        X = inpt.transpose(1, 2, 3, 0).reshape(w, h, scale, scale, b)
        # Two concatenations along axis 1 interleave the scale x scale
        # sub-pixel grid into the spatial dimensions.
        X = np.concatenate(X, axis=1)
        X = np.concatenate(X, axis=1)
        X = X.transpose(2, 0, 1)
        return np.reshape(X, (b, w * scale, h * scale, 1))
    def _reverse(self, delta, scale):
        '''
        Reverse function of _phase_shift
        Parameters:
            delta : input batch of deltas with shape (batch, out_w, out_h, 1)
            scale : int, scale factor of the layer
        '''
        # This function apply numpy.split as a reverse function to numpy.concatenate
        # along the same axis also
        delta = delta.transpose(1, 2, 0)
        delta = np.asarray(np.split(delta, self.h, axis=1))
        delta = np.asarray(np.split(delta, self.w, axis=1))
        delta = delta.reshape(self.w, self.h, scale*scale, self.batch)
        # It returns an output of the correct shape (batch, in_w, in_h, scale**2)
        # for the concatenate in the backward function
        return delta.transpose(3, 0, 1, 2)
    def forward(self, inpt):
        '''
        Forward function of the shuffler layer: it recieves as input an image in
        the format ('batch' not yet , in_w, in_h, in_c) and it produce an output
        with shape ('batch', in_w * scale, in_h * scale, in_c // scale**2)
        Parameters:
            inpt : input batch of images to be reorganized, with format (batch, in_w, in_h, in_c)
        '''
        self.batch, self.w, self.h, self.c = inpt.shape
        channel_output = self.c // self.scale_step # out_C
        # The function phase shift receives only in_c // out_c channels at a time
        # the concatenate stitches toghether every output of the function.
        self.output = np.concatenate([self._phase_shift(inpt[:, :, :, range(i, self.c, channel_output)], self.scale)
                                      for i in range(channel_output)], axis=3)
        # output shape = (batch, in_w * scale, in_h * scale, in_c // scale**2)
    def backward(self, delta):
        '''
        Backward function of the shuffler layer: it reorganize the delta to match the
        input shape, the operation is the exact inverse of the forward pass.
        Parameters:
            delta : global delta to be backpropagated with shape (batch, out_w, out_h, out_c)
        '''
        channel_out = self.c // self.scale_step #out_c
        # I apply the reverse function only for a single channel
        # NOTE(review): this reads self.delta, which __init__ sets to None and
        # which nothing in this class ever assigns; it looks like it should use
        # the `delta` parameter instead -- confirm against the caller.
        X = np.concatenate([self._reverse(self.delta[:, :, :, i],self.scale)
                            for i in range(channel_out)], axis=3)
        # The 'reverse' concatenate actually put the correct channels toghether but in a
        # weird order, so this part sorts the 'layers' correctly
        idx = sum([list(range(i, self.c, channel_out)) for i in range(channel_out)], [])
        idx = np.argsort(idx)
        # NOTE(review): X is input-shaped while delta is output-shaped, so this
        # in-place assignment looks shape-mismatched -- verify intent.
        delta[:] = X[:, :, :, idx]
if __name__ == '__main__':
    from PIL import Image
    from PIL import ImageDraw
    import seaborn as sns
    import pylab as plt
    from matplotlib.colors import ListedColormap
    def draw_grid (image):
        # Overlay a 5x5 black grid on the image (in place) and return it.
        draw = ImageDraw.Draw(image)
        y_start = 0
        y_end = image.height
        step_size = int(image.width / 5)
        for x in range(0, image.width, step_size):
            line = ((x, y_start), (x, y_end))
            draw.line(line, fill=0)
        x_start = 0
        x_end = image.width
        for y in range(0, image.height, step_size):
            line = ((x_start, y), (x_end, y))
            draw.line(line, fill=0)
        return image
    # One solid-color tile per input channel (scale=3 -> 9 channels).
    colors = sns.color_palette('hls', 9)
    # suppose img1 and img2 are your two images
    imgs = [Image.new('RGB', size=(51, 51), color=(int(colors[i][0]*255),
                                                   int(colors[i][1]*255),
                                                   int(colors[i][2]*255)
                                                   ))
            for i in range(9)]
    # Stack the channel tiles diagonally (sx, sy offsets) for display.
    sx, sy = (3, 3)
    nw, nh = ( imgs[0].size[0] + sx*(len(imgs)-1),
               imgs[0].size[1] + sy*(len(imgs)-1)
               )
    imgs = [draw_grid(im) for im in imgs]
    res = Image.new('RGBA', size=(nw, nh), color=(0, 0, 0, 0))
    for i, im in enumerate(imgs):
        res.paste(im, (sx*i, sy*i))
    # Pixel shuffle
    # NOTE(review): `input` shadows the builtin; kept as-is for this demo.
    input = np.arange(0, 50 * 50 * 9).reshape(1, 9, 50, 50)
    input = input.transpose(0, 2, 3, 1) # Nice visualizations with the transpose arange
    cmap = ListedColormap(colors.as_hex())
    layer = Shuffler_layer(scale=3)
    layer.forward(input)
    forward_out = layer.output
    plt.rc('grid', linestyle="-", color='black')
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 7))
    fig.subplots_adjust(left=0.05, right=0.95, top=0.9, bottom=0.,
                        wspace=0.3)
    fig.suptitle('scale = {}'.format(3), fontsize=24, y=0.075)
    # Left panel: the stacked per-channel tiles before shuffling.
    ax1.imshow(res)
    ax1.set_title('Original Image\n($scale^2$ channels)', fontsize=24, y=1.1)
    ax1.axis('off')
    # Right panel: top-left 10x10 corner of the shuffled output.
    ax2.imshow(forward_out[0,:10,:10,0], cmap=cmap)
    ax2.set_title('High Resolution Image', fontsize=24, y=1.1)
    # And a corresponding grid
    ax2.grid(which='both')
    # Or if you want different settings for the grids:
    ax2.grid(which='minor', alpha=1, linewidth=2)
    ax2.grid(which='major', alpha=1, linewidth=2)
    ax2.set_xticks(np.arange(-0.5, 10.5, 1))
    ax2.set_xticks(np.arange(-0.5, 10.5, 1), minor=True)
    ax2.set_yticks(np.arange(-0.5, 10.5, 1))
    ax2.set_yticks(np.arange(-0.5, 10.5, 1), minor=True)
    ax2.axes.get_xaxis().set_ticks([])
    ax2.axes.get_yaxis().set_ticks([])
    bbox_props = dict(boxstyle="rarrow,pad=0.3", fc="w", ec="k", lw=2)
    t = ax1.text(85, 40, "Shuffle", ha="center", va="center", rotation=0,
                 size=15,
                 bbox=bbox_props)
    fig.savefig('../img/pixel_shuffle.svg')
    plt.show()
| [
"nico.curti2@unibo.it"
] | nico.curti2@unibo.it |
3feec8d2f70642c3206d62a021d808b084c70638 | 6b6e20004b46165595f35b5789e7426d5289ea48 | /workers/test/test_storagereplication.py | 8487b1d9782ad5c0d8f584900980a26779918436 | [
"Apache-2.0"
] | permissive | anwarchk/quay | 2a83d0ab65aff6a1120fbf3a45dd72f42211633b | 23c5120790c619174e7d36784ca5aab7f4eece5c | refs/heads/master | 2020-09-12T18:53:21.093606 | 2019-11-15T19:29:02 | 2019-11-15T19:29:02 | 222,517,145 | 0 | 0 | Apache-2.0 | 2019-11-18T18:32:35 | 2019-11-18T18:32:35 | null | UTF-8 | Python | false | false | 7,354 | py | import hashlib
import pytest
from data import model, database
from storage.basestorage import StoragePaths
from storage.fakestorage import FakeStorage
from storage.distributedstorage import DistributedStorage
from workers.storagereplication import (StorageReplicationWorker, JobException,
WorkerUnhealthyException)
from test.fixtures import *
@pytest.fixture()
def storage_user(app):
  """A user configured with both 'local_us' and 'local_eu' regions."""
  user = model.user.get_user('devtable')
  database.UserRegion.create(user=user,
                             location=database.ImageStorageLocation.get(name='local_us'))
  database.UserRegion.create(user=user,
                             location=database.ImageStorageLocation.get(name='local_eu'))
  return user
@pytest.fixture()
def storage_paths():
  """Helper for computing storage paths (V1 layers and CAS blobs)."""
  return StoragePaths()
@pytest.fixture()
def replication_worker():
  """Worker under test, constructed without a queue."""
  return StorageReplicationWorker(None)
@pytest.fixture()
def storage():
  """Two fake storage 'regions' with 'local_us' as the preferred one."""
  return DistributedStorage({'local_us': FakeStorage('local'), 'local_eu': FakeStorage('local')},
                            ['local_us'])
def test_storage_replication_v1(storage_user, storage_paths, replication_worker, storage, app):
  """Replicating a V1-path storage copies its data to the other region."""
  # Add a storage entry with a V1 path.
  v1_storage = model.storage.create_v1_storage('local_us')
  content_path = storage_paths.v1_image_layer_path(v1_storage.uuid)
  storage.put_content(['local_us'], content_path, 'some content')
  # Call replicate on it and verify it replicates.
  replication_worker.replicate_storage(storage_user, v1_storage.uuid, storage)
  # Ensure that the data was replicated to the other "region".
  assert storage.get_content(['local_eu'], content_path) == 'some content'
  locations = model.storage.get_storage_locations(v1_storage.uuid)
  assert len(locations) == 2
def test_storage_replication_cas(storage_user, storage_paths, replication_worker, storage, app):
  """Replicating a CAS-path storage copies its blob to the other region."""
  # Add a storage entry with a CAS path.
  content_checksum = 'sha256:' + hashlib.sha256('some content').hexdigest()
  cas_storage = database.ImageStorage.create(cas_path=True, content_checksum=content_checksum)
  location = database.ImageStorageLocation.get(name='local_us')
  database.ImageStoragePlacement.create(storage=cas_storage, location=location)
  content_path = storage_paths.blob_path(cas_storage.content_checksum)
  storage.put_content(['local_us'], content_path, 'some content')
  # Call replicate on it and verify it replicates.
  replication_worker.replicate_storage(storage_user, cas_storage.uuid, storage)
  # Ensure that the data was replicated to the other "region".
  assert storage.get_content(['local_eu'], content_path) == 'some content'
  locations = model.storage.get_storage_locations(cas_storage.uuid)
  assert len(locations) == 2
def test_storage_replication_missing_base(storage_user, storage_paths, replication_worker, storage,
                                          app):
  """Replication raises JobException when the source blob is absent."""
  # Add a storage entry with a CAS path.
  content_checksum = 'sha256:' + hashlib.sha256('some content').hexdigest()
  cas_storage = database.ImageStorage.create(cas_path=True, content_checksum=content_checksum)
  location = database.ImageStorageLocation.get(name='local_us')
  database.ImageStoragePlacement.create(storage=cas_storage, location=location)
  # Attempt to replicate storage. This should fail because the layer is missing from the base
  # storage.
  with pytest.raises(JobException):
    replication_worker.replicate_storage(storage_user, cas_storage.uuid, storage,
                                         backoff_check=False)
  # Ensure the storage location count remains 1. This is technically inaccurate, but that's okay
  # as we still require at least one location per storage.
  locations = model.storage.get_storage_locations(cas_storage.uuid)
  assert len(locations) == 1
def test_storage_replication_copy_error(storage_user, storage_paths, replication_worker, storage,
                                        app):
  """A failing copy raises JobException and adds no new location."""
  # Add a storage entry with a CAS path.
  content_checksum = 'sha256:' + hashlib.sha256('some content').hexdigest()
  cas_storage = database.ImageStorage.create(cas_path=True, content_checksum=content_checksum)
  location = database.ImageStorageLocation.get(name='local_us')
  database.ImageStoragePlacement.create(storage=cas_storage, location=location)
  content_path = storage_paths.blob_path(cas_storage.content_checksum)
  storage.put_content(['local_us'], content_path, 'some content')
  # Tell storage to break copying.
  storage.put_content(['local_us'], 'break_copying', 'true')
  # Attempt to replicate storage. This should fail because the write fails.
  with pytest.raises(JobException):
    replication_worker.replicate_storage(storage_user, cas_storage.uuid, storage,
                                         backoff_check=False)
  # Ensure the storage location count remains 1.
  locations = model.storage.get_storage_locations(cas_storage.uuid)
  assert len(locations) == 1
def test_storage_replication_copy_didnot_copy(storage_user, storage_paths, replication_worker,
                                              storage, app):
  """A copy that silently does nothing is detected and raises JobException."""
  # Add a storage entry with a CAS path.
  content_checksum = 'sha256:' + hashlib.sha256('some content').hexdigest()
  cas_storage = database.ImageStorage.create(cas_path=True, content_checksum=content_checksum)
  location = database.ImageStorageLocation.get(name='local_us')
  database.ImageStoragePlacement.create(storage=cas_storage, location=location)
  content_path = storage_paths.blob_path(cas_storage.content_checksum)
  storage.put_content(['local_us'], content_path, 'some content')
  # Tell storage to fake copying (i.e. not actually copy the data).
  storage.put_content(['local_us'], 'fake_copying', 'true')
  # Attempt to replicate storage. This should fail because the copy doesn't actually do the copy.
  with pytest.raises(JobException):
    replication_worker.replicate_storage(storage_user, cas_storage.uuid, storage,
                                         backoff_check=False)
  # Ensure the storage location count remains 1.
  locations = model.storage.get_storage_locations(cas_storage.uuid)
  assert len(locations) == 1
def test_storage_replication_copy_unhandled_exception(storage_user, storage_paths,
                                                      replication_worker, storage, app):
  """An unexpected copy exception surfaces as WorkerUnhealthyException."""
  # Add a storage entry with a CAS path.
  content_checksum = 'sha256:' + hashlib.sha256('some content').hexdigest()
  cas_storage = database.ImageStorage.create(cas_path=True, content_checksum=content_checksum)
  location = database.ImageStorageLocation.get(name='local_us')
  database.ImageStoragePlacement.create(storage=cas_storage, location=location)
  content_path = storage_paths.blob_path(cas_storage.content_checksum)
  storage.put_content(['local_us'], content_path, 'some content')
  # Tell storage to raise an exception when copying.
  storage.put_content(['local_us'], 'except_copying', 'true')
  # Attempt to replicate storage. This should fail because the copy raises an unhandled exception.
  with pytest.raises(WorkerUnhealthyException):
    replication_worker.replicate_storage(storage_user, cas_storage.uuid, storage,
                                         backoff_check=False)
  # Ensure the storage location count remains 1.
  locations = model.storage.get_storage_locations(cas_storage.uuid)
  assert len(locations) == 1
| [
"jimmy.zelinskie+git@gmail.com"
] | jimmy.zelinskie+git@gmail.com |
8d9c004eeae662f7add61e60531a5c45afbf757d | 83ed1e2f176133c03a5f6dfa504b8df15ae71efb | /projects/nonhomol_pdbJul05/outputBestTMcentroids.py | d37d874817feb9afaed091de0bd0cec176e91284 | [] | no_license | jmborr/code | 319db14f28e1dea27f9fc703be629f171e6bd95f | 32720b57699bf01803367566cdc5fff2b6bce810 | refs/heads/master | 2022-03-09T16:11:07.455402 | 2019-10-28T15:03:01 | 2019-10-28T15:03:01 | 23,627,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,103 | py | #!/usr/bin/python
import sys,os,re
from inputArgs.inputArgs import inpHand,deglobb
from utilities.small_utilities import chomp
from spicker.spickerResultsManager import spickOut
ih=inpHand('Usage: outputBestTMcentroids.py [options]\nGiven a list of headers, and a xxxxx-globbed directory,\ngoes to each de-globbed directory and assumes it will find\nsummary.txt, centroids.pdb, closest.pdb and a native file.\nThen finds centroid with best TM score to native, and outputs it as file "bestTMcentroid.pdb" in the de-globbed directory\n',
' -a _RA_ind (xxxxx-globbed) directory where summary.txt is (def=curr dir)',
' -b _RA_list list of headers',
' -c __nat native filename, supposed to be in SAME DIRECTORY as each summary.txt (xxxxx globbing allowed, def=CA)',
' -d __topc number of top clusters (ranked by density) from which select the best TM (def=5)',
' -e __seqdg (xxxxx-globbed) seq.dat file, in case we want TM-score for only non-coil assigned residues'
)
ih.parse(locals(),sys.argv)
#defaults
if not ind: int=os.getcwd()
if not nat: nat='CA'
if not topc: topc=5
else: topc=int(topc)
for header in chomp(open(list,'r').readlines()):
dir=deglobb(ind,header) #de-globb directory
nat2=deglobb(nat,header) #de-globb native file name
if seqdg: seqd=deglobb(seqdg,header)
p=spickOut(dir=dir,nat=nat2,target=header) #instantiate a spickOut object
if p.readError: #some error reading files, go to next header
sys.stderr.write(header+' '+p.readError+'\n')
continue
rankdens=p.rankIDsByDens()[0:topc] #Cluster's ids by decreasing density, up to 'topc' clusters
if seqdg:
bestTMId=p.rankIDsByTMonlySecStrToNat(seqd,list=rankdens)[0]
print header+' %5.3f'%(p.TMonlySecStrToNat(seqd,id=bestTMId))
else:
p.initTMtoNat() #TMscore of each cluster centroid to native
bestTMId=p.rankIDsByTMtoNat(list=rankdens)[0] #biggest TM to native
print header+' %5.3f'%(p.tms[bestTMId])
p.printCentroid(dir+"/bestTMcentroid.pdb",id=bestTMId)
| [
"borreguero@gmail.com"
] | borreguero@gmail.com |
1c5053a29e9e60526ee60fe5fceaf7190569fb32 | 0a3e0d965c3cce519af0ae6752292d4c48ef7734 | /backend/api/serializers/OrganizationType.py | c9b8b20e8e05b1e9a6313423ecc8a169a1e1e5f1 | [
"Apache-2.0"
] | permissive | kuanfandevops/tfrs | ae66d1e36815e2bf4d0c2470d1aeb3d03f6cfb38 | 80ae1ef5938ef5e580128ed0c622071b307fc7e1 | refs/heads/master | 2023-07-15T09:10:53.970498 | 2023-06-23T21:13:44 | 2023-06-23T21:13:44 | 140,875,622 | 0 | 0 | Apache-2.0 | 2018-09-12T23:52:55 | 2018-07-13T17:33:17 | Python | UTF-8 | Python | false | false | 1,238 | py | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import serializers
from api.models.OrganizationType import OrganizationType
class OrganizationTypeSerializer(serializers.ModelSerializer):
    """Serializes OrganizationType lookup records for the REST API."""
    class Meta:
        model = OrganizationType
        fields = (
            'id', 'type', 'description', 'effective_date',
            'expiration_date',
            'display_order')
| [
"31664961+kuanfandevops@users.noreply.github.com"
] | 31664961+kuanfandevops@users.noreply.github.com |
dbdb9847104a881adc5af3609e9b71c1b1bea4b0 | 8347c8c11e9ca90d2ec0ce7a58028f090b10dd17 | /utilities/selenium_util.py | 3aaa0e062fc7dfdac61e4e9b542eb6bcf63d4737 | [] | no_license | hsiang0107/Rebotframework2.7 | 4ee7ff9926c620413797f411141ed89b2edcb660 | 2079d6a9904330abb4a199f48640e0c830e627db | refs/heads/master | 2022-12-04T05:35:38.474744 | 2018-09-11T17:20:19 | 2018-09-11T17:20:19 | 146,104,457 | 0 | 0 | null | 2022-11-22T02:50:21 | 2018-08-25T14:57:47 | Python | UTF-8 | Python | false | false | 304 | py | from selenium import webdriver
def go_to_url(url, size=None):
    """Launch a Chrome driver, size its window, and open *url*.

    Args:
        url: address to navigate to.
        size: optional ``(width, height)`` tuple; when omitted the
            window is maximized instead.

    Returns:
        The live WebDriver instance (caller is responsible for quitting it).
    """
    driver = webdriver.Chrome()
    if size is None:
        driver.maximize_window()
    else:
        width, height = size
        driver.set_window_size(width, height)
    driver.get(url)
    return driver
| [
"hsiang0107@hotmail.com"
] | hsiang0107@hotmail.com |
5023e2af381df98e0817848e8123c4e8ce7b5a57 | 94f584fb8ed0a0d23c8a03fe402e4cfcd57aa956 | /slurm/3.ext_regions.py | d9ddc1f9c2a534a0f627d9f65e255ae4fb3d2ac3 | [] | no_license | vsoch/neurosynth-nlp | 3627b198cfea20848048bc9ee30e24429385c31f | f63adcae79744b9058e4be7a2a7125ddbb647076 | refs/heads/master | 2020-12-24T12:02:21.993536 | 2015-10-10T00:12:17 | 2015-10-10T00:12:17 | 41,841,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | #! /usr/bin/env python
# Sample input data (piped into STDIN):
'''
118238@10\tSen.~^~Barack~^~Obama~^~and~^~his~^~wife~^~,~^~Michelle~^~Obama~^~,~^~have~^~released~^~eight~^~years~^~of~^~joint~^~returns~^~.\tO~^~PERSON~^~PERSON~^~O~^~O~^~O~^~O~^~PERSON~^~PERSON~^~O~^~O~^~O~^~DURATION~^~DURATION~^~O~^~O~^~O~^~O
118238@12\tDuring~^~the~^~2004~^~presidential~^~campaign~^~,~^~we~^~urged~^~Teresa~^~Heinz~^~Kerry~^~,~^~the~^~wealthy~^~wife~^~of~^~Sen.~^~John~^~Kerry~^~,~^~to~^~release~^~her~^~tax~^~returns~^~.\tO~^~O~^~DATE~^~O~^~O~^~O~^~O~^~O~^~PERSON~^~PERSON~^~PERSON~^~O~^~O~^~O~^~O~^~O~^~O~^~PERSON~^~PERSON~^~O~^~O~^~O~^~O~^~O~^~O~^~O
For example, this sentence:
the cerebellum is part of the hypothalamus and so is the anterior of the hypothalamus
(yes, it's nonsense)
Will produce objects of format:
(start,length,text)
[(1, 1, ['cerebellum']),
(6, 1, ['hypothalamu']),
(14, 1, ['hypothalamu'])]
'''
from nlp import find_phrases
import json
import os
import sys
# Command-line interface (Python 2 script):
#   json_lookup     path to JSON list of brain regions (each with "variants")
#   sentences_file  file of pre-parsed sentence rows
#   start, end      slice of sentence lines to process in this job
#   error_file      where unparsable lines are appended; output file name
#                   is derived from it by swapping .err -> .txt
json_lookup = sys.argv[1]
sentences_file = sys.argv[2]
start = int(sys.argv[3])
end = int(sys.argv[4])
error_file = sys.argv[5]
# Get sentences
sentences_file = open(sentences_file,"rb")
sentences = sentences_file.readlines()[start:end]
sentences_file.close()
# PARSE SENTENCES HERE.
lines = [s.strip("\n") for s in sentences]
# We will write to an output file
output_file = error_file.replace(".err",".txt")
filey = open(output_file,'w')
# Read in the json with brain regions
region_dict = json.load(open(json_lookup,"rb"))
# Make a big list of all regionnames
# (flattens every region's "variants" spellings into one lookup list)
regions = []
for r in region_dict:
    regions = regions + r["variants"]
# For-loop for each row in the input query
for l in range(0,len(lines)):
    try:
        line = lines[l]
        # Find phrases that are continuous words tagged with PERSON.
        # Rows look like: <sentence_id>,{(w1),(w2),...}; split off the id,
        # then strip the tuple punctuation to recover the bare word list.
        sentence_id, words_str = line.strip().replace('"','').strip('}').split('{')
        sentence_id = sentence_id.strip(",")
        words = words_str.split(",")
        words = [w.replace(")","").replace("(","") for w in words]
        phrases = find_phrases(words,regions)
        # Insert into mentions table
        # Each phrase is (start_position, length, text) per the module docstring.
        for start_position, length, text in phrases:
            mention_id = '%s_%d' % (sentence_id, start_position)
            insert_statement = "%s,%s,%s,%s,%s\n" %(sentence_id,start_position,length," ".join(text),mention_id)
            filey.writelines(insert_statement)
    except:
        # Deliberate best-effort: any malformed row is logged and skipped
        # so a single bad line does not kill the batch job.
        # error_file is opened lazily on the first failure; open("w")
        # creates it immediately, so subsequent failures reuse `efiley`.
        if not os.path.exists(error_file):
            efiley = open(error_file,"w")
        efiley.writelines("%s\n" %(line))
        print "Error with line %s" %line
filey.close()
if os.path.exists(error_file):
    efiley.close()
"vsochat@stanford.edu"
] | vsochat@stanford.edu |
11b090e15b87037e149bb672dd109cbce957d59b | 6d24fb1c67771e7285dea61840f9766013589dd1 | /gglobal/service/models.py | 8d3e4d4346d02e815b1afc4175b83e74979dd73e | [] | no_license | PandaBalu/gglobal | b045fb66f7daea8eeb8d6c62f5dc872ff0b1b246 | c063c44c30d023bf562c0b4b39d10161540e7a92 | refs/heads/master | 2020-12-10T03:14:00.710720 | 2017-06-26T12:22:05 | 2017-06-26T12:22:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
from django.utils.text import slugify
# Create your models here.
class Service(MPTTModel):
    """Hierarchical service category (MPTT tree), linked to troubles."""
    slug = models.CharField(max_length=50, unique=True, null=True)
    name = models.CharField(max_length=50, unique=True)
    # Self-referencing tree edge; root nodes have parent=None.
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
    # Moderation flag: only accepted services should be shown publicly.
    accepted = models.BooleanField(default=False)
    troubles = models.ManyToManyField('service.Trouble', related_name='service')
    class Meta:
        verbose_name = "Услуга"
        verbose_name_plural = "Услуги"
    class MPTTMeta:
        #level_attr = 'name'
        order_insertion_by = ['name']
    def save(self, *args, **kwargs):
        # NOTE(review): this condition re-slugifies whenever a slug already
        # exists, but never fills a missing slug on a saved object; the
        # intent was probably `if not self.slug:` — confirm before changing.
        if not self.id or self.slug:
            self.slug = slugify(self.name)
        super(Service, self).save(*args, **kwargs)
    def __str__(self):
        return '%s' % self.name
class Trouble(MPTTModel):
    """Hierarchical problem/issue category (MPTT tree)."""
    slug = models.CharField(max_length=50, unique=True, null=True)
    name = models.CharField(max_length=50, unique=True)
    # Self-referencing tree edge; root nodes have parent=None.
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
    # Moderation flag: only accepted troubles should be shown publicly.
    accepted = models.BooleanField(default=False)
    class Meta:
        verbose_name = "Проблема"
        verbose_name_plural = "Проблемы"
    class MPTTMeta:
        #level_attr = 'name'
        order_insertion_by = ['name']
    def save(self, *args, **kwargs):
        # NOTE(review): same suspicious condition as Service.save — it
        # re-slugifies existing slugs and never backfills a missing one;
        # likely meant `if not self.slug:`. Keep in sync with Service.
        if not self.id or self.slug:
            self.slug = slugify(self.name)
        super(Trouble, self).save(*args, **kwargs)
    def __str__(self):
        return '%s' % self.name
| [
"narnikgamarnikus@gmail.com"
] | narnikgamarnikus@gmail.com |
d3937c2044e5f71e5336f23938e6cf4e43390de7 | eef4d2330edb808acdb82b92621f927db4911dda | /Stepik/Bioinformatics Institute/Python основы и применение/3/3.4/3.4.2.py | cb3e6f4a939a96b443dd1c028a12e50b911be81c | [] | no_license | OkS369/Education | 648308c755dab6e8510c507211005f34cbed495d | c44f6b0223b567753089627056429d1f4dab7368 | refs/heads/master | 2023-05-24T06:31:10.869633 | 2021-06-15T13:52:23 | 2021-06-15T13:52:23 | 225,942,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | import requests
import re
url = input().strip()
response = requests.get(url).text
pattern = r'''(?:<a.*href=(?:\'|\"))(?:.{0,6}://)*(w*\.?(?:[\w\d-]+\.)+\w+)(?:/?)'''
URLs = re.findall(pattern, response)
ans = sorted(list(set(URLs)))
for i in ans:
print(i)
'''
Вашей программе на вход подается ссылка на HTML файл.
Вам необходимо скачать этот файл, затем найти в нем все ссылки вида <a ... href="..." ... > и вывести список сайтов,
на которые есть ссылка.
Сайтом в данной задаче будем называть имя домена вместе с именами поддоменов.
То есть, это последовательность символов, которая следует сразу после символов протокола, если он есть,
до символов порта или пути, если они есть, за исключением случаев с относительными ссылками вида
<a href="../some_path/index.html">.
Сайты следует выводить в алфавитном порядке.
# url = 'http://pastebin.com/raw/2mie4QYa'
# url = 'http://pastebin.com/raw/hfMThaGb'
# url = 'http://pastebin.com/raw/7543p0ns'
''' | [
"romanincognito17@gmail.com"
] | romanincognito17@gmail.com |
c2e49b4e68fda9e783ecf1bc4c35556ee4c5d225 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/493.py | 56e00027e5559fd9c2fc33ff02b29cf4d45a1214 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | T = int(input())
# Cache of previously computed (n, k) answers, shared across calls.
memo = {}
def rec(n, k):
    """Return the (min, max) free-neighbor counts for the k-th occupant.

    Bathroom Stalls (Code Jam 2017 Qual C): n stalls in a row, each new
    person takes the stall maximizing the distance to occupied ones.
    Returns a tuple (smaller_side, larger_side) for person k, where the
    two values are the empty-stall counts on either side of their stall.
    """
    cached = memo.get((n, k))
    if cached is not None:
        return cached
    if k <= 1:
        # Last person in this segment: the segment of n splits into halves.
        result = ((n - 1) // 2, n // 2)
    elif (k - 1) // 2 == 0:
        # k == 2: the remaining person goes into the larger half.
        result = rec(n // 2, k // 2)
    else:
        # Recurse into both halves; the k-th person ends up in whichever
        # branch yields the tighter (lexicographically smaller) gap pair.
        result = min(rec((n - 1) // 2, (k - 1) // 2), rec(n // 2, k // 2))
    memo[(n, k)] = result
    return result
# For each case read "n k", solve, and print "Case #t: max min"
# (larger gap first, per the required output format).
for t in range(T):
    n, k = map(int, input().split())
    r, l = rec(n, k)
    print("Case #", t+1, ": ", max(r, l), " ", min(l, r), sep="")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
12d7ca226c31af0074999b3f89eeb22dbd0dd161 | d9d9a203a27bd28fe9afc72ecc613b186b33d673 | /06_MultipleForm/otherform.py | 23f5df49d83d121357009779d983aeca814be2db | [] | no_license | wildenali/Belajar-GUI-dengan-pyQT | 378951fcf0e172f48bf71ec46d887599cf5e09ed | 06ebbcbf57bec8a6a63fbb6d5397a7e2ab7c9ef9 | refs/heads/master | 2020-04-06T10:51:58.582049 | 2018-12-31T10:37:56 | 2018-12-31T10:37:56 | 157,395,034 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from PyQt5.QtWidgets import QWidget, QPushButton
class OtherForm(QWidget):
    """Secondary window with a single 'keluar' (exit) button that closes it."""
    def __init__(self):
        super(OtherForm, self).__init__()
        self.setupUI()
    def setupUI(self):
        """Size/position the window and build the button (no layout manager)."""
        self.resize(200,200)
        self.move(500,100)
        self.setWindowTitle('Form KEDUAAAAAA')
        # Button is positioned manually, then reparented onto this widget.
        self.button = QPushButton('keluar')
        self.button.move(50,50)
        self.button.setParent(self)
        self.button.clicked.connect(self.buttonClick)
    def buttonClick(self):
        """Close this window when the button is clicked."""
        self.close()
| [
"wildeeeeen@gmail.com"
] | wildeeeeen@gmail.com |
fbcbd4ed3beb6278a8d99d272b86f33d9b28a123 | 8520c991dc543f5f4e1efe59ab401824173bb985 | /140-word-break-ii/solution.py | 222e0e8c6a54047e293774ef0255a5fe234a6bb5 | [] | no_license | katryo/leetcode | d44f70f2853c4f5ea9a462d022feb0f5436c2236 | 0da45559271d3dba687858b8945b3e361ecc813c | refs/heads/master | 2020-03-24T12:04:53.859047 | 2020-02-18T04:27:55 | 2020-02-18T04:27:55 | 142,703,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | from typing import List
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
        """Return every sentence that spells *s* using words from wordDict.

        Each sentence joins dictionary words with single spaces; words may
        be reused. Results follow wordDict order at each split point.
        Top-down search memoized on the remaining suffix of s.
        """
        return self.helper(s, wordDict, {})

    def helper(self, s, wordDict, memo):
        """Return all sentences for suffix *s*; memo maps suffix -> list.

        Fix: the original returned a bare ``None`` for an empty suffix,
        making the return type inconsistent (list everywhere else); an
        empty suffix now yields ``[]`` like any other dead end.
        """
        if s in memo:
            return memo[s]
        if not s:
            return []
        res = []
        for word in wordDict:
            if not s.startswith(word):
                continue
            if s == word:
                # The word consumes the whole suffix: a complete sentence.
                res.append(word)
                continue
            # Prefix matched with text remaining: extend every sentence
            # that the rest of the string can form.
            for tail in self.helper(s[len(word):], wordDict, memo):
                res.append(word + " " + tail)
        memo[s] = res
        return res
if __name__ == '__main__':
s = Solution()
print(s.wordBreak("catsanddog", ["cat", "cats", "and", "sand", "dog"]))
print(s.wordBreak("pineapplepenapple", ["apple", "pen", "applepen", "pine", "pineapple"]))
print(s.wordBreak("catsandog", ["cats", "dog", "sand", "and", "cat"]))
# print(s.wordBreak("aaaaaaaaaaaaaaaaaaaaaa",
# ["a","aa","aaa","aaaa","aaaaa","aaaaaa","aaaaaaa","aaaaaaaa","aaaaaaaaa","aaaaaaaaaa"]))
print(s.wordBreak("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
["a","aa","aaa","aaaa","aaaaa","aaaaaa","aaaaaaa","aaaaaaaa","aaaaaaaaa","aaaaaaaaaa"]))
| [
"katoryo55@gmail.com"
] | katoryo55@gmail.com |
cc3aec26c0ec40f71c759c9452538a647ef2a5fd | 84a70e27982094ac78d34642674f4e873d571b23 | /src/apps/productos/migrations/0001_initial.py | 9475cb3ad20a66a1dacfe7377a3afa5a65dd7531 | [] | no_license | valenciacamilo12/Pagina-Web-Farmacia---Django- | 2b9eef0b0f5011247274a528d63fdaa0ed557fad | 7c8a9ac78bd2cf48706a2ef76cb32ac0824094ea | refs/heads/master | 2020-07-02T09:38:31.978196 | 2019-08-16T03:07:58 | 2019-08-16T03:07:58 | 201,487,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-08-09 16:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``productos`` app.

    Auto-generated by Django (makemigrations); creates the Categoria,
    DetalleOrdenPedido, Distrito, OrdenPedido, Presentacion, Producto and
    Proveedor tables, then wires up the foreign keys that could not be
    created inline. Do not hand-edit applied migrations.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Categoria',
            fields=[
                ('cod_cate', models.AutoField(primary_key=True, serialize=False, unique=True)),
                ('nom_cate', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='DetalleOrdenPedido',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nom_pro', models.CharField(max_length=20)),
                ('cantidad', models.IntegerField()),
                ('precio_venta', models.CharField(max_length=20)),
                ('importe', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Distrito',
            fields=[
                ('cod_dis', models.AutoField(primary_key=True, serialize=False, unique=True)),
                ('nom_dis', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='OrdenPedido',
            fields=[
                ('num_ordenpedido', models.AutoField(primary_key=True, serialize=False, unique=True)),
                ('fecha', models.CharField(max_length=20)),
                ('cod_tipopago', models.CharField(max_length=30)),
                ('total', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Presentacion',
            fields=[
                ('cod_prese', models.AutoField(primary_key=True, serialize=False, unique=True)),
                ('nom_pre', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Producto',
            fields=[
                ('cod_producto', models.AutoField(primary_key=True, serialize=False, unique=True)),
                ('nom_producto', models.CharField(max_length=30)),
                ('pre_venta', models.CharField(max_length=30)),
                ('pre_compra', models.CharField(max_length=30)),
                ('fecha_ven', models.CharField(max_length=30)),
                ('stock', models.CharField(max_length=30)),
                ('cod_cate', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='productos.Categoria')),
                ('cod_pres', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='productos.Presentacion')),
            ],
        ),
        migrations.CreateModel(
            name='Proveedor',
            fields=[
                ('cod_proveedor', models.AutoField(primary_key=True, serialize=False, unique=True)),
                ('nom_prov', models.CharField(max_length=20)),
                ('dr_prov', models.CharField(max_length=20)),
                ('telefono', models.IntegerField(verbose_name=10)),
                ('celular', models.IntegerField(verbose_name=10)),
                ('id_distrito', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='productos.Distrito')),
            ],
        ),
        # Deferred foreign keys: added after both sides of each relation exist.
        migrations.AddField(
            model_name='producto',
            name='cod_prove',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='productos.Proveedor'),
        ),
        migrations.AddField(
            model_name='detalleordenpedido',
            name='cod_pro',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='productos.Producto'),
        ),
    ]
| [
"camilo3341215@gmail.com"
] | camilo3341215@gmail.com |
87389fab233b12f273aa99a31f10495d9920e04c | b83cf5e907a66206b5559057d15e40174f459d4d | /bin/pilfile.py | 4817ebdd74b3c6d03583202c14e6df747d18d8a2 | [] | no_license | srakrnxKU/204215-snake | 59fa8156b660555a6ed1e17aafa422a68c2b8886 | 1fa16c3a0556563f035a66846fc58b2b998f2593 | refs/heads/master | 2021-06-27T12:22:40.128425 | 2017-09-15T04:13:31 | 2017-09-15T04:13:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,715 | py | #!/home/srakrn/Works/sophomore/oop_lab/01204215-snake/bin/python3.6
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import logging
import sys
from PIL import Image
# No arguments: print usage and exit.
if len(sys.argv) == 1:
    print("PIL File 0.4/2003-09-30 -- identify image files")
    print("Usage: pilfile [option] files...")
    print("Options:")
    print("  -f  list supported file formats")
    print("  -i  show associated info and tile data")
    print("  -v  verify file headers")
    print("  -q  quiet, don't warn for unidentified/missing/broken files")
    sys.exit(1)
try:
    opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
    print(v)
    sys.exit(1)
# Flag defaults; -D only raises the logging verbosity.
verbose = quiet = verify = 0
logging_level = "WARNING"
for o, a in opt:
    if o == "-f":
        # -f short-circuits: list the registered formats and quit.
        Image.init()
        id = sorted(Image.ID)
        print("Supported formats:")
        for i in id:
            print(i, end=' ')
        sys.exit(1)
    elif o == "-i":
        verbose = 1
    elif o == "-q":
        quiet = 1
    elif o == "-v":
        verify = 1
    elif o == "-D":
        logging_level = "DEBUG"
logging.basicConfig(level=logging_level)
def globfix(files):
    """Expand wildcard patterns on Windows, where the shell does not.

    On other platforms the list is returned unchanged; on win32 each
    argument containing glob magic is replaced by its matches.
    """
    if sys.platform != "win32":
        return files
    expanded = []
    for name in files:
        if glob.has_magic(name):
            expanded.extend(glob.glob(name))
        else:
            expanded.append(name)
    return expanded
# Identify each file: print "name: FORMAT WxH mode", plus info/tile with -i,
# and run header verification with -v. Broad excepts are deliberate here:
# a CLI identification tool should report a broken file and keep going.
for file in globfix(args):
    try:
        im = Image.open(file)
        print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
        if verbose:
            print(im.info, im.tile, end=' ')
        print()
        if verify:
            try:
                im.verify()
            except:
                if not quiet:
                    print("failed to verify image", end=' ')
                    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
    except IOError as v:
        # Unreadable / unidentified file.
        if not quiet:
            print(file, "failed:", v)
    except:
        # Anything else is unexpected: dump a traceback for diagnosis.
        import traceback
        if not quiet:
            print(file, "failed:", "unexpected error")
            traceback.print_exc(file=sys.stdout)
| [
"tanersirakorn@gmail.com"
] | tanersirakorn@gmail.com |
7a4379c0bdb0693279b1d0a159b4d360ff992a56 | 9dc3ae479c1b5c6941681917151fcb0379f9173d | /CanvasTokenScopes.py | 3ca3d0065fc1c16b3862bc555e8971f2f33b6028 | [] | no_license | cthacker-udel/Python-Canvas-API-Wrapper | bf2400b42b644791f45bbda7ed42e2c03a8d97b2 | 0263c591a2b02197529559346558b9be02f592c3 | refs/heads/master | 2023-08-25T12:01:48.417204 | 2021-10-09T10:49:51 | 2021-10-09T10:49:51 | 388,362,237 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | from CanvasClient import CanvasClient
class CanvasTokenScopes(CanvasClient):
    """Canvas API token-scopes endpoint helper.

    Holds the optional query parameters for the token-scopes listing and
    renders the set ones into a query dict via generate_queries().
    """

    def __init__(self):
        # Optional query parameters; None means "omit from the request".
        self.group_by = None
        self.account_id = None

    def generate_queries(self):
        """Return the query-parameter dict for the currently set fields."""
        body = {}
        # `is not None` (rather than `!= None`) is the correct identity test.
        if self.group_by is not None:
            body['group_by'] = self.group_by
        return body

    def clear_queries(self):
        """Reset every query parameter initialized in __init__.

        Fix: previously only group_by was cleared, leaving a stale
        account_id behind after a clear; both are reset for symmetry.
        """
        self.group_by = None
        self.account_id = None
"cthacker@udel.edu"
] | cthacker@udel.edu |
cbfdcb536efb944dbd79923ca52eafaeaf68bf09 | dee468400b97faa9926a8f80be9d400fab2c6d85 | /tests/db_functions/comparison/test_greatest.py | ef93d808c23b0c1cf4b6149adf810b0cb1a223f1 | [
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive",
"Python-2.0"
] | permissive | claudep/django | 0a8eec4039ddb57fc3c31ae03b313fccdeb6a063 | f1a808a5025b63715d1034af2b96a6a5241d29e9 | refs/heads/master | 2023-09-01T05:08:41.544950 | 2020-04-15T11:11:13 | 2020-04-15T16:31:30 | 4,217,165 | 3 | 2 | BSD-3-Clause | 2023-08-27T16:40:58 | 2012-05-03T18:20:44 | Python | UTF-8 | Python | false | false | 3,836 | py | from datetime import datetime, timedelta
from decimal import Decimal
from unittest import skipIf, skipUnless
from django.db import connection
from django.db.models.expressions import RawSQL
from django.db.models.functions import Coalesce, Greatest
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils import timezone
from ..models import Article, Author, DecimalModel, Fan
class GreatestTests(TestCase):
    """Exercises the Greatest() database function across backends.

    Covers NULL propagation (backend-dependent), the Coalesce workaround
    for backends that propagate NULLs, related-field lookups, UPDATE
    expressions, and decimal filtering.
    """
    def test_basic(self):
        now = timezone.now()
        before = now - timedelta(hours=1)
        Article.objects.create(title='Testing with Django', written=before, published=now)
        articles = Article.objects.annotate(last_updated=Greatest('written', 'published'))
        self.assertEqual(articles.first().last_updated, now)

    @skipUnlessDBFeature('greatest_least_ignores_nulls')
    def test_ignores_null(self):
        # Backends like Oracle skip NULL arguments instead of returning NULL.
        now = timezone.now()
        Article.objects.create(title='Testing with Django', written=now)
        articles = Article.objects.annotate(last_updated=Greatest('written', 'published'))
        self.assertEqual(articles.first().last_updated, now)

    @skipIfDBFeature('greatest_least_ignores_nulls')
    def test_propagates_null(self):
        # Most backends return NULL if any argument is NULL.
        Article.objects.create(title='Testing with Django', written=timezone.now())
        articles = Article.objects.annotate(last_updated=Greatest('written', 'published'))
        self.assertIsNone(articles.first().last_updated)

    @skipIf(connection.vendor == 'mysql', "This doesn't work on MySQL")
    def test_coalesce_workaround(self):
        # Coalesce each NULLable column to a sentinel so Greatest never sees NULL.
        past = datetime(1900, 1, 1)
        now = timezone.now()
        Article.objects.create(title='Testing with Django', written=now)
        articles = Article.objects.annotate(
            last_updated=Greatest(
                Coalesce('written', past),
                Coalesce('published', past),
            ),
        )
        self.assertEqual(articles.first().last_updated, now)

    @skipUnless(connection.vendor == 'mysql', "MySQL-specific workaround")
    def test_coalesce_workaround_mysql(self):
        # MySQL needs an explicit CAST of the sentinel to datetime.
        past = datetime(1900, 1, 1)
        now = timezone.now()
        Article.objects.create(title='Testing with Django', written=now)
        past_sql = RawSQL("cast(%s as datetime)", (past,))
        articles = Article.objects.annotate(
            last_updated=Greatest(
                Coalesce('written', past_sql),
                Coalesce('published', past_sql),
            ),
        )
        self.assertEqual(articles.first().last_updated, now)

    def test_all_null(self):
        Article.objects.create(title='Testing with Django', written=timezone.now())
        articles = Article.objects.annotate(last_updated=Greatest('published', 'updated'))
        self.assertIsNone(articles.first().last_updated)

    def test_one_expressions(self):
        # Greatest requires at least two expressions.
        with self.assertRaisesMessage(ValueError, 'Greatest must take at least two expressions'):
            Greatest('written')

    def test_related_field(self):
        # Greatest can span a reverse relation (author vs. fan ages).
        author = Author.objects.create(name='John Smith', age=45)
        Fan.objects.create(name='Margaret', age=50, author=author)
        authors = Author.objects.annotate(highest_age=Greatest('age', 'fans__age'))
        self.assertEqual(authors.first().highest_age, 50)

    def test_update(self):
        # Greatest of two text columns lexicographically: 'Jim' > 'James Smith'.
        author = Author.objects.create(name='James Smith', goes_by='Jim')
        Author.objects.update(alias=Greatest('name', 'goes_by'))
        author.refresh_from_db()
        self.assertEqual(author.alias, 'Jim')

    def test_decimal_filter(self):
        obj = DecimalModel.objects.create(n1=Decimal('1.1'), n2=Decimal('1.2'))
        self.assertCountEqual(
            DecimalModel.objects.annotate(
                greatest=Greatest('n1', 'n2'),
            ).filter(greatest=Decimal('1.2')),
            [obj],
        )
| [
"timograham@gmail.com"
] | timograham@gmail.com |
75e707efda8592ffe79b23e99eb14f936b9abe00 | f03064e9f7fbd5d0344812fae45439905627f2a8 | /helga/general/setup/helga_launcher/lib/helga_launcher_dcc_button.py | be20e072710f65be6dd7076f7014278644bdc699 | [] | no_license | tws0002/helga | 45324a4acfde5054c452329de8cfdd38de4f8bda | 80f44393a5f1b3038d4ce3dc5057989ad7d3ef28 | refs/heads/master | 2021-01-12T17:21:04.802566 | 2015-04-16T20:39:06 | 2015-04-16T20:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,350 | py |
"""
helga_launcher_dcc_button
==========================================
Subclass of QPushButton to allow for customized drag&drop behaviour
"""
#Import
#------------------------------------------------------------------
#python
import logging
import os
import sys
#PyQt4
from PyQt4 import QtGui
from PyQt4 import QtCore
#HelgaLauncherDCCButton class
#------------------------------------------------------------------
class HelgaLauncherDCCButton(QtGui.QPushButton):
    """
    Subclass of QPushButton to allow for customized drag&drop behaviour
    """

    def __init__(self,
                 logging_level = logging.DEBUG,
                 button_text = None,
                 icon_path = None,
                 icon_path_hover = None,
                 icon_path_drag = None,
                 parent=None):
        """Create the button and swap border-image icons on hover/drag.

        icon_path / icon_path_hover / icon_path_drag are file paths used
        as the button's stylesheet border-image in the idle, hovered and
        drag-over states respectively; missing files are silently ignored.
        NOTE(review): `logging_level` is accepted but never used here —
        presumably intended for a logger that was never wired up; confirm.
        """
        #super class constructor
        if(button_text):
            super(HelgaLauncherDCCButton, self).__init__(button_text, parent)
        else:
            super(HelgaLauncherDCCButton, self).__init__(parent)

        #icon_path
        self.icon_path = icon_path
        #set stylesheet
        if(self.icon_path):
            if(os.path.isfile(self.icon_path)):
                self.setStyleSheet("border-image: url({0});".format(self.icon_path))

        #icon_path_hover
        self.icon_path_hover = icon_path_hover

        #icon_path_drag
        self.icon_path_drag = icon_path_drag

        #accept drops
        self.setAcceptDrops(True)

        #setMouseTracking
        self.setMouseTracking(True)

    def enterEvent(self, event):
        """Switch to the hover icon when the cursor enters the button."""
        #set stylesheet
        if(self.icon_path_hover):
            if(os.path.isfile(self.icon_path_hover)):
                self.setStyleSheet("border-image: url({0});".format(self.icon_path_hover))
        event.accept()

    def leaveEvent(self, event):
        """Restore the idle icon when the cursor leaves the button."""
        #set stylesheet
        if(self.icon_path):
            if(os.path.isfile(self.icon_path)):
                self.setStyleSheet("border-image: url({0});".format(self.icon_path))
        event.accept()

    def dragEnterEvent(self, event):
        """Accept URL drags only; show the drag icon while hovering."""
        if (event.mimeData().hasUrls()):
            #set stylesheet
            if(self.icon_path_drag):
                if(os.path.isfile(self.icon_path_drag)):
                    self.setStyleSheet("border-image: url({0});".format(self.icon_path_drag))
            event.accept()
        else:
            event.ignore()

    def dragMoveEvent(self, event):
        """Keep accepting URL drags as the cursor moves over the button."""
        if (event.mimeData().hasUrls()):
            event.setDropAction(QtCore.Qt.CopyAction)
            event.accept()
        else:
            event.ignore()

    def dragLeaveEvent(self, event):
        """Restore the idle icon when a drag leaves the button."""
        #set stylesheet
        if(self.icon_path):
            if(os.path.isfile(self.icon_path)):
                self.setStyleSheet("border-image: url({0});".format(self.icon_path))
        event.accept()

    def dropEvent(self, event):
        """Convert dropped URLs to local file paths and emit "dropped".

        Uses the old-style SIGNAL mechanism: listeners connect to the
        string signal "dropped" and receive the list of local paths.
        """
        if (event.mimeData().hasUrls()):
            event.setDropAction(QtCore.Qt.CopyAction)
            event.accept()
            #url_list
            url_list = []
            for url in event.mimeData().urls():
                url_list.append(str(url.toLocalFile()))
            #emit dropped
            self.emit(QtCore.SIGNAL("dropped"), url_list)
        else:
            event.ignore()
| [
"wagenertimm@gmail.com"
] | wagenertimm@gmail.com |
77229a4588e6d23dd1773b0b283826c4b4f1010f | aba1d17ddc7d7ad9f49e2d6d87600e9e0387ba14 | /mi/dataset/parser/vel3d_a_mmp_cds.py | 2b6a51b87d0768e34cb0060017f7cfadd4cf98d9 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | oceanobservatories/mi-instrument | 3ad880c1366b1a8461fc9085768df0e9ddeb6ef5 | bdbf01f5614e7188ce19596704794466e5683b30 | refs/heads/master | 2023-07-23T07:28:36.091223 | 2023-07-14T15:54:49 | 2023-07-14T15:54:49 | 24,165,325 | 1 | 32 | BSD-2-Clause | 2023-07-13T01:39:22 | 2014-09-17T22:53:22 | Python | UTF-8 | Python | false | false | 2,609 | py |
#!/usr/bin/env python
"""
@package mi.dataset.parser.vel3d_a_mmp_cds
@file marine-integrations/mi/dataset/parser/vel3d_a_mmp_cds.py
@author Jeremy Amundson
@brief Parser for the Vel3dAMmpCds dataset driver
Release notes:
initial release
"""
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.dataset.parser.mmp_cds_base import MmpCdsParserDataParticle
log = get_logger()
__author__ = 'Jeremy Amundson'
__license__ = 'Apache 2.0'
class DataParticleType(BaseEnum):
    """Stream name emitted by this parser's particles."""
    INSTRUMENT = 'vel3d_a_mmp_cds_instrument'
class Vel3dAMmpCdsParserDataParticleKey(BaseEnum):
    """Output particle parameter names (velocity beams, magnetometer, tilt)."""
    VA = 'vel3d_a_va'
    VB = 'vel3d_a_vb'
    VC = 'vel3d_a_vc'
    VD = 'vel3d_a_vd'
    HX = 'vel3d_a_hx'
    HY = 'vel3d_a_hy'
    HZ = 'vel3d_a_hz'
    TX = 'vel3d_a_tx'
    TY = 'vel3d_a_ty'
class Vel3dAMmpCdsParserDataParticle(MmpCdsParserDataParticle):
    """
    Class for parsing data from the Vel3dAMmpCds data set
    """

    _data_particle_type = DataParticleType.INSTRUMENT

    # (particle key, raw-dict key) pairs, in the required output order.
    # Replaces nine copy-pasted _encode_value calls with one declarative map.
    _PARAM_MAP = (
        (Vel3dAMmpCdsParserDataParticleKey.VA, 'va'),
        (Vel3dAMmpCdsParserDataParticleKey.VB, 'vb'),
        (Vel3dAMmpCdsParserDataParticleKey.VC, 'vc'),
        (Vel3dAMmpCdsParserDataParticleKey.VD, 'vd'),
        (Vel3dAMmpCdsParserDataParticleKey.HX, 'hx'),
        (Vel3dAMmpCdsParserDataParticleKey.HY, 'hy'),
        (Vel3dAMmpCdsParserDataParticleKey.HZ, 'hz'),
        (Vel3dAMmpCdsParserDataParticleKey.TX, 'tx'),
        (Vel3dAMmpCdsParserDataParticleKey.TY, 'ty'),
    )

    def _get_mmp_cds_subclass_particle_params(self, dict_data):
        """
        This method is required to be implemented by classes that extend the
        MmpCdsParserDataParticle class. It returns the particle parameters
        specific to Vel3dAMmpCds, each encoded as a float.
        @param dict_data raw dict containing the va..ty sample values
        @returns a list of particle params specific to Vel3dAMmpCds
        """
        return [self._encode_value(name, dict_data[raw_key], float)
                for name, raw_key in self._PARAM_MAP]
| [
"petercable@gmail.com"
] | petercable@gmail.com |
164bf3d167ffc2e378c1e7376f5833e0b98c9e83 | 6ed48bf3c72e61fe53144a3545ab305112c93501 | /appengine/sheriff_o_matic/ts_alerts.py | 99c4b74cc11e2f6a507fc455536478d75a15aea3 | [
"BSD-3-Clause"
] | permissive | eunchong/infra | ee5f7a9379977de8c814f90dbba3f6adbf06a75c | ce3728559112bfb3e8b32137eada517aec6d22f9 | refs/heads/master | 2022-11-27T06:26:57.415805 | 2016-04-08T12:34:36 | 2016-04-08T12:34:36 | 55,699,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,360 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import alerts_history
import json
import logging
import utils
import webapp2
import zlib
from datetime import datetime as dt
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import ndb
ALLOWED_APP_IDS = ('google.com:monarch-email-alerts-parser')
INBOUND_APP_ID = 'X-Appengine-Inbound-Appid'
class TSAlertsJSON(ndb.Model):
    """Datastore record holding one time-series alert payload.

    The entity id is a hash of the alert's mash expression + start time.
    """
    # UTC time the alert stopped firing; None while the alert is active.
    active_until = ndb.DateTimeProperty()
    # Full alert payload as received (stored compressed by ndb).
    json = ndb.JsonProperty(compressed=True)

    @classmethod
    def query_active(cls):
        """Return a query over alerts that are still firing."""
        return cls.query().filter(TSAlertsJSON.active_until == None)

    @classmethod
    def query_hash(cls, key):
        """Fetch one alert entity by its hash-key id (None if absent)."""
        return cls.get_by_id(key)
class TimeSeriesAlertsHandler(webapp2.RequestHandler):
ALERT_TYPE = 'ts-alerts'
MEMCACHE_COMPRESSION_LEVEL = 9
# Alerts which have continued to fire are re-sent every 5 minutes, so stale
# alerts older than 300 seconds are replaced by incoming alerts.
STALE_ALERT_TIMEOUT = 300
def get(self, key=None):
self.remove_expired_alerts()
if not users.get_current_user():
results = {'date': dt.utcnow(),
'redirect-url': users.create_login_url(self.request.uri)}
self.write_json(results)
return
if key:
logging.info('getting the key: ' + key)
try:
data = memcache.get(key) or TSAlertsJSON.query_hash(key).json
except AttributeError:
self.response.write('This alert does not exist.')
self.response.set_status(404, 'Alert does not exist')
return
if not data:
self.response.write('This alert does not exist.')
self.response.set_status(404, 'Alert does not exist')
elif data.get('private', True) and not utils.is_googler():
logging.info('Permission denied.')
self.abort(403)
else:
self.write_json(data.get(json, data))
else:
query = TSAlertsJSON.query_active().fetch()
data = []
for item in query:
if item.json.get('private', True) and not utils.is_googler():
continue
data.append(item.json)
self.write_json({'alerts': data})
def post(self):
app_id = self.request.headers.get(INBOUND_APP_ID, None)
if app_id not in ALLOWED_APP_IDS:
logging.info('Permission denied')
self.abort(403)
return
self.update_alerts()
def put(self, key):
if not utils.is_googler():
self.response.set_status(403, 'Permission Denied')
return
changed_alert = TSAlertsJSON.query_hash(key)
if not changed_alert:
self.response.write('This alert does not exist.')
self.response.set_status(404, 'Alert does not exist')
return
try:
data = json.loads(self.request.body)
except ValueError:
warning = ('Content %s was not valid JSON string.', self.request.body)
self.response.set_status(400, warning)
return
logging.info('Alert before: ' + str(changed_alert))
logging.info('Data: ' + str(data))
changed_alert.json.update(data)
logging.info('Alert after: ' + str(changed_alert))
changed_alert.put()
memcache.set(key, changed_alert.json)
self.response.write("Updated ts-alerts.")
def delete(self, key):
if not utils.is_googler():
self.response.set_status(403, 'Permission Denied')
return
if key == 'all':
all_keys = TSAlertsJSON.query().fetch(keys_only=True)
ndb.delete_multi(all_keys)
for k in all_keys:
logging.info('deleting key from memcache: ' + k.id())
memcache.delete(k.id())
self.response.set_status(200, 'Cleared all alerts')
return
changed_alert = TSAlertsJSON.query_hash(key)
if not changed_alert:
self.response.write('This alert does not exist.')
self.response.set_status(404, 'Alert does not exist')
return
memcache.delete(key)
changed_alert.key.delete()
def write_json(self, data):
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers['Content-Type'] = 'application/json'
data = utils.generate_json_dump(data)
self.response.write(data)
def remove_expired_alerts(self):
active_alerts = TSAlertsJSON.query_active().fetch()
for alert in active_alerts:
alert_age = utils.secs_ago(alert.json['alert_sent_utc'])
if alert_age > self.STALE_ALERT_TIMEOUT:
logging.info('%s expired. alert age: %d.', alert.key.id(), alert_age)
alert.active_until = dt.utcnow()
alert.json['active_until'] = dt.strftime(alert.active_until, '%s')
alert.put()
memcache.set(alert.key.id(), alert.json)
def update_alerts(self):
self.remove_expired_alerts()
try:
alerts = json.loads(self.request.body)
except ValueError:
warning = 'Content field was not valid JSON string.'
self.response.set_status(400, warning)
logging.warning(warning)
return
if alerts:
self.store_alerts(alerts)
def store_alerts(self, alert):
    """Persists one alert dict, keyed by a hash of expression + start time."""
    # The same hash keys the alert in both the datastore and memcache.
    digest = utils.hash_string(alert['mash_expression'] + alert['active_since'])
    alert['hash_key'] = digest
    entity = TSAlertsJSON(id=digest, json=alert, active_until=None)
    entity.put()
    memcache.set(digest, alert)
def set_memcache(self, key, data):
    """Stores *data* in memcache as zlib-compressed JSON."""
    serialized = utils.generate_json_dump(data, False)
    # Compress the payload before caching.
    compressed = zlib.compress(serialized, self.MEMCACHE_COMPRESSION_LEVEL)
    memcache.set(key, compressed)
class TimeSeriesAlertsHistory(alerts_history.AlertsHistory):
    """Read-only endpoint serving the set of alerts active at a point in time."""

    def get(self, timestamp=None):
        """Writes the alerts active at `timestamp` (epoch seconds; defaults to now).

        NOTE(review): when the caller is not signed in this *returns* a dict
        containing a login URL instead of writing a response -- confirm the
        base class consumes that return value.
        """
        result_json = {}
        if not users.get_current_user():
            result_json['login-url'] = users.create_login_url(self.request.uri)
            return result_json
        alerts = TSAlertsJSON.query_active().fetch()
        if timestamp:
            try:
                time = dt.fromtimestamp(int(timestamp))
            except ValueError:
                self.response.set_status(400, 'Invalid timestamp.')
                return
            if time > dt.utcnow():
                self.response.write('Sheriff-o-matic cannot predict the future... yet.')
                self.response.set_status(400, 'Invalid timestamp.')
                # NOTE(review): no `return` here -- execution continues with
                # the future timestamp even after the 400; confirm intended.
        else:
            time = dt.utcnow()
        # Also include alerts that were still active at `time` but have
        # since been given an expiry.
        alerts += TSAlertsJSON.query(TSAlertsJSON.active_until > time).fetch()
        history = []
        for a in alerts:
            ts, private = timestamp, a.json['private']
            # Drop alerts that only became active after the requested time.
            in_range = not (ts and utils.secs_ago(a.json['active_since_utc'], ts) < 0)
            # Non-googlers only see non-private alerts.
            permission = utils.is_googler() or not private
            if in_range and permission:
                history.append(a.json)
        result_json.update({
            'timestamp': time.strftime('%s'),
            'time_string': time.strftime('%Y-%m-%d %H:%M:%S %Z'),
            'active_alerts': history
        })
        self.write_json(result_json)

    def write_json(self, data):
        """Serializes `data` and writes it as a CORS-enabled JSON response."""
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers['Content-Type'] = 'application/json'
        data = utils.generate_json_dump(data)
        self.response.write(data)
# Routing: each endpoint is registered twice -- the second pattern of each
# pair captures a trailing key/timestamp path segment as a handler argument.
app = webapp2.WSGIApplication([
    ('/ts-alerts', TimeSeriesAlertsHandler),
    ('/ts-alerts/(.*)', TimeSeriesAlertsHandler),
    ('/ts-alerts-history', TimeSeriesAlertsHistory),
    ('/ts-alerts-history/(.*)', TimeSeriesAlertsHistory)])
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
742973703c04829146d003d6539c43c7eb72c01a | 4eef999a5ffc3c499d63fafb0f673f62628480fd | /research/migrations/0007_remove_publications_pdf.py | 28c020f7b8f4557a8e60839f00468101e2bfdab9 | [] | no_license | lvjindi/RISE_CH | 6d8308e2a178e8ceee5a0b56b7227a37d163c72e | 039518bc3d01bab9586f37b4b40188b526e61f52 | refs/heads/master | 2020-04-15T20:33:32.367008 | 2019-09-03T06:51:23 | 2019-09-03T06:51:23 | 164,982,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | # Generated by Django 2.1.4 on 2019-02-20 09:48
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django: reverses migration 0006 by dropping the
    # 'pdf' field from the 'publications' model. Do not edit by hand.

    dependencies = [
        ('research', '0006_publications_pdf'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='publications',
            name='pdf',
        ),
    ]
| [
"793098580@qq.com"
] | 793098580@qq.com |
22ba34687559f57f6e0f7f0c1096381ab009a691 | efb444d7d31a6a22fa8c8ab65621a1303bd25606 | /tests/tests.py | fee3e14e5e4018a03cd781e22c1f40b505855da3 | [] | no_license | newslynx/siegfried | 7af127bf51251aeaaa9cd1645bd5d467f238402f | d88f69c770aff0e87ce054f9f854f6cb4f4a36c4 | refs/heads/master | 2020-04-29T08:57:17.862742 | 2015-06-04T17:53:57 | 2015-06-04T17:53:57 | 20,551,474 | 3 | 1 | null | 2019-10-22T17:37:37 | 2014-06-06T04:32:47 | Python | UTF-8 | Python | false | false | 4,890 | py | import unittest
import os
from siegfried import *
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
class Tests(unittest.TestCase):
    """Unit tests for the siegfried URL-utility functions.

    NOTE(review): this is Python 2 code (bare ``print`` statements), and
    ``test_unshorten_url`` performs live network requests -- it will fail
    offline or if the shortened links rot.
    """

    def test_urls_from_string(self):
        # Duplicate URLs in the input should collapse to one entry each.
        string = """
        http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report.html?hp&_r=0
        http://bitly.com/lfsaff
        http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report.html?hp&_r=0
        """
        truth = set([
            "http://bitly.com/lfsaff",
            "http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report.html?hp&_r=0"
        ])
        test = set(urls_from_string(string))
        print test
        assert(test == truth)

    def test_urls_from_html(self):
        # Relative hrefs should be resolved against the supplied domain.
        html = """
        <a href="http://enigma.io">Enigma.io</a>
        <p><a href="http://enigma.io">Enigma.io</a>
        is a search engine and API for public data.
        We find certain datasets to be especially powerful
        because the underlying phenomena they capture
        have such a fundamental impact on our world.
        Climate affects
        <a href="https://app.enigma.io/table/us.gov.usda.fas.psd.psd_alldata">our agricultural production</a>
        <a href="https://app.enigma.io/table/us.gov.bls.ap.data-2-gasoline-join">the price of gasoline</a>
        the livelihood of small businesses or
        <a href="/search/source/us.gov.dol.oflc.h2a">temporary farm workers</a>,
        and ultimately the sustainability of our species on this planet.</p>
        """
        truth = set([
            "http://enigma.io",
            "https://app.enigma.io/table/us.gov.usda.fas.psd.psd_alldata",
            "https://app.enigma.io/table/us.gov.bls.ap.data-2-gasoline-join",
            "https://app.enigma.io/search/source/us.gov.dol.oflc.h2a"
        ])
        test = set(urls_from_html(html, domain="https://app.enigma.io/"))
        try:
            assert(test == truth)
        except AssertionError:
            print test
            print truth
            raise AssertionError

    def test_get_domain(self):
        case = 'http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report.html?hp&_r=0'
        assert(get_domain(case) == 'www.nytimes.com')

    def test_get_simple_domain(self):
        case = 'http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report.html?hp&_r=0'
        assert(get_simple_domain(case) == 'nytimes')

    def test_remove_args(self):
        case = 'http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report.html?hp&_r=0'
        assert(remove_args(case) == 'http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report.html')

    def test_is_article_url(self):
        # Fixture rows are "<1-or-0>,<url>" pairs.
        test_fp = os.path.join(TEST_DIR, 'fixtures/article_urls.txt')
        with open(test_fp, 'rb') as f:
            for case in f.read().split('\n'):
                print case
                items = case.split(',')
                tf = items[0]
                url = items[1]
                truth_val = True if tf == '1' else False
                assert(is_article_url(url) == truth_val)

    def test_url_to_slug(self):
        case = 'http://www.nytimes.com/video/movies/100000002920951/anatomy-8216the-fault-in-our-stars8217.html?smid=tw-nytimes'
        assert(url_to_slug(case) == '100000002920951-anatomy-8216the-fault-in-our-stars8217')

    def test_prepare_url(self):
        cases = [
            ('http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report.html?hp&_r=0',
             'http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report.html'),
            ('http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report/index.html',
             'http://www.nytimes.com/2014/06/06/business/gm-ignition-switch-internal-recall-investigation-report/')
        ]
        for c in cases:
            test, truth = c
            assert(prepare_url(test) == truth)

    def test_is_short_url(self):
        cases = [
            '1.usa.gov/1kEeAcb',
            'bit.ly/1kzIQWw',
            'http://1.usa.gov/1kEeAcb'
        ]
        for c in cases:
            assert(is_short_url(c))

    def test_unshorten_url(self):
        # NOTE(review): hits the network; unknown/unsupported links are
        # expected to be returned unchanged.
        cases = [
            ('http://nyti.ms/1oxYm3e',
             'http://www.nytimes.com/video/movies/100000002920951/anatomy-8216the-fault-in-our-stars8217.html'),
            ('nyti.ms/1oxYm3e',
             'http://www.nytimes.com/video/movies/100000002920951/anatomy-8216the-fault-in-our-stars8217.html'),
            ('http://bit.ly/1kzIQWw',
             'http://www.fromscratchradio.com/show/marc-dacosta'),
            ('bit.ly/aaaaaa', 'http://bit.ly/aaaaaa'),
            ('http://ow.ly/i/5OTms', 'http://ow.ly/i/5OTms'),
            ('http://j.mp/1jBOKo1', 'http://earthfix.info/portables/')
        ]
        for c in cases:
            test, truth = c
            try:
                test = prepare_url(unshorten_url(test))
                assert(test == truth)
            except AssertionError:
                print "failed on %s" % test
                raise
| [
"brianabelson@gmail.com"
] | brianabelson@gmail.com |
acb3cfec9249aba0a3f5f20e6b55060966446a06 | bc7cd6689a8052d442ded8e876de1e5f22bfad6c | /lsml/gradient/masked_gradient.py | 5761fcf20299c4edfcdbc747a83125cf7a63b094 | [
"BSD-3-Clause"
] | permissive | tor4z/level-set-machine-learning | 3a359e0d55137f3c0a9cbcaf25048c61573abd25 | 38460e514d48f3424bb8d3bd58cb3eb330153e64 | refs/heads/master | 2022-04-08T08:04:27.200188 | 2020-01-26T03:09:56 | 2020-01-26T03:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,136 | py | import ctypes
import pathlib
import numpy as np
from numpy.ctypeslib import ndpointer
try:
import lsml
util = pathlib.Path(lsml.__file__).parent / 'util'
name = list(util.glob('masked_gradient*'))[0]
_masked_gradient = ctypes.cdll.LoadLibrary(str(name))
except Exception:
raise ImportError('Could not find shared library for masked_gradient')
def _get_gradient_centered_func(ndim):
    """Look up the C implementation of the centered-difference gradient for
    *ndim* dimensions and attach its ctypes call signature.
    """
    c_func = getattr(_masked_gradient, 'gradient_centered{:d}d'.format(ndim))
    c_func.restype = None

    double_ptr = ndpointer(ctypes.c_double)
    # Argument layout mirrors the tuple built by `gradient_centered`.
    signature = []
    signature.extend([ctypes.c_int] * ndim)      # array shape
    signature.append(double_ptr)                 # input array
    signature.append(ndpointer(ctypes.c_bool))   # mask
    signature.extend([double_ptr] * ndim)        # one gradient output per axis
    signature.append(double_ptr)                 # gradient magnitude output
    signature.extend([ctypes.c_double] * ndim)   # spacing (delta) per axis
    signature.append(ctypes.c_int)               # normalize flag
    c_func.argtypes = tuple(signature)

    return c_func
def _get_gradient_magnitude_osher_sethian_func(ndim):
    """Look up the C implementation of the Osher-Sethian gradient magnitude
    for *ndim* dimensions and attach its ctypes call signature.
    """
    c_func = getattr(_masked_gradient, 'gmag_os{:d}d'.format(ndim))
    c_func.restype = None

    double_ptr = ndpointer(ctypes.c_double)
    # Argument layout mirrors the tuple built by
    # `gradient_magnitude_osher_sethian`.
    signature = (
        [ctypes.c_int] * ndim +          # array shape
        [double_ptr] +                   # input array
        [ndpointer(ctypes.c_bool)] +     # mask
        [double_ptr] +                   # nu (normal velocity)
        [double_ptr] +                   # gradient magnitude output
        [ctypes.c_double] * ndim         # spacing (delta) per axis
    )
    c_func.argtypes = tuple(signature)

    return c_func
def gradient_centered(arr, mask=None, dx=None,
                      return_gradient_magnitude=True,
                      normalize=False):
    """
    Compute the centered difference approximations of the partial
    derivatives of `arr` along each coordinate axis, computed only
    where `mask` is true.

    Note
    ----
    Only dimensions 1, 2, and 3 are supported.

    Parameters
    ----------
    arr: ndarray, dtype=float
        The gradient of `arr` is returned.

    mask: ndarray, dtype=bool, same shape as `arr`, default=None
        The gradient of `arr` is only computed where `mask` is true. If
        None (default), then mask is True everywhere.

    dx: ndarray, dtype=float, len=arr.ndim
        These indicate the "delta" or spacing terms along each axis.
        If None (default), then spacing is 1.0 along each axis.

    return_gradient_magnitude: bool, default=True
        If True, the gradient magnitude is computed and returned also.

    normalize: bool, default=False
        If True, then the gradient terms are normalized so that the
        gradient magnitude is one if computed over the gradient terms.
        Note that if `return_gradient_magnitude` is True, the returned
        magnitude is the value prior to normalization (not necessarily one).

    Returns
    -------
    [gradient_1, ... , gradient_n], gradient_magnitude: list, ndarray
        Returns the gradient along each axis approximated by centered
        differences (only computed where mask is True). The gradient
        magnitude is optionally returned.
    """
    ndim = arr.ndim
    assert 1 <= ndim <= 3, "Only dimensions 1-3 supported."

    # FIX: `np.float` / `np.bool` were mere aliases of the builtins; they
    # were deprecated in NumPy 1.20 and removed in 1.24, so use the
    # builtins directly (semantics are identical).
    if arr.dtype != float:
        raise ValueError("`arr` must be float type.")

    if mask is not None:
        if mask.ndim != ndim:
            raise ValueError("Shape mismatch between `mask` and `arr`.")
    else:
        mask = np.ones(arr.shape, dtype=bool)

    if dx is not None:
        if len(dx) != ndim:
            raise ValueError("`dx` vector shape mismatch.")
    else:
        dx = np.ones(ndim, dtype=float)

    # Output buffers, filled in-place by the C routine.
    gradients = [np.zeros_like(arr) for _ in range(ndim)]
    gradient_magnitude = np.zeros_like(arr)

    # Set up the C function
    func = _get_gradient_centered_func(ndim=ndim)

    # Argument order must match the ctypes signature: shape, array, mask,
    # per-axis gradient outputs, magnitude output, per-axis spacing, flag.
    args = (
        arr.shape +
        (arr,) +
        (mask,) +
        tuple(gradients) +
        (gradient_magnitude,) +
        tuple(dx) +
        (int(normalize),)
    )

    # Call the C function
    func(*args)

    if return_gradient_magnitude:
        return gradients, gradient_magnitude
    else:
        return gradients
def gradient_magnitude_osher_sethian(arr, nu, mask=None, dx=None):
    """
    This numerical approximation is an upwind approximation of
    the velocity-dependent gradient magnitude term in the PDE:

    .. math::
        u_t = \\nu \\| Du \\|

    from Osher and Sethian [1]. This is why the function is called
    `gradient_magnitude_osher_sethian`.

    In the PDE, the gradient vector of u is assumed to point inward and
    :math:`\\nu` governs the velocity in the normal direction, with
    positive values corresponding to movement in the outward normal
    direction (expansion) and negative values corresponding to the inward
    normal direction (contraction).

    Note that in Osher and Sethian's formulation they write:

    .. math::
        u_t + F \\| Du \\| = 0

    Thus, the correspondence is :math:`\\nu = -F`.

    [1]: Level Set Methods. Evolving Interfaces in Geometry,
         Fluid Mechanics, Computer Vision, and Materials Science
         J.A. Sethian, Cambridge University Press, 1996
         Cambridge Monograph on Applied and Computational Mathematics

    Parameters
    ----------
    arr: ndarray, dtype=float
        The gradient magnitude of `arr` is returned.

    nu: ndarray, dtype=float
        The normal-velocity term :math:`\\nu` in the PDE above, passed
        through to the C routine alongside `arr`.
        (Presumably the same shape as `arr` -- not validated here.)

    mask: ndarray, dtype=bool, same shape as `arr`, default=None
        The gradient of `arr` is only computed where `mask` is true. If
        None (default), then mask is all ones.

    dx: ndarray, dtype=float, len=arr.ndim
        These indicate the "delta" or spacing terms along each axis.
        If None (default), then spacing is 1.0 along each axis.

    Returns
    -------
    gradient_magnitude: ndarray
        The velocity-dependent gradient magnitude approximation.
    """
    ndim = arr.ndim
    assert 1 <= ndim <= 3, "Only dimensions 1-3 supported."

    # FIX: `np.float` / `np.bool` were mere aliases of the builtins; they
    # were deprecated in NumPy 1.20 and removed in 1.24, so use the
    # builtins directly (semantics are identical).
    if arr.dtype != float:
        raise ValueError("`arr` must be float type.")

    if mask is not None:
        if mask.ndim != ndim:
            raise ValueError("Shape mismatch between `mask` and `arr`.")
    else:
        mask = np.ones(arr.shape, dtype=bool)

    if dx is not None:
        if len(dx) != ndim:
            raise ValueError("`dx` vector shape mismatch.")
    else:
        dx = np.ones(ndim, dtype=float)

    # Output buffer, filled in-place by the C routine.
    gradient_magnitude = np.zeros_like(arr)

    # Set up the C function
    func = _get_gradient_magnitude_osher_sethian_func(ndim=ndim)

    # Argument order must match the ctypes signature: shape, array, mask,
    # nu, magnitude output, per-axis spacing.
    args = (
        arr.shape +
        (arr,) +
        (mask,) +
        (nu,) +
        (gradient_magnitude,) +
        tuple(dx)
    )

    # Call the C function
    func(*args)

    return gradient_magnitude
| [
"mhancock743@gmail.com"
] | mhancock743@gmail.com |
497d3730997e4c92aa774ca991380c8f5f5a0a61 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/kvrsha004/question2.py | 8a024870f37a0d26fd12907a2b84c1142c14e054 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | #Time Checker
# NOTE(review): eval() on raw user input executes arbitrary expressions --
# a security hazard. Since only whole numbers are expected by the range
# check below, prefer int(input(...)).
h = eval(input("Enter the hours:\n"))
m = eval(input("Enter the minutes:\n"))
s = eval(input("Enter the seconds:\n"))
if 0<=h<=23 and 0<=m<=59 and 0<=s<=59:
print("Your time is valid.")
else:
print("Your time is invalid.") | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
ec4d62afbf12f30e5271ff50bea6716016c454e8 | b55ef6339ccd57a01f1fe48f863b410b6a45e7d4 | /GraphQL-django/GraphQL/settings.py | 0bab1ee70e6cb0134ca4b2651d4175b567ab107a | [] | no_license | rafiulgits/HelloPy | b1ebc51e55cec6ebbbd0afcfc9a5db66dc2e10d0 | 17d93799a760e604a8ffcbf969cc1d266c5445ac | refs/heads/master | 2021-06-22T02:13:59.949583 | 2019-09-22T15:11:12 | 2019-09-22T15:11:12 | 186,347,440 | 0 | 0 | null | 2021-06-10T21:38:05 | 2019-05-13T04:51:53 | Python | UTF-8 | Python | false | false | 3,455 | py | """
Django settings for GraphQL project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e&0cb6x44kdyu-#$_a)3q97xeh04lp&t48h=g+dutc%gv0arhu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app', # my app
'moc',
'graphene_django', # GraphQL
]
GRAPHENE = {
'SCHEMA': 'app.schema.schema'
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'GraphQL.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'GraphQL.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# PostgreSQL connection for the 'moc' database.
# NOTE(review): credentials are hard-coded in source; move PASSWORD (and
# ideally the whole block) into environment variables before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'moc',
        'USER': 'postgres',
        'PASSWORD':'pass1234',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"avoidcloud@gmail.com"
] | avoidcloud@gmail.com |
bd023d6f8effde683c485545a2ed813842d9611a | 9b3e46ef2ffd65cccace3e3e3d93438c077e4f9e | /main/gsed/template.py | 9725a86dee3c8895ead5e804258afbe684a88d8e | [
"BSD-2-Clause"
] | permissive | wizadr/cports | 1dd043045fc63b061f803d1992a9ccdc995850ad | be5f4695305d9c00de9d4e252e67db8081690c3e | refs/heads/master | 2023-08-21T11:35:16.710064 | 2021-10-25T00:38:04 | 2021-10-25T00:38:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | pkgname = "gsed"
pkgver = "4.8"
pkgrel = 0
build_style = "gnu_configure"
configure_args = [
"--enable-acl",
"--program-prefix=g",
"gl_cv_func_working_acl_get_file=yes",
"ac_cv_lib_error_at_line=no",
"ac_cv_header_sys_cdefs_h=no",
]
makedepends = ["acl-devel"]
checkdepends = ["perl"]
pkgdesc = "GNU stream editor"
maintainer = "q66 <q66@chimera-linux.org>"
license = "GPL-3.0-or-later"
url = "http://www.gnu.org/software/sed"
source = f"$(GNU_SITE)/sed/sed-{pkgver}.tar.xz"
sha256 = "f79b0cfea71b37a8eeec8490db6c5f7ae7719c35587f21edb0617f370eeff633"
# most sed tests need bash
options = ["!check"]
def post_install(self):
self.rm(self.destdir / "usr/share/info", recursive = True)
| [
"q66@chimera-linux.org"
] | q66@chimera-linux.org |
411cf5fa7334255995866f446a758303501b3d03 | c64d6d1fce81212965b1df7d4f4d4e72f218243e | /primeinterval.py | 8b485cc98ed4eb6b867127c8ef849370beb1fcdb | [] | no_license | saipoojavr/saipoojacodekata | 384b0aa19f29c8b66f9498ebfbdf162cda4ddc97 | 3e38f1f06c4348f4262f654526c86d64e6893029 | refs/heads/master | 2020-05-23T01:01:54.615085 | 2019-12-06T15:29:54 | 2019-12-06T15:29:54 | 186,580,757 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | try:
num1,num2=input().split()
num1=int(num1)
num2=int(num2)
for iter in range(num1+1,num2):
if iter > 1:
for iter1 in range(2, (iter//2)+1):
if (iter % iter1) == 0:
break
else:
print(iter,end=" ")
else:
continue
except ValueError:
print("invalid") | [
"noreply@github.com"
] | saipoojavr.noreply@github.com |
35506474f9273f89aa6c97be42bafa5e7ce59f8f | c5294a8e9a6aa7da37850443d3a5d366ee4b5c35 | /build/spencer_people_tracking/tracking/people/spencer_tracking_metrics/catkin_generated/pkg.installspace.context.pc.py | 46de23c0fd1cd543af53a8f6721b45532c301043 | [] | no_license | scutDavid/ros_gradution_project | 6eab9a5776ae090ae8999d31e840a12a99020c79 | fbbd83ada5aa223809615d55a48e632699afd4b5 | refs/heads/master | 2020-03-07T18:39:24.084619 | 2018-04-25T13:41:04 | 2018-04-25T13:41:04 | 127,647,113 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "spencer_tracking_metrics"
PROJECT_SPACE_DIR = "/home/wwh/qqq/install"
PROJECT_VERSION = "1.0.8"
| [
"1902828943@qq.com"
] | 1902828943@qq.com |
f0a7c8aafc76bdb86f0181451612dd54e24692a6 | 3b7474148c07df7f4755106a3d0ada9b2de5efdc | /training/c32_pygame/e01_init/pingpong/base.py | e96dcb69be7879c36e6baf943def71ecdcc87791 | [] | no_license | juancsosap/pythontraining | 7f67466846138f32d55361d64de81e74a946b484 | 1441d6fc9544042bc404d5c7efffd119fce33aa7 | refs/heads/master | 2021-08-26T05:37:15.851025 | 2021-08-11T22:35:23 | 2021-08-11T22:35:23 | 129,974,006 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | import pygame as pg
from game import Game
class PingPong(Game):
    # Game subclass configured as "Ping Pong" with a 500x300 window and a
    # value of 5000 (the inline comment suggests a 5-second duration --
    # confirm the units against Game.__init__). Starts immediately.
    def __init__(self):
        super().__init__('Ping Pong', (500, 300), 5000) # 5 Seconds
        self.start()
| [
"juan.c.sosa.p@gmail.com"
] | juan.c.sosa.p@gmail.com |
fe0d3cc4a8e50673a6ec0d8510563c3969e9453a | 5201e237c0d58cdfdbc2fdf8103f9141161eb9f8 | /itkQuadEdgeMeshToQuadEdgeMeshFilterPython.pyi | 5e9ffac0e39ff61c112c4e2e98749a1d9ad06356 | [] | no_license | hjmjohnson/itk-stubs | 704f5b92a755e55b81d02fcad62a366143e125f3 | 771951d007ae425b758e088eae6f9e4ca0e4afb1 | refs/heads/main | 2023-01-22T05:50:33.649088 | 2020-12-04T01:31:09 | 2020-12-04T01:35:06 | 318,368,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | pyi | import itk.itkArrayPython
from itk.support import itkHelpers as itkHelpers
from typing import Any
class _SwigNonDynamicMeta(type):
__setattr__: Any = ...
def itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_New(): ...
class itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2(itk.itkQuadEdgeMeshBasePython.itkMeshToMeshFilterQEMD2QEMD2):
thisown: Any = ...
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
__New_orig__: Any = ...
Clone: Any = ...
__swig_destroy__: Any = ...
cast: Any = ...
def New(*args: Any, **kargs: Any): ...
New: Any = ...
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2___New_orig__: Any
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD2QEMD2_cast: Any
def itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_New(): ...
class itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3(itk.itkQuadEdgeMeshBasePython.itkMeshToMeshFilterQEMD3QEMD3):
thisown: Any = ...
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
__New_orig__: Any = ...
Clone: Any = ...
__swig_destroy__: Any = ...
cast: Any = ...
def New(*args: Any, **kargs: Any): ...
New: Any = ...
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3___New_orig__: Any
itkQuadEdgeMeshToQuadEdgeMeshFilterQEMD3QEMD3_cast: Any
def quad_edge_mesh_to_quad_edge_mesh_filter(*args: Any, **kwargs: Any): ...
def quad_edge_mesh_to_quad_edge_mesh_filter_init_docstring() -> None: ...
| [
"hans-johnson@uiowa.edu"
] | hans-johnson@uiowa.edu |
0b70aaf241d528de27a3b99b3c7693e370274802 | a291cbfcc82f593a2b4f23a150302ef4597c4aee | /MODULE TEMPLATES/new_model/__openerp__.py | 672c5358c7a870f700bdf2f3ccaa1b9638676823 | [] | no_license | Openworx/Odoo9 | 4517abcc22bb9fef9845a67dd87a7b33c08bc5c0 | 244027275aa92c0b40a0929854797ca4d173269d | refs/heads/9.0 | 2020-12-27T12:00:28.766178 | 2016-07-22T09:08:30 | 2016-07-22T09:08:30 | 64,064,163 | 1 | 4 | null | 2016-07-24T12:23:10 | 2016-07-24T12:23:09 | null | UTF-8 | Python | false | false | 499 | py | {
'name': "module.template",
'version': "1.0",
'author': "Vuente",
'category': "Tools",
'website':'http://vuente.com/',
'summary':'Gather statistics on all mail sent out via sendgrid',
'description':'Gather statistics on all mail sent out via sendgrid',
'license':'LGPL-3',
'data': [
'views/module_template_views.xml',
],
'demo': [],
'depends': ['mail'],
'images':[
'static/description/1.jpg',
],
'installable': True,
} | [
"steven@sythiltech.com"
] | steven@sythiltech.com |
dabf07bd6acf991849ba69581a0eed5338ec40af | 0c6cc6690f47ea4e040edaa4168a926c35c41467 | /notes/migrations/0001_initial.py | b9b91822c691ca15e1808ee4f587bce5633fe22e | [] | no_license | dexter2206/token_auth_example | da025431b21a1c2f5fa674f76f41c3b7d7d8e13f | c6abc7eaff8006cba2fca23891317739db1dc0d6 | refs/heads/master | 2023-01-30T13:45:28.626127 | 2020-12-16T09:07:48 | 2020-12-16T09:07:48 | 321,926,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # Generated by Django 3.1.4 on 2020-12-06 06:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Note model with text,
    # an auto-set creation timestamp, and an owner FK to the project's
    # (swappable) user model with cascade delete. Do not edit by hand.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"dexter2206@gmail.com"
] | dexter2206@gmail.com |
140201d08759e2b4083cd28857e9c8c07c15cceb | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/skeletons/gui/sounds.py | 3cfda75b516d31579b5be89e496200e19882e6bb | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 866 | py | # 2017.05.04 15:27:41 Střední Evropa (letní čas)
# Embedded file name: scripts/client/skeletons/gui/sounds.py
class ISoundsController(object):
    """Interface skeleton for the GUI sounds controller (decompiled stub).

    Every method raises NotImplementedError; concrete implementations
    must override all of them.
    """

    def init(self):
        raise NotImplementedError

    def fini(self):
        raise NotImplementedError

    def start(self):
        raise NotImplementedError

    def stop(self, isDisconnected = False):
        raise NotImplementedError

    @property
    def system(self):
        raise NotImplementedError

    def enable(self):
        raise NotImplementedError

    def disable(self):
        raise NotImplementedError

    def isEnabled(self):
        raise NotImplementedError
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\skeletons\gui\sounds.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:27:41 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
d0bf6690bfc337099763776b13a1dbb08aeba373 | d7ee76b7f1d6cd038982335792f15959a58a8395 | /SWEA/16234. 인구 이동.py | 4d0cb2dfbc496e3feda0472ae669354ed19a6cea | [] | no_license | min1378/-algorithm | 1c5dea6b2f03e4d376275cfccbf11b240bc659d9 | bfb720277160077a816deec21469a7e597c62d14 | refs/heads/master | 2021-08-02T06:54:10.478501 | 2021-07-31T14:03:01 | 2021-07-31T14:03:01 | 202,688,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | import sys
sys.stdin = open('16234.txt', 'r')
def isWall(x, y):
    """Return True when (x, y) lies outside the N x N board (N is a module
    global set from input)."""
    inside = 0 <= x <= N - 1 and 0 <= y <= N - 1
    return not inside
def DFS(x, y):
    # Iterative flood fill: labels every cell reachable from (x, y) with the
    # current (module-global) union_number. Two adjacent cells join the same
    # union when their population difference lies in [L, R]. Mutates the
    # module-global open_kingdom grid in place.
    open_kingdom[x][y] = union_number
    stack = []
    while True:
        dx = [-1, 0, 1, 0]
        dy = [0, 1, 0, -1]
        # Probe the four orthogonal neighbours of the current cell.
        for mode in range(4):
            test_x = x + dx[mode]
            test_y = y + dy[mode]
            if isWall(test_x, test_y) == False:
                # Unvisited neighbour whose population difference with the
                # *current* cell is within [L, R] joins this union.
                if open_kingdom[test_x][test_y] == -1 and abs(mapdata[test_x][test_y] - mapdata[x][y]) >= L and abs(
                        mapdata[test_x][test_y] - mapdata[x][y]) <= R:
                    open_kingdom[test_x][test_y] = union_number
                    stack.append([test_x, test_y])
        if stack == []:
            return
        # Depth-first: continue from the most recently discovered cell.
        x, y = stack.pop()
# TC = int(input())
# for test_case in range(1, TC+1):
# Board size N and the allowed population-difference interval [L, R].
N, L, R = map(int, input().strip().split())
mapdata = [list(map(int, input().strip().split())) for _ in range(N)]
count = 0
while True:
    # Re-label unions from scratch each round.
    open_kingdom = [[-1] * N for _ in range(N)]
    union_number = 0
    for i in range(N):
        for j in range(N):
            if open_kingdom[i][j] == -1:
                DFS(i, j)
                union_number += 1
    # Every cell is its own union => no population moved this round; stop.
    if union_number == N * N:
        break
    # Per union: [total population, cell count].
    union_info = [[0, 0] for _ in range(union_number)]
    for i in range(N):
        for j in range(N):
            union_info[open_kingdom[i][j]][0] += mapdata[i][j]
            union_info[open_kingdom[i][j]][1] += 1
    # Average population (floor division) for each non-empty union.
    union_last = [0] * union_number
    for i in range(len(union_info)):
        if union_info[i][0] != 0 and union_info[i][1] != 0:
            union_last[i] = (union_info[i][0] // union_info[i][1])
    # Redistribute: every cell takes its union's average.
    for j in range(N):
        for k in range(N):
            mapdata[j][k] = union_last[open_kingdom[j][k]]
    count += 1
print(count)
| [
"qwes123@naver.com"
] | qwes123@naver.com |
3c433c2411597b5f1fd7cffaeb342fe3d39949e8 | 7fa828f9565f04c9f9f5eaee57d38d93394e9613 | /web/consult/migrations/0027_auto_20190610_1111.py | 225e25f7e8630f9e34cab401337b5414f577444b | [] | no_license | tsependa/psycho | b3eeec8a72b3b50f209c8883dc06903b8094a0c3 | 99d16ef3711eea926b205c69487428497c15b8f1 | refs/heads/master | 2020-05-14T15:27:38.132015 | 2019-06-21T09:35:41 | 2019-06-21T09:35:41 | 181,853,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | # Generated by Django 2.2.1 on 2019-06-10 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('consult', '0026_auto_20190610_1101'),
]
operations = [
migrations.AlterField(
model_name='webinar',
name='authors',
field=models.ManyToManyField(blank=True, to='consult.Specialist'),
),
migrations.AlterField(
model_name='webinar',
name='themes',
field=models.ManyToManyField(blank=True, to='consult.Theme'),
),
]
| [
"tsependa.s@gmail.com"
] | tsependa.s@gmail.com |
bb1424f65f84b17ab1acd23c65c3d34157260888 | 474e74c654916d0a1b0311fc80eff206968539b1 | /venv/Lib/site-packages/aspose_words_cloud-19.9.2-py2.7.egg/asposewordscloud/models/requests/update_field_request.py | f882250a907ac7f4dfb352df84b42dcc309f9a96 | [] | no_license | viktor-tchemodanov/Training_Tasks_Python_Cloud | 4592cf61c2f017b314a009c135340b18fa23fc8f | b7e6afab4e9b76bc817ef216f12d2088447bd4cd | refs/heads/master | 2020-09-04T10:39:23.023363 | 2019-11-05T10:36:45 | 2019-11-05T10:36:45 | 219,712,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,046 | py | # --------------------------------------------------------------------------------
# <copyright company="Aspose" file="UpdateFieldRequest.py">
# Copyright (c) 2019 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# --------------------------------------------------------------------------------
class UpdateFieldRequest(object):
    """
    Request model for update_field operation.
    Initializes a new instance.
    :param name The document name.
    :param field Field data.
    :param node_path Path to the node, which contains collection of fields.
    :param index Object index.
    :param folder Original document folder.
    :param storage Original document storage.
    :param load_encoding Encoding that will be used to load an HTML (or TXT) document if the encoding is not specified in HTML.
    :param password Password for opening an encrypted document.
    :param dest_file_name Result path of the document after the operation. If this parameter is omitted then result of the operation will be saved as the source document.
    :param revision_author Initials of the author to use for revisions.If you set this parameter and then make some changes to the document programmatically, save the document and later open the document in MS Word you will see these changes as revisions.
    :param revision_date_time The date and time to use for revisions.
    """
    def __init__(self, name, field, node_path, index, folder=None, storage=None, load_encoding=None, password=None, dest_file_name=None, revision_author=None, revision_date_time=None):
        # Plain data holder: arguments are stored verbatim and serialized later
        # by the generated API client; no validation is performed here.
        self.name = name
        self.field = field
        self.node_path = node_path
        self.index = index
        self.folder = folder
        self.storage = storage
        self.load_encoding = load_encoding
        self.password = password
        self.dest_file_name = dest_file_name
        self.revision_author = revision_author
        self.revision_date_time = revision_date_time
| [
"vtchemodanov@hotmail.com"
] | vtchemodanov@hotmail.com |
8d0a1929391ce537bac19237e7b609d8b8ec3400 | a49bf2214bafeceaf37d005d0ae2d9b63ac8c304 | /dynamic/minCostPath.py | 3ee799cbf5f7575a3fe17c0d783c7fc8447b4f66 | [] | no_license | aishwat/missionPeace | aea0e4cb0ba483581b0c01980b2cefb5b5b7394f | 8c2accdc98c04ad1ba3a0f8c789a0261fc03e385 | refs/heads/master | 2023-03-19T09:20:36.920890 | 2023-03-07T14:01:05 | 2023-03-07T14:01:05 | 221,540,152 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | def minCostPath(cost):
    """Fill and print the DP table for the minimum-cost path problem.

    T[i][j] is the cheapest total cost of reaching cell (i, j) from (0, 0)
    when moves go right, down, or diagonally down-right.  The finished table
    is printed row by row; nothing is returned.
    """
    rows = len(cost)
    cols = len(cost[0])
    T = [row[:] for row in cost]  # copy with one fresh list per row
    for i in range(rows):
        for j in range(cols):
            if i == 0 and j == 0:
                T[0][0] = cost[0][0]
            elif i == 0 and j > 0:
                # First row: reachable only from the left neighbour.
                T[i][j] = T[i][j - 1] + cost[i][j]
            elif j == 0 and i > 0:
                # First column: reachable only from above.
                T[i][j] = T[i - 1][j] + cost[i][j]
            else:
                # Interior cell: best of coming from above, diagonal, or left.
                T[i][j] = min(T[i - 1][j], T[i - 1][j - 1], T[i][j - 1]) + cost[i][j]
    # Print the finished table (min cost to the goal is T[-1][-1]).
    for row in range(len(T)):
        print(T[row][:])
# Demo run on a 3x3 grid; expected bottom-right value is 8.
cost = [[1, 2, 3],
[4, 8, 2],
[1, 5, 3]]
minCostPath(cost)
| [
"aishwat.singh@target.com"
] | aishwat.singh@target.com |
0a9864369423cb5f4c5d6cf2954201ec246d8440 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /103/103.binary-tree-zigzag-level-order-traversal.613374206.Runtime-Error.leetcode.python3.py | b6ec29f0158f49d2359030171253dd806b2ce475 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | class Solution(object):
def zigzagLevelOrder(self, root):
if not root:
return []
result = []
zigzagLevelOrder(root.left, 0, result, True)
zigzagLevelOrder(root.right, 0, result, True)
def zigzagLevelOrder(root, level, result=[], leftToRight=True):
if leftToRight:
result[level].append(root.val)
else:
result[level].insert(0, root.val)
zigzagLevelOrder(root.left, level + 1, result, not leftToRight)
zigzagLevelOrder(root.right, level + 1, result, not leftToRight)
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
2dbef54f1ed0c3523baba94cdb90855b2bfd287d | a99dd87c230fb7849f9b1ef18b81701af112e581 | /algorithm_各种小算法/天平称物.py | ab56ee9c8c02637847b1e5ef293e0339917b741f | [] | no_license | wuhao2/Python_learning | a123ebc1b8c1850b4f9daa23cb949acef39d0f97 | 251b48dec58464ee932f0569697e1ab1e40f8c8c | refs/heads/master | 2020-12-02T17:44:34.981101 | 2017-09-27T11:17:39 | 2017-09-27T11:17:39 | 96,379,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | # 有4个砝码,总重量是40g. 砝码的质量是整数,且各不相等.
# 请确定她们的质量,使之能称出1-40g任何整数质量的物体
# r(1): 1 一个砝码只能表示1
# r(1,3):1,2,3,4 两个砝码能够表示1,2,3,4
# r(1,3, 5):1,2,3,4, 5, 6, 7, 8, 9 两个砝码能够表示1,2,3,4, 5, 6, 7, 8, 9
# r(1, 3, k): k-4--- k+4
def fun(max, n):
    """Choose n balance weights totalling `max` grams so every integer mass
    1..max can be weighed by placing weights on either pan.

    The first n-1 weights follow k = 2 * (sum so far) + 1, giving the
    classic 1, 3, 9, 27, ... sequence; the final weight is whatever
    remains to reach `max`.

    :param max: required total mass of all n weights
    :param n: number of weights
    :return: list of the n weights, or None when the leftover final weight
             is too heavy to keep the measurable range contiguous
    """
    res = []
    for _ in range(n - 1):
        res.append(2 * sum(res) + 1)
    k = max - sum(res)  # the last weight takes up the remainder
    if k - 2 * sum(res) < 2:
        return res + [k]
    return None
print(fun(40, 4)) # [1, 3, 9, 27]
# for a in range(10):
# for b in range(10):
# if a == b: continue
# for c in range(10):
# if a==c or b == c: continue
# for d in range(10):
# if d==a or d==b or d==c: continue
# s = a + b + c + d
# if s in [range(1,11)]:
# print(a, b, c,d)
| [
"wu_hao1314520@126.com"
] | wu_hao1314520@126.com |
5a750796f2efa11a176606dd0ea1a2f006ab47fa | 973713f993166b1d0c2063f6e84361f05803886d | /Day01-15/08_exercise_4.py | 34f8f3b42450034af9ca988bc2ce1e6cd56a38b4 | [
"MIT"
] | permissive | MaoningGuan/Python-100-Days | 20ad669bcc0876b5adfbf2c09b4d25fd4691061a | d36e49d67a134278455438348efc41ffb28b778a | refs/heads/master | 2022-11-17T12:24:45.436100 | 2020-07-18T02:24:42 | 2020-07-18T02:24:42 | 275,157,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
练习2:定义一个类描述平面上的点并提供移动点和
计算到另一个点距离的方法。
"""
from math import sqrt
class Point(object):
    """A point in the 2D plane that can be moved and measured against others."""

    def __init__(self, x=0, y=0):
        """Create a point at (x, y); defaults to the origin.

        :param x: horizontal coordinate
        :param y: vertical coordinate
        """
        self.x = x
        self.y = y

    def move_to(self, x, y):
        """Relocate the point to the absolute position (x, y).

        :param x: new horizontal coordinate
        :param y: new vertical coordinate
        """
        self.x = x
        self.y = y

    def move_by(self, dx, dy):
        """Shift the point by the given offsets.

        :param dx: horizontal displacement
        :param dy: vertical displacement
        """
        self.x += dx
        self.y += dy

    def distance_to(self, other):
        """Return the Euclidean distance from this point to `other`.

        :param other: another Point
        """
        return sqrt((self.x - other.x) ** 2 + (self.y - other.y) ** 2)

    def __str__(self):
        return '(%s, %s)' % (str(self.x), str(self.y))
def main():
    """Demo: create two points, move one, and print the distance between them."""
    p1 = Point(3, 5)
    p2 = Point()  # defaults to the origin (0, 0)
    print(p1)
    print(p2)
    p2.move_by(-1, 2)
    print(p2)
    print(p1.distance_to(p2))
if __name__ == '__main__':
    main()
| [
"1812711281@qq.com"
] | 1812711281@qq.com |
92afa5200f7f1ac9d5c85dd86c3412c74f3cf144 | b1403c5a0f8dcf5eec881367f3928c6faf047b1d | /PRINCIPIANTE/1051-1100/1064.py | 8d8e8ace7cd9198a102711c852a9d193918604cc | [] | no_license | apesquero/URI | ba56f9d597e0e781bf85dc14eeeedf9bc206fbc4 | 8c45813d99eb903405ebe1a0e2c6618e87025641 | refs/heads/master | 2021-06-19T23:56:51.798180 | 2021-01-01T18:43:20 | 2021-01-01T18:43:20 | 138,192,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # -*- coding: utf-8 -*-
# Read six numbers; report how many are positive and their average (URI 1064).
valores = []
positivos = 0
promedio = 0  # running sum of the positive values; averaged at the end
for x in range(6):
    valor = float(input())
    valores.append(valor)
for v in valores:
    if v > 0:
        positivos += 1
        promedio += v
# NOTE(review): raises ZeroDivisionError when none of the six inputs is
# positive — presumably fine for the judge's data, but worth confirming.
promedio = promedio / positivos
print("%d valores positivos" % positivos)
print("%.1f" % promedio)
| [
"apesquero@gmail.com"
] | apesquero@gmail.com |
c87fe4ab2c443b2405ba547a94c691ccbb619885 | b260e47d68664c943079be5046e708733a02473a | /BOJ/working/not/BOJ_1969.py | cd5a94c7bed266d9bea4634b53083be4afecaf3a | [] | no_license | 21nkyu/note001 | 911c52a990676fb44cf619872c370a5b451f9da3 | edcf5ad61d71bc1ed113266bf40bd69183755a89 | refs/heads/main | 2023-08-16T12:29:20.871412 | 2021-09-28T11:11:40 | 2021-09-28T11:11:40 | 385,918,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # https://www.acmicpc.net/problem/1969
# a = set('TATGATAC')
# b = set('TAAGCTAC')
# print(len(a|b))
# Work-in-progress attempt at BOJ 1969 (find the DNA string minimizing
# total Hamming distance); filed under "working/not" — incomplete.
n, m = map(int, input().split())
ncltd = [input() for _ in range(n)]
print(ncltd)  # debug output
# cnt = 0
for i in ncltd:
    for j in ncltd:
        print(i, j)  # debug output
        if i == j:
            continue
        elif i != j:
            # NOTE(review): set(i) | set(j) only collects the distinct letters
            # of both strings; it does not compare positions, so this does not
            # compute the per-position mismatch count the problem needs.
            diff = list(set(i)|set(j))
            print(diff)
# cnt+=diff
# print(cnt)
| [
"adele7178@naver.com"
] | adele7178@naver.com |
2f8a5bb8f219fb8e4d1ee917bb35f56876c6dc43 | efa755e04c9aa7c5a43e84f97191bd35dbea39fe | /leetcode/easy/longestCommonPrefix.py | ff3fe918d9364ab9341680efcfd57a6203eec0a3 | [] | no_license | himanshush200599/codingPart | 8f5b825f2f1bed8055e5ff71c1e339f16f4ac6a9 | bee7e3629f4b59fdc5223da91c66efc44f4dd262 | refs/heads/master | 2021-06-27T19:01:45.632503 | 2020-04-12T17:31:01 | 2020-04-12T17:31:01 | 148,366,936 | 4 | 4 | null | 2020-10-01T03:30:46 | 2018-09-11T19:10:44 | Python | UTF-8 | Python | false | false | 1,517 | py | class Solution:
def longestCommonPrefix(self,strs):
"""
:type strs: List[str]
:rtype: str
"""
"""
1.find length of all string
2. Loop from smallest lenght string and pick charcter by character and compare them
3.if all the characters till now are same then add it to empty string
4. return if matching doesnot occur between character array.
"""
if len(strs)==0:
return ""
if len(strs)==1:
return strs[0]
cp = ''
l = [len(st) for st in strs]
for i in range(min(l)):
tmp = [s[i] for s in strs]
if min(tmp)==max(tmp): #check if all of them are the same
cp = cp + tmp[0]
else:
return cp
return cp
def longestCommonPrefix2(self, strs):
""" Vertical scanning algorithm. """
if not strs:
return None
# for each character in strs[0] at position i, compare it with
# character at i-th position for all the strings.
for i in range(len(strs[0])):
char = strs[0][i]
for j in range(1, len(strs)):
if i >= len(strs[j]) or strs[j][i] != char:
return strs[0][:i]
a = Solution()
print(a.longestCommonPrefix2(['letcode','meet','kee']))
| [
"himanshush214@gmail.com"
] | himanshush214@gmail.com |
2c66f64242edb353201eec79347e39791f6cb66f | 89f077a47ed6498f68397c63efc9364e80f2c659 | /example_project/news_with_archive/factories.py | ccac2841b0deb9bb7dae7ee0f0e0552fecd3fb91 | [
"MIT"
] | permissive | richardbarran/django-minipub | b03023113eddcd99bbaa4f6f475f0cfebafcc0ce | a6a382da2b34fd847e53f31c981afef4e95efc9e | refs/heads/master | 2023-08-06T04:56:27.677247 | 2023-07-26T19:57:17 | 2023-07-26T19:57:17 | 14,829,166 | 8 | 2 | MIT | 2021-05-14T08:18:39 | 2013-11-30T22:04:34 | Python | UTF-8 | Python | false | false | 424 | py | from django.utils.text import slugify
import factory
from .models import Article
class ArticleFactory(factory.django.DjangoModelFactory):
    """factory_boy factory producing Article instances (archived by default)."""
    class Meta:
        model = Article
    # Create some dummy default values for the title (which has to be unique).
    title = factory.Sequence(lambda n: f'article{n:0>3}')
    # Slug is derived lazily from whatever title the instance ends up with.
    slug = factory.LazyAttribute(lambda a: slugify(f'{a.title}'))
    status = Article.STATUS.archived
| [
"richard@arbee-design.co.uk"
] | richard@arbee-design.co.uk |
42eaaa7fe1f15e1c81fb0a8df3244aefd2cc8034 | 304e75224229786ba64c6ef2124007c305019b23 | /src/easy/answer/unique_number_of_occurrences.py | 19b7c199cb0dcdd20c911ff1fd466dff3691cbac | [] | no_license | Takuma-Ikeda/other-LeetCode | 9179a8100e07d56138fd3f3f626951195e285da2 | 499616d07011bee730b9967e9861e341e62d606d | refs/heads/master | 2023-04-14T06:09:35.341039 | 2023-04-10T02:29:18 | 2023-04-10T02:29:18 | 226,260,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | from typing import List
from collections import Counter
class Solution:
    def uniqueOccurrences(self, arr: List[int]) -> bool:
        """Return True iff every distinct value in arr occurs a distinct number of times."""
        occurrence_counts = list(Counter(arr).values())
        # Duplicated counts collapse in the set, making it shorter than the list.
        return len(occurrence_counts) == len(set(occurrence_counts))
# 模範解答
# https://leetcode.com/problems/unique-number-of-occurrences/discuss/393086/Solution-in-Python-3-(one-line)-(beats-100.00-)
'''
class Solution:
def uniqueOccurrences(self, A: List[int]) -> bool:
return (lambda x: len(x) == len(set(x)))(collections.Counter(A).values())
'''
# https://leetcode.com/problems/unique-number-of-occurrences/discuss/458512/Easy-python-solution
'''
class Solution:
def uniqueOccurrences(self, arr: List[int]) -> bool:
occur = []
set_arr = set(arr)
for item in set_arr:
occur.append(arr.count(item))
return len(occur) == len(set(occur))
'''
| [
"el.programdear@gmail.com"
] | el.programdear@gmail.com |
cdc18d75f2dead0fcb2b5bba384d4eb25bb45c94 | 46c8c0e435877f4564970198e8a177e9883103e9 | /16_3Sum_Closest/3sum_closest.py | 3142387fb7720195c4419a1c9c84f186a72c08c4 | [] | no_license | Brady31027/leetcode | 26f7c1f4e8bfad0dee4d819f91aa93a241223330 | d66be3a8f002875097754df6138c704e28b79810 | refs/heads/master | 2018-02-16T14:52:31.976844 | 2017-08-10T19:18:59 | 2017-08-10T19:18:59 | 63,519,661 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | class Solution(object):
    def threeSumClosest(self, nums, target):
        """
        Return the sum of three elements of nums closest to target (LeetCode 16).

        Sorts nums in place, then for each distinct pivot runs a two-pointer
        scan over the suffix to its right.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        def two_sum(pivot, nums, target):
            # Two-pointer scan over sorted `nums`: return the triple sum
            # (pivot + two elements) closest to target, or None when fewer
            # than two elements remain.
            if len(nums) < 2: return None
            head, tail, min_dis, ret_sum = 0, len(nums) - 1, 2147483647, 0
            while head < tail:
                sum = pivot + nums[head] + nums[tail]
                if sum == target:
                    return sum  # an exact hit cannot be beaten
                elif sum > target:
                    tail -= 1
                else:
                    head += 1
                dis = abs(target - sum)
                if dis < min_dis:
                    min_dis, ret_sum = dis, sum
            return ret_sum
        nums.sort()  # NOTE: mutates the caller's list
        closest_sum, min_dis, prev_min_dis = target, 2147483647, 2147483647
        for i in range(len(nums)):
            if i == 0 or (i > 0 and nums[i] != nums[i - 1]):  # skip duplicate pivots
                sum = two_sum(nums[i], nums[i+1:], target)
                if sum is not None and abs(target - sum) < min_dis:
                    closest_sum = sum
                    min_dis = abs(target - sum)
                # NOTE(review): early exit assumes the best distance cannot
                # improve again once it stops shrinking between successive
                # pivots — a heuristic, not obviously true for all inputs;
                # confirm before reusing this code elsewhere.
                if min_dis >= prev_min_dis: return closest_sum
                prev_min_dis = min_dis
        return closest_sum
| [
"brady31027@gmail.com"
] | brady31027@gmail.com |
2ba3e7494749923c548df3a1ef4312d4f86d612d | c46754b9600a12df4f9d7a6320dfc19aa96b1e1d | /src/transformers/models/upernet/configuration_upernet.py | f7ad5d04652c909b6c172b4f1375029930b3c8db | [
"Apache-2.0"
] | permissive | huggingface/transformers | ccd52a0d7c59e5f13205f32fd96f55743ebc8814 | 4fa0aff21ee083d0197a898cdf17ff476fae2ac3 | refs/heads/main | 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 | Apache-2.0 | 2023-09-14T20:44:49 | 2018-10-29T13:56:00 | Python | UTF-8 | Python | false | false | 4,964 | py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" UperNet model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`UperNetForSemanticSegmentation`]. It is used to
    instantiate an UperNet model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the UperNet
    [openmmlab/upernet-convnext-tiny](https://huggingface.co/openmmlab/upernet-convnext-tiny) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
            The configuration of the backbone model.
        hidden_size (`int`, *optional*, defaults to 512):
            The number of hidden units in the convolutional layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
            Pooling scales used in Pooling Pyramid Module applied on the last feature map.
        use_auxiliary_head (`bool`, *optional*, defaults to `True`):
            Whether to use an auxiliary head during training.
        auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
            Weight of the cross-entropy loss of the auxiliary head.
        auxiliary_in_channels (`int`, *optional*, defaults to 384):
            Number of input channels for the auxiliary head.
        auxiliary_channels (`int`, *optional*, defaults to 256):
            Number of channels to use in the auxiliary head.
        auxiliary_num_convs (`int`, *optional*, defaults to 1):
            Number of convolutional layers to use in the auxiliary head.
        auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
            Whether to concatenate the output of the auxiliary head with the input before the classification layer.
        loss_ignore_index (`int`, *optional*, defaults to 255):
            The index that is ignored by the loss function.
    Examples:
    ```python
    >>> from transformers import UperNetConfig, UperNetForSemanticSegmentation
    >>> # Initializing a configuration
    >>> configuration = UperNetConfig()
    >>> # Initializing a model (with random weights) from the configuration
    >>> model = UperNetForSemanticSegmentation(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "upernet"
    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            # No backbone given: fall back to a default ResNet backbone.
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # A plain dict (e.g. loaded from JSON) is rebuilt into the matching
            # backbone config class, selected by its "model_type" key.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
| [
"noreply@github.com"
] | huggingface.noreply@github.com |
190dd3430f6ff7e3ec721742114be2f4c8108fad | 61b86dee4dc477d6abbc7cd49693c5496928a976 | /api/serializers.py | eec382ed107147e128139aaf5aba5ca89630b896 | [] | no_license | Spirovanni/techforce3 | 3a748a49025808ac1adabc38f9f3251a3d60a68f | 5e9e80df43a887ba32f35f82b0119a15f7483d07 | refs/heads/master | 2020-09-21T11:21:19.492747 | 2019-12-07T16:06:05 | 2019-12-07T16:06:05 | 224,771,839 | 0 | 0 | null | 2019-12-06T06:06:16 | 2019-11-29T03:42:04 | Python | UTF-8 | Python | false | false | 890 | py | from rest_framework import serializers
from . import models
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
class UserSerializer(serializers.ModelSerializer):
    """Serializer for auth users; issues a DRF auth token on account creation."""
    class Meta:
        model = models.User
        # Password is accepted on input but never echoed back in responses.
        fields = ['id', 'username', 'password']
        extra_kwargs = {'password': {'write_only': True, 'required': True}}
    def create(self, validated_data):
        # create_user() hashes the password (a plain create() would store it raw).
        user = User.objects.create_user(**validated_data)
        Token.objects.create(user=user)
        return user
class OrganisationSerializer(serializers.ModelSerializer):
    """Exposes an organisation plus its rating aggregates (count and average)."""
    class Meta:
        model = models.Organisation
        fields = ['id', 'orgName', 'orgDescription', 'no_of_ratings', 'avg_ratings']
class RatingSerializer(serializers.ModelSerializer):
    """One user's star rating of one organisation."""
    class Meta:
        model = models.Rating
        fields = ['id', 'stars', 'user', 'organisation']
| [
"blackshieldsx@gmail.com"
] | blackshieldsx@gmail.com |
c5b819de82c5c8f2461260ccff3050691e602181 | 691af65ccd1aa108ab0f76b6e52de3dfe863fcef | /thonny/languages.py | e5a67c3f202a204afea510b58156d552444cbed1 | [
"MIT"
] | permissive | mlogotheti/thonny | 9fdb9e397354c34885bde8046a19566e8d5d10d3 | a715f144b093a0f3289e9a21fef5f4ef18c2b01b | refs/heads/master | 2020-12-20T11:21:30.899807 | 2020-01-22T08:14:26 | 2020-01-22T08:14:26 | 236,057,336 | 1 | 0 | MIT | 2020-01-24T18:15:14 | 2020-01-24T18:15:13 | null | UTF-8 | Python | false | false | 1,286 | py | from thonny import get_workbench
BASE_LANGUAGE_CODE = "en_US"
BASE_LANGUAGE_NAME = "English"
# http://www.internationalphoneticalphabet.org/languages/language-names-in-native-language/
LANGUAGES_DICT = {
"de_DE": "Deutsch [BETA]",
"et_EE": "Eesti",
BASE_LANGUAGE_CODE: BASE_LANGUAGE_NAME,
"es_ES": "Español [ALPHA]",
"fr_FR": "Français [BETA]",
"it_IT": "Italiano [ALPHA]",
"nl_NL": "Nederlands [BETA]",
"pl_PL": "Polski [BETA]",
"pt_PT": "Português (PT) [BETA]",
"pt_BR": "Português (BR) [ALPHA]",
"ru_RU": "Русский",
"tr_TR": "Türkçe [BETA]",
"uk_UA": "Українська",
"zh_TW": "繁體中文-TW",
"zh_CN": "简体中文 ",
"ja_JP": "日本語 [ALPHA]",
"lt_LT": "Lietuvių",
"el_GR": "Ελληνικά [BETA]",
}
# how many spaces to add to button caption in order to make whole text visible
BUTTON_PADDING_SIZES = {"zh_TW": 4, "zh_CN": 4, "ja_JP": 4}
def get_button_padding():
    """Return the run of spaces appended to button captions for the current UI language."""
    language = get_workbench().get_option("general.language")
    # Languages missing from the table need no extra padding (0 spaces -> "").
    return BUTTON_PADDING_SIZES.get(language, 0) * " "
def get_language_code_by_name(name):
    """Return the locale code whose display name equals `name` (None if unknown)."""
    for language_code, display_name in LANGUAGES_DICT.items():
        if display_name == name:
            return language_code
| [
"aivar.annamaa@gmail.com"
] | aivar.annamaa@gmail.com |
dc18ff60de2491eb93ba188c4528485a3c677d86 | 4ad28a87a58f1bc91976403440499bcf2cb75442 | /plugins/tidy3d/gtidy3d/config.py | 9642536bcabdcae536b29390dfdf17382ec53bf3 | [
"MIT"
] | permissive | Wang-Clark/gdsfactory | 782eee9ef1b6081f1bff6be8b7f3b36333e5b7df | 1e7d26c6e52f17ae7844f7f86af479b5bff790a8 | refs/heads/master | 2023-07-25T22:34:28.688421 | 2021-09-11T00:44:06 | 2021-09-11T00:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | """Package configuration.
FIXME: tidy3d does not have __version__
# import tidy3d as td
# logger.info(f"tidy3d {td.__version__}")
"""
import pathlib
from loguru import logger
import gdsfactory as gf
__version__ = "0.0.1"
home = pathlib.Path.home()
cwd = pathlib.Path.cwd()
cwd_config = cwd / "config.yml"
module_path = pathlib.Path(__file__).parent.absolute()
repo_path = module_path.parent
sparameters = repo_path / "sparameters"
sparameters.mkdir(exist_ok=True, parents=True)
# home_path = pathlib.Path.home() / ".gf.
# home_path.mkdir(exist_ok=True, parents=True)
# logpath = home_path / "gtidy3d.log"
logpath = cwd / "gtidy3d.log"
logger.info(f"gf.{gf.__version__}")
logger.info(f"gtidy3d {__version__}")
logger.add(sink=logpath)
class Path:
    """Namespace of filesystem locations used by the package."""
    module = module_path
    repo = repo_path
    sparameters = repo_path / "sparameters"
    results = home / ".tidy3d"
PATH = Path()
PATH.results.mkdir(exist_ok=True)  # make sure the tidy3d results cache directory exists
__all__ = ["PATH"]
if __name__ == "__main__":
    print(PATH.repo)
| [
"j"
] | j |
01e0095d90bb3cc9fc7861bb32ceb23571630584 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/vmware/plugins/modules/vmware_vmkernel_ip_config.py | fd174f8cdb83f1def3297e97372cc9f76e00012c | [
"MIT",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 3,609 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: vmware_vmkernel_ip_config
short_description: Configure the VMkernel IP Address
description:
- Configure the VMkernel IP Address
author:
- Joseph Callen (@jcpowermac)
- Russell Teague (@mtnbikenc)
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
vmk_name:
description:
- VMkernel interface name
required: True
type: str
ip_address:
description:
- IP address to assign to VMkernel interface
required: True
type: str
subnet_mask:
description:
- Subnet Mask to assign to VMkernel interface
required: True
type: str
extends_documentation_fragment:
- community.vmware.vmware.documentation
'''
EXAMPLES = r'''
# Example command from Ansible Playbook
- name: Configure IP address on ESX host
community.vmware.vmware_vmkernel_ip_config:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
vmk_name: vmk0
ip_address: 10.0.0.10
subnet_mask: 255.255.255.0
delegate_to: localhost
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.vmware.plugins.module_utils.vmware import HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec
def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask):
    """Assign a static IP/netmask to the named VMkernel NIC on an ESXi host.

    Returns True when the NIC was reconfigured, False when it already had the
    requested address or no NIC with that name exists (Ansible 'changed' flag).
    """
    network_system = host_system.configManager.networkSystem
    for nic in network_system.networkConfig.vnic:
        if nic.device != vmk_name:
            continue
        nic_spec = nic.spec
        if nic_spec.ip.ipAddress != ip_address:
            # Switch the interface to a static address and push the change.
            nic_spec.ip.dhcp = False
            nic_spec.ip.ipAddress = ip_address
            nic_spec.ip.subnetMask = subnet_mask
            network_system.UpdateVirtualNic(vmk_name, nic_spec)
            return True
    return False
def main():
    """Ansible entry point: parse arguments, connect to vSphere, apply the IP config."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
                              ip_address=dict(required=True, type='str'),
                              subnet_mask=dict(required=True, type='str')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    vmk_name = module.params['vmk_name']
    ip_address = module.params['ip_address']
    subnet_mask = module.params['subnet_mask']
    try:
        # NOTE(review): the second positional arg (False) is passed untyped here;
        # confirm its meaning against the vmware module_utils signature.
        content = connect_to_api(module, False)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        host_system = list(host)[0]  # operate on the first host object found
        changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask)
        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        # Catch-all so the module reports a failure instead of a traceback.
        module.fail_json(msg=str(e))
if __name__ == '__main__':
    main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.