blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9f3aba0a3fae8aebb7bce9c64a27e7c4c956ea66 | 480d67d9a3d0dfacc3cf8103450dae1669a52d9e | /setup.py | 42aa82870668e6346f68ebd84546f18e48abbd35 | [] | no_license | alenzhao/probabilistic2020 | 3045261e8855b959e50357edd7533ec4af5b5294 | f748fad88e50e5229eb765ac59cf731a734e22e2 | refs/heads/master | 2021-01-12T17:58:40.989527 | 2016-10-12T19:46:52 | 2016-10-12T19:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,029 | py | #from distutils.core import setup
from setuptools import setup
from distutils.extension import Extension
import sys
# fix problems with pythons terrible import system
import os
file_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(file_dir, 'prob2020/cython'))
# Build configuration for the probabilistic2020 package: three C++/Cython
# extension modules plus standard setuptools metadata.
SRC_DIR = 'prob2020'

# '--use-cython' on the command line regenerates the C++ sources from the
# .pyx files; otherwise the shipped .cpp files are compiled directly.
if '--use-cython' in sys.argv:
    USE_CYTHON = True
    sys.argv.remove('--use-cython')
else:
    USE_CYTHON = False

import numpy as np

ext = '.pyx' if USE_CYTHON else '.cpp'
# Extension modules; all need the NumPy headers, cutils also needs the
# project's C++ sources.
extensions = [
    Extension(SRC_DIR + ".cython.uniform_kde",
              [SRC_DIR +'/cython/uniform_kde'+ext],
              language='c++',
              include_dirs=[SRC_DIR + '/cython/',
                            np.get_include()]),
    Extension(SRC_DIR + ".cython.gaussian_kde",
              [SRC_DIR + '/cython/gaussian_kde'+ext],
              language='c++',
              include_dirs=[SRC_DIR + '/cython/',
                            np.get_include()]),
    Extension(SRC_DIR + ".cython.cutils",
              [SRC_DIR + "/cython/cutils"+ext],
              language='c++',
              include_dirs=[SRC_DIR + '/cpp/',
                            SRC_DIR + '/cython/',
                            np.get_include()])
]

if USE_CYTHON:
    from Cython.Build import cythonize
    extensions = cythonize(extensions)

if 'build_ext' in sys.argv:
    # just build cython extension module if build_ext subcommand is used
    setup(ext_modules = extensions)
else:
    # Full install: importing the package here to read its version assumes
    # prob2020 is importable without the compiled extensions — TODO confirm.
    import prob2020
    version = prob2020.__version__
    AUTHOR = 'Collin Tokheim'
    EMAIL = 'fake@gmail.com'
    URL = 'https://github.com/KarchinLab/probabilistic2020'
    DESCRIPTION = 'Probabilistic 20/20'
    PACKAGES = [SRC_DIR, SRC_DIR + '.python',
                SRC_DIR + '.cython', SRC_DIR + '.cpp',
                SRC_DIR + '.console']
    setup(name='probabilistic2020',
          version=version,
          description=DESCRIPTION,
          author=AUTHOR,
          author_email=EMAIL,
          url=URL,
          packages=PACKAGES,
          license='JHU Academic Software License Agreement (free for non-commercial use)',
          install_requires=['numpy', 'scipy', 'pandas', 'pysam'],
          # R scripts used by the console entry points are shipped as data.
          package_data={
              SRC_DIR+'.console': ['*.R']
          },
          entry_points={
              'console_scripts':[
                  'probabilistic2020 = prob2020.console.probabilistic2020:cli_main',
                  'mut_annotate = prob2020.console.annotate:cli_main',
                  'extract_gene_seq = prob2020.console.extract_gene_seq:cli_main',
                  'simulate_non_silent_ratio = prob2020.console.simulate_non_silent_ratio:cli_main'
              ]
          },
          long_description=open('README.rst').read(),
          classifiers=['Topic :: Scientific/Engineering :: Bio-Informatics',
                       'Environment :: Console',
                       'Intended Audience :: Developers',
                       'Intended Audience :: Science/Research'],
          ext_modules=extensions
          )
| [
"collintokheim@gmail.com"
] | collintokheim@gmail.com |
99dc3ac93eb4286545895911c78ca1aa95a714b8 | b7f8c050ca4ef10b1319afccb276e44cf18a2010 | /setup.py | 1507a4b936694833ed750ec7d982e98b8d7d4447 | [
"Apache-2.0"
] | permissive | dyna-dot/pyglottolog | 22496a2109cc2a775a67e78d8331883a0cfdac33 | 0f24f24a46d1f510c975337e4c0d8c23b357c8bd | refs/heads/master | 2020-08-14T20:59:10.473487 | 2019-09-18T10:15:34 | 2019-09-18T10:15:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,002 | py | from setuptools import setup, find_packages
# Packaging metadata for pyglottolog; sources live under src/ (src-layout).
setup(
    name='pyglottolog',
    version='2.2.2.dev0',
    author='Robert Forkel',
    author_email='forkel@shh.mpg.de',
    description='python package for glottolog data curation',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    keywords='data linguistics',
    license='Apache 2.0',
    url='https://github.com/clld/pyglottolog',
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    platforms='any',
    python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
    include_package_data=True,
    zip_safe=False,
    entry_points={
        'console_scripts': ['glottolog=pyglottolog.__main__:main'],
    },
    # Several dependencies are pinned differently for Python < 3.5 vs >= 3.5
    # via PEP 508 environment markers.
    install_requires=[
        'six>=1.9',
        'csvw<=1.5.6; python_version < "3.5"',
        'csvw>=1.5.6; python_version >= "3.5"',
        'clldutils==2.8.0; python_version < "3.5"',
        'clldutils>=2.8.0; python_version >= "3.5"',
        'purl',
        'pycldf==1.6.4; python_version < "3.5"',
        'pycldf>=1.6.4; python_version >= "3.5"',
        'sqlalchemy',
        'tqdm',
        'pybtex>=0.22',
        'latexcodec',
        'unidecode',
        'whoosh',
        'attrs>=18.1',
        'pycountry>=18.12.8',
        'termcolor',
        'newick<=0.9.2; python_version < "3.5"',
        'newick>=0.9.2; python_version >= "3.5"',
        'markdown',
        'bs4',
        'requests',
        'nameparser',
    ],
    extras_require={
        'dev': ['tox>=2.9', 'flake8', 'pep8-naming', 'wheel', 'twine'],
        'test': ['mock>=2', 'pytest>=3.6', 'pytest-mock', 'pytest-cov'],
    },
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
8ad774489dd1562b14923368a5c82d5750bedd7f | 9a2fd5e27d3f811cb18763ed388c2d56ae9907b6 | /模块/模块位置.py | d578991757412884c3b1d2f838aa6ddc40aa2701 | [] | no_license | wzc-ob/PycharmProjects | 5297ce60bade883495e5dbdb614131d31c47682e | 09f5ad6004dbdc83d456cabd78b769fde13d5357 | refs/heads/master | 2020-05-05T07:12:38.789400 | 2019-04-06T10:06:08 | 2019-04-06T10:06:08 | 179,817,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # import sys
# print(sys.path)
# sys.path.append('D:\\PycharmProjects\\模块')#添加Apath为模块查找路径
import sys
# Make the local module directory importable before importing from it.
sys.path.append('D:\\PycharmProjects\\模块')
import module_test
# Call a function from the module, then read one of its variables.
module_test.m_t_pr()
print('使用module_test模块中的变量:',module_test.name)
| [
"43775612+wzc-ob@users.noreply.github.com"
] | 43775612+wzc-ob@users.noreply.github.com |
19b090d3d81b3707480c416cd85aa5daae56416a | a77fcccb2e46f06842daab98f1057209fe506b18 | /BackJoonOnline/[BOJ]1543_문서검색.py | 1db73e8e2654d5f19a821041568e58ef2a692622 | [] | no_license | gan-ta/Algorithm | 0c55344a6eb8038c9247485a50bc6324e4ef4c3e | 80313278e6e8461891519fd556a65998939bc564 | refs/heads/master | 2023-05-27T17:03:28.236320 | 2021-06-14T18:48:35 | 2021-06-14T18:48:35 | 257,796,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | def calc_dp(n):
max_value = 0
for i in range(0, n - len(sub_str) + 1):
if max_value < dp[i]:
max_value = dp[i]
if full_str[n:].startswith(sub_str):
dp[n] = max_value + 1
else:
dp[n] = max_value
if __name__ == '__main__':
full_str = input()
sub_str = input()
dp = [0] * len(full_str)
for i in range(0, len(full_str)):
calc_dp(i)
print(max(dp))
| [
"gilmat@naver.com"
] | gilmat@naver.com |
f1874cffe8589d7d7cb20dd323029128249aa73e | 71c247dc9bc9fe8c16daec09f337010043ca2943 | /questions/migrations/0002_petmodel.py | 7b116ee386fe280497d4e5214568220213cb4710 | [] | no_license | elcolie/muy | c001c64499c7ecf4a18fd4da5c7263f880a4be2f | 7e8fcc83c9d875806f18c2a57f1d8b6351e6be04 | refs/heads/master | 2020-07-06T03:10:09.449152 | 2019-12-06T12:24:04 | 2019-12-06T12:24:04 | 125,982,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | # Generated by Django 2.2.4 on 2019-10-16 07:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``PetModel`` table with a ``kind`` choice field (cat/dog)."""

    # Must be applied after the questions app's initial migration.
    dependencies = [
        ('questions', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='PetModel',
            fields=[
                # Standard auto-incrementing primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Restricted to the two supported pet kinds.
                ('kind', models.CharField(choices=[('cat', 'Cat'), ('dog', 'Dog')], max_length=100)),
            ],
        ),
    ]
| [
"sarit@elcolie.com"
] | sarit@elcolie.com |
18839609dbc881470c5684a1cfaa7e08aa130f9f | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/common/types/text_label.py | 0c14a9e62aed19713a0e6374abd7c8229def98dd | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,724 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v5.common',
marshal='google.ads.googleads.v5',
manifest={
'TextLabel',
},
)
class TextLabel(proto.Message):
    r"""A type of label displaying text on a colored background.

    Attributes:
        background_color (google.protobuf.wrappers_pb2.StringValue):
            Background color of the label in RGB format. This string
            must match the regular expression
            '^#([a-fA-F0-9]{6}|[a-fA-F0-9]{3})$'. Note: The background
            color may not be visible for manager accounts.
        description (google.protobuf.wrappers_pb2.StringValue):
            A short description of the label. The length
            must be no more than 200 characters.
    """

    # Field numbers (1, 2) mirror the generated protobuf message and must
    # not be changed.
    background_color = proto.Field(proto.MESSAGE, number=1,
        message=wrappers.StringValue,
    )
    description = proto.Field(proto.MESSAGE, number=2,
        message=wrappers.StringValue,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
bd39e9509d27e59130f875225feec6bf5ec17ecc | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_thurstone.py | e349ecc070dc0a55dfa91d013b7380c34c3f5900 | [
"Apache-2.0"
] | permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.thurstone import thurstone
def test_thurstone():
    """Test module thurstone.py.

    Downloads thurstone.csv into a temporary directory and checks that the
    extracted data has 9 rows and 9 columns. The temporary directory is
    removed whether or not the check passes.
    """
    test_path = tempfile.mkdtemp()
    try:
        x_train, metadata = thurstone(test_path)
        assert x_train.shape == (9, 9)
    finally:
        # Clean up on success *and* failure.  The original removed the
        # directory only on failure (leaking it on success) and re-raised
        # with `raise()`, which raises TypeError instead of propagating
        # the original AssertionError.
        shutil.rmtree(test_path)
| [
"dustinviettran@gmail.com"
] | dustinviettran@gmail.com |
f4014bf9c030093fdd9e44efc7935f2677b3b1ca | 1796043fc26c958b8fc45d9c058e382473c4f3af | /Fabio02_A/f2_a_q22_duracao_jogo.py | dbb6ae9b4c8f2bf3916f579e31f17f48cda61bfd | [] | no_license | Lucakurotaki/ifpi-ads-algoritmos2020 | a69adec27dbb10aceab1bc7038a0b56a760f99d1 | 34d5fedd5825a85404cf9340e42be618981679c1 | refs/heads/master | 2022-03-22T04:44:14.211359 | 2022-02-19T18:48:36 | 2022-02-19T18:48:36 | 246,585,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | def main():
h_inicio = int(input("Digite a hora do início do jogo: "))
m_inicio = int(input("Digite o minuto do início do jogo: "))
h_fim = int(input("Digite a hora do fim do jogo: "))
m_fim = int(input("Digite o minuto do fim do jogo: "))
print(duracao(h_inicio,m_inicio,h_fim,m_fim))
def duracao(hi,mi,hf,mf):
if hf >= hi and mf >= mi:
dur_h = hf - hi
dur_min = mf - mi
elif hf <= hi and mf < mi:
dur_h = 23+hf - hi
dur_min = 60+mf - mi
elif hf < hi and mf >= mi:
dur_h = 24+hf - hi
dur_min = mf - mi
elif hf > hi and mf < mi:
dur_h = hf - hi
dur_min = 60+mf - mi
return "A duração do jogo é de {} horas e {} minutos.".format(dur_h,dur_min)
main()
| [
"noreply@github.com"
] | Lucakurotaki.noreply@github.com |
20ddfbe36286b792f45620f1976827510cbe3b1c | 74549d7c57b4746ac2a9c275aa12bfc577b0e8af | /prob9.py | a921e7c7ee4566f8d4b41372890da77be1ee213e | [] | no_license | abidkhan484/hackerrank_solution | af9dbf6ec1ead920dc18df233f40db0c867720b4 | b0a98e4bdfa71a4671999f16ab313cc5c76a1b7a | refs/heads/master | 2022-05-02T11:13:29.447127 | 2022-04-13T03:02:59 | 2022-04-13T03:02:59 | 99,207,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | def facto(x):
if(x<=0):
return 1
else:
result=1
result = x * facto(x-1)
return result
user_input = int(input().strip())
print(facto(user_input))
| [
"abidkhan484@gmail.com"
] | abidkhan484@gmail.com |
1c4a50aa0a877c28978f7261fdd1fcd169ddfdb8 | b913242e405a7e8860501df6fd8c41513a32e820 | /custom_test.py | 599db660a03cf659a909a8601364125523ca3403 | [
"MIT"
] | permissive | qiuwei/nicegui | 55d34507e3a5dc4e1e0565c5559f81610e5df4ca | aa0c781a80b8e05d8ada0a4cddd670a7ae13fcda | refs/heads/main | 2023-09-04T18:29:48.801099 | 2021-11-25T13:26:36 | 2021-11-25T13:26:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #!/usr/bin/env python3
from nicegui import ui

# Demo: a custom component whose value changes update a label.
with ui.card():
    example = ui.custom_example(on_change=lambda number: label.set_text(f'Custom value: {number}'))
    ui.button('Add 100', on_click=lambda: example.add(100))
    label = ui.label()

# Joystick that prints its movement vector while being dragged.
ui.joystick(on_move=lambda e: print("move", e.data.vector), color='blue', size=50)
| [
"falko@zauberzeug.com"
] | falko@zauberzeug.com |
6bf9e79c5fe871e199d6a500d86789841ddc85db | 1d717c797e93b451f7da7c810a0fb4075b1050d5 | /src/optimizer/adamw.py | 11c4536636c11dfc4bdcbd94a8f3a606d9e9e307 | [] | no_license | jessie0624/nlp-task | 32338b08051a3ea192db2bf74c9c969bdff1f6ad | aaeeed86341356d9fd061664f6f7bccf2ac353d0 | refs/heads/master | 2023-01-24T12:06:13.323646 | 2020-12-10T08:38:23 | 2020-12-10T08:38:23 | 292,151,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,386 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: zessay
@license: (C) Copyright Sogou.
@contact: zessay@sogou-inc.com
@file: adamw.py
@time: 2019/12/4 17:49
@description: AdamW优化器函数
'''
import torch
import math
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
""" Implements Adam algorithm with weight decay fix.
Parameters:
lr (float): learning rate. Default 1e-3.
betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
eps (float): Adams epsilon. Default: 1e-6
weight_decay (float): Weight decay. Default: 0.0
correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
Example:
>>> model = LSTM()
>>> optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-5)
"""
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
        """Validate hyperparameters and initialize the base Optimizer state."""
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        # Each Adam beta must lie in the half-open interval [0.0, 1.0).
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        # Per-parameter-group defaults consumed by torch.optim.Optimizer.
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        correct_bias=correct_bias)
        super(AdamW, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(1.0 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
step_size = group['lr']
if group['correct_bias']: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state['step']
bias_correction2 = 1.0 - beta2 ** state['step']
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group['weight_decay'] > 0.0:
p.data.add_(-group['lr'] * group['weight_decay'], p.data)
return loss | [
"jessie_lijie@126.com"
] | jessie_lijie@126.com |
4515b2a2c837a6c1b1154219c1123b96f284b6fc | 0216ac17591c6b3d68cb454371ecd3a5564c7af4 | /project_2/coupled-cluster/tests/test_matrix_elements.py | 2851e2bb3321dbe085273adae5bdd776e8250ba0 | [] | no_license | Schoyen/FYS4411 | 3746a155b4026dbf04009cb4e8960a23201351fe | abb580c3c8bb41a71657f559c27bc6e21e04bf17 | refs/heads/master | 2021-05-11T06:34:02.909787 | 2019-06-27T12:49:00 | 2019-06-27T12:49:00 | 117,991,933 | 1 | 0 | null | 2018-03-06T20:22:03 | 2018-01-18T14:16:08 | Jupyter Notebook | UTF-8 | Python | false | false | 1,300 | py | import pytest
import sparse
from coupled_cluster.matrix_elements.index_map import (
get_indices_nm, generate_index_map
)
from coupled_cluster.matrix_elements.generate_matrices import (
get_coulomb_elements, get_antisymmetrized_elements,
get_one_body_elements_spin
)
def test_two_body_generation():
    """Plain Coulomb two-body elements must match the reference tensor."""
    # Reference fixtures are attached to the pytest module by conftest.
    orbital_integrals = pytest.orbital_integrals
    l = pytest.l
    num_shells = pytest.num_shells
    # The module-level index map must exist before element generation.
    generate_index_map(num_shells)
    sparse.utils.assert_eq(
        orbital_integrals, get_coulomb_elements(l), atol=1e-5, rtol=1e-5)
def test_two_body_antisymmetric_generation():
    """Antisymmetrized two-body elements must match the reference tensor."""
    u = pytest.u
    l = pytest.l
    num_shells = pytest.num_shells
    # The module-level index map must exist before element generation.
    generate_index_map(num_shells)
    sparse.utils.assert_eq(
        u, get_antisymmetrized_elements(l), atol=1e-5, rtol=1e-5)
def test_one_body_generation():
    """One-body elements with spin must match the reference tensor."""
    h = pytest.h
    l = pytest.l
    num_shells = pytest.num_shells
    # The module-level index map must exist before element generation.
    generate_index_map(num_shells)
    sparse.utils.assert_eq(
        h, get_one_body_elements_spin(l), atol=1e-5, rtol=1e-5)
def test_large_file():
    """Coulomb element generation must also match for the larger basis."""
    l = pytest.large_l
    orbital_integrals = pytest.large_oi
    num_shells = pytest.large_num_shells
    generate_index_map(num_shells)
    u = get_coulomb_elements(l)
    sparse.utils.assert_eq(orbital_integrals, u)
| [
"oyvindschoyen@gmail.com"
] | oyvindschoyen@gmail.com |
35cef8b4c3957b43e069b2a8df2b8e7caefbb133 | e747bac825ed5807f72654030e108b8c8f96b902 | /mysite/.history/blog/views_20200716005453.py | df0879463678c569afeeac8f8bd0ce8858bd4344 | [] | no_license | AyatSoft/Tech_blog | 4415ab7dfb04bc53bddaf16fd4772e8554680ae8 | 4728c44e0685c3b97038db5e0232e12f35446e23 | refs/heads/master | 2022-11-15T23:01:52.310968 | 2020-07-16T08:52:51 | 2020-07-16T08:52:51 | 280,103,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,194 | py | from .models import Post
from django.shortcuts import render, HttpResponsePermanentRedirect
from django.contrib.auth.mixins import LoginRequiredMixin
from .forms import PostForm, CommentForm, MyRegistrationForm
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy, reverse
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.decorators import login_required
# to do
# setting up the sign in page
# sign up page
# redirect to the necessary
# setting up form for sign in and sign up page
# make asign up model
# this will be function based view
def sign_up(request):
    """Render the registration page; create the account on a valid POST.

    ``registered`` is passed to the template so it can show a success
    message after the form has been saved.
    """
    registered = False
    form = MyRegistrationForm()
    if request.method == "POST":
        # Re-bind the form to the submitted data and validate it.
        form = MyRegistrationForm(data=request.POST)
        if form.is_valid():
            form.save()
            registered = True
    context = {'form': form, 'registered': registered}
    return render(request, 'registration/register.html', context)
def login_user(request):
    """Show the login form; authenticate and redirect to the post list on success.

    Falls through to re-rendering the login page when the request is a GET,
    the form is invalid, or authentication fails.
    """
    form = AuthenticationForm()
    if request.method == "POST":
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            # Removed: a leftover debug print(user) that leaked account
            # details to stdout, and an unused `loggedin` flag.
            if user is not None:
                login(request, user)
                # NOTE(review): a permanent (301) redirect after login is
                # cached by browsers; a temporary redirect is usually wanted
                # here — kept for compatibility, confirm before changing.
                return HttpResponsePermanentRedirect(reverse('blog:post_list'))
    return render(request, 'registration/login.html', {'form': form})
@login_required
def logout_user(request):
    """Log the current user out and send them back to the public post list."""
    logout(request)
    return HttpResponsePermanentRedirect(reverse('blog:post_list'))
class AboutView(TemplateView):
    """Static 'about' page."""
    template_name = "blog/about.html"
class PostListView(ListView):
    """Public list of posts, newest published first."""
    model = Post

    def get_queryset(self):
        """Return every post ordered by descending publication date."""
        return Post.objects.all().order_by('-published_date')
class PostDetailView(DetailView):
    """Detail page for a single post."""
    model = Post
# in the create View we also use the mixin
# it is the same as the @login_required in the function
# based views
# and the class based view for Create View
class CreatePostView(LoginRequiredMixin, CreateView):
    """Create a new post; login required, author stamped from the session."""
    # Where LoginRequiredMixin sends anonymous users.
    login_url = '/login/'
    # Where to return after a successful login.
    redirect_field_name = 'blog/post_detail.html'
    # Creation uses the shared PostForm.
    form_class = PostForm
    template_name = "blog/post_form.html"

    def form_valid(self, form):
        # The author field is not exposed on the form; stamp the logged-in
        # user on the instance before the parent saves it so the required
        # author column is always populated.
        form.instance.author_id = self.request.user.pk
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an existing post; login required."""
    login_url = '/login/'
    redirect_field_name = 'blog/post_detail.html'
    form_class = PostForm
    template_name = "blog/post_form.html"
    model = Post
# same thing we will do but this time
# for all the unpublished
# so we change the queryset
class DraftListView(LoginRequiredMixin, ListView):
    """List of unpublished drafts (no published_date), oldest first."""
    login_url = '/login/'
    redirect_field_name = 'blog/post_list.html'
    model = Post

    def get_queryset(self):
        # A draft is a post that was never published.
        return Post.objects.filter(published_date__isnull=True).order_by('created_date')
class PostDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a post; login required."""
    model = Post
    # reverse_lazy: the URL is resolved only after the delete succeeds,
    # at which point the user is returned to the post list.
    success_url = reverse_lazy('blog:post_list')
"tanviredu2018@gmail.com"
] | tanviredu2018@gmail.com |
b9ff9e359c42c6f56fe741cd5dca07ef4ea2980f | 7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a | /.history/DEBER_20210904233156.py | f759b7aca2a0174716d1d345f4abf0c57a897202 | [
"MIT"
] | permissive | Alopezm5/PROYECTO-PARTE-1 | a1dce04009b24852c1c60e69bdf602ad3af0574b | bd7a8594edf08d41c6ca544cf6bac01ea4fcb684 | refs/heads/main | 2023-07-25T11:22:17.994770 | 2021-09-07T03:27:34 | 2021-09-07T03:27:34 | 403,670,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | import os
class Empresa():
    """Company record: identity and contact data collected from stdin."""
    def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr=""):
        self.nombre=nom
        self.ruc=ruc
        self.direccion=dire
        self.telefono=tele
        self.ciudad=ciud
        self.tipoEmpresa=tipEmpr
    def datosEmpresa(self):
        """Prompt on stdin for every company attribute."""
        self.nombre=input("Ingresar nombre de la empresa: ")
        self.ruc=int(input("Ingresar ruc de la empresa: "))
        self.direccion=input("Ingresar la direccion de la empresa: ")
        self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
        self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
        self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
    def mostrarEmpresa(self):
        """Print a formatted summary of the company data."""
        print("")
        print("Datos de la Empresa")
        print("La empresa de nombre {}\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de {}\n Es una entidad {}".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Empresa):
    """Employee record; extends Empresa with personal and payroll data."""

    def __init__(self, nom="", cedu=0, dire="", tele=0, email="", estado="", profe="", iess=0):
        self.nombre = nom
        self.cedula = cedu
        self.direccion = dire
        self.telefono = tele
        self.correo = email
        self.estadocivil = estado
        self.profesion = profe
        self.iess = iess

    def empleado(self):
        """Prompt on stdin for the employee's basic data and IESS rate."""
        self.nombre = input("Ingresar nombre del empleado: ")
        self.cedula = int(input("Ingresar numero de cedula del empleado: "))
        self.direccion = input("Ingresar la direccion del empleado: ")
        self.telefono = int(input("Ingresar numero de contacto del empleado: "))
        self.correo = input("Ingresar correo personal del empleado: ")
        self.iess = float(input("Ingresar valor del iees recordar que debe ser porcentuado Ejemplo si quiere decir 20% debe ingresar 0.20"))

    def empleadoObrero(self):
        """Extra prompt used for factory workers (marital status)."""
        self.estadocivil = input("Ingresar estado civil del empleado: ")

    def empleadoOficina(self):
        """Extra prompt used for office employees (profession)."""
        self.profesion = input("Ingresar profesion del empleado: ")

    def mostrarempleado(self):
        """Print a summary of the employee data.

        Restored: this method was commented out but is still invoked by the
        driver script at module level (``emple.mostrarempleado()``), which
        raised AttributeError at runtime.
        """
        print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto{}\n Y correo {}".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
class Departamento(Empleado):
    """Department assignment for an employee."""
    def __init__(self,dep=""):
        self.departamento=dep
    def departa(self):
        """Prompt on stdin for the employee's department."""
        self.departamento=input("Ingresar el departamento al que pertenece el empleado: ")
    def mostrarDeparta(self):
        """Print the department the employee belongs to."""
        print("El empleado pertenece al departamento de: {}".format(self.departamento))
class Pagos(Empleado):
    """Payroll computation for one employee.

    Collects salary, loan and overtime data from stdin, then computes the
    net amount to pay (``liquidoRecibir``).
    """

    def __init__(self, desper=0, valhora=0, hotraba=0, extra=0, suel=0, hrecar=0, hextra=0, pres=0, mcou=0, valho=0, sobtiem=0, comofi=0, antobre=0, iemple=0, cuopres=0, tot=0, liquid=0, cuota=0, anti=0, comi=0, fNomina="", fIngreso=""):
        self.permisos = desper
        self.valorhora = valhora
        self.horastrabajadas = hotraba
        self.valextra = extra
        self.sueldo = suel
        self.horasRecargo = hrecar
        self.horasExtraordinarias = hextra
        self.prestamo = pres
        self.mesCuota = mcou
        self.valor_hora = valho
        self.sobretiempo = sobtiem
        self.comEmpOficina = comofi
        self.antiEmpObrero = antobre
        self.iessEmpleado = iemple
        self.cuotaPrestamo = cuopres
        self.totdes = tot
        self.liquidoRecibir = liquid
        self.mesCuota = cuota
        self.antiguedad = anti
        self.comision = comi
        self.fechaNomina = fNomina
        self.fechaIngreso = fIngreso
        # Gross income accumulator; previously created only inside
        # calculoSueldo(), which left the attribute missing before that call.
        self.toting = 0

    def pagoNormal(self):
        """Prompt for salary, loan, commission and seniority data."""
        self.sueldo = float(input("Ingresar sueldo del trabajador: $ "))
        self.prestamo = float(input("Ingresar monto del prestamo que ha generado el empleado: $ "))
        self.mesCuota = int(input("Ingresar meses a diferir el prestamo: "))
        self.comision = float(input("Ingresar valor de la comsion: "))
        self.antiguedad = int(input("Ingresar antiguedad: "))

    def pagoExtra(self):
        """Prompt for overtime hours and the payroll/hire dates (ISO format)."""
        self.horasRecargo = int(input("Ingresar horas de recargo: "))
        self.horasExtraordinarias = int(input("Ingresar horas extraordinarias: "))
        self.fechaNomina = input("Ingresar fecha de nomida (formato año-mes-dia): ")
        self.fechaIngreso = input("Ingresar fecha de ingreso (formato año-mes-dia): ")

    def calculoSueldo(self):
        """Compute gross pay, deductions and net pay.

        Relies on the module-level variable ``eleccion`` (1 = obrero,
        2 = oficina) set by the driver script -- TODO: pass it in explicitly.
        """
        from datetime import date

        self.valor_hora = self.sueldo / 240
        self.sobretiempo = self.valor_hora * (self.horasRecargo * 0.50 + self.horasExtraordinarias * 2)
        self.comEmpOficina = self.comision * self.sueldo
        # Fix: the dates are read as strings; subtracting them raised
        # TypeError.  Parse them first and measure seniority in days.
        dias = (date.fromisoformat(self.fechaNomina) - date.fromisoformat(self.fechaIngreso)).days
        self.antiEmpObrero = self.antiguedad * dias / 365 * self.sueldo
        self.iessEmpleado = self.iess * (self.sueldo + self.sobretiempo)
        self.cuotaPrestamo = self.prestamo / self.mesCuota
        # Fix: branches were swapped relative to the driver's prompt
        # ("1. Obrero o 2. Oficina"): obreros earn the seniority bonus,
        # office employees earn the commission.
        if eleccion == 1:
            self.toting = self.sueldo + self.sobretiempo + self.antiEmpObrero
        elif eleccion == 2:
            self.toting = self.sueldo + self.sobretiempo + self.comEmpOficina
        # Fix: deductions referenced the undefined attribute
        # ``prestamoEmpleado``; the monthly loan installment computed above
        # is what gets withheld.
        self.totdes = self.iessEmpleado + self.cuotaPrestamo
        self.liquidoRecibir = self.toting - self.totdes

    def mostrarSueldo(self):
        """Print the net amount the employee receives."""
        print(self.liquidoRecibir)
emp=Empresa()
emp.datosEmpresa()
os.system ("cls")
emple=Empleado()
emple.empleado()
os.system ("cls")
eleccion=int(input("Va a ingresar un empleado tipo 1. Obreo o 2.Oficina: "))
emple.empleadoObrero()
emple.empleadoOficina()
os.system ("cls")
depa=Departamento()
depa.departa()
pag=Pagos()
pag.pagoNormal()
pag.pagoExtra()
pag.calculoSueldo()
os.system ("cls")
emp.mostrarEmpresa()
print("")
emple.mostrarempleado()
print("")
pag.mostrarSueldo() | [
"85761855+Alopezm5@users.noreply.github.com"
] | 85761855+Alopezm5@users.noreply.github.com |
d301a9b536d8c15fbe982044f5825fcb03af8bef | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/account/tests/test_bank_statement_reconciliation.py | dbc8598d569ec2a6d0d936c8488e90da9fc8d820 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,056 | py | from harpiya.addons.account.tests.account_test_classes import AccountingTestCase
from harpiya.tests import tagged
@tagged('post_install', '-at_install')
class TestBankStatementReconciliation(AccountingTestCase):
    def setUp(self):
        """Cache the models/widget used by every test and create a test partner."""
        super(TestBankStatementReconciliation, self).setUp()
        self.bs_model = self.env['account.bank.statement']
        self.bsl_model = self.env['account.bank.statement.line']
        self.reconciliation_widget = self.env['account.reconciliation.widget']
        self.partner = self.env['res.partner'].create({'name': 'test'})
        self.currency_usd_id = self.env.ref("base.USD").id
        self.currency_euro_id = self.env.ref("base.EUR").id
    def test_reconciliation_proposition(self):
        """An invoice line matching the statement amount exactly is proposed."""
        rcv_mv_line = self.create_invoice(100)
        st_line = self.create_statement_line(100)

        # exact amount match
        rec_prop = self.reconciliation_widget.get_bank_statement_line_data(st_line.ids)['lines']
        prop = rec_prop[0]['reconciliation_proposition']
        # Exactly one proposition: the invoice's receivable line.
        self.assertEqual(len(prop), 1)
        self.assertEqual(prop[0]['id'], rcv_mv_line.id)
    def test_full_reconcile(self):
        """Full reconciliation with the journal posting at payment validation."""
        self._reconcile_invoice_with_statement('pay_val')
def test_post_at_bank_rec_full_reconcile(self):
""" Test the full reconciliation of a bank statement directly with an invoice.
"""
self._reconcile_invoice_with_statement('bank_rec')
def _reconcile_invoice_with_statement(self, post_at):
""" Tests the reconciliation of an invoice with a bank statement, using
the provided 'post at bank reconciliation' value for the bank journal
where to generate the statement.
"""
self.bs_model.with_context(journal_type='bank')._default_journal().post_at_bank_reconciliation = post_at == 'bank_rec'
rcv_mv_line = self.create_invoice(100)
st_line = self.create_statement_line(100)
# reconcile
st_line.process_reconciliation(counterpart_aml_dicts=[{
'move_line': rcv_mv_line,
'credit': 100,
'debit': 0,
'name': rcv_mv_line.name,
}])
# check everything went as expected
self.assertTrue(st_line.journal_entry_ids)
counterpart_mv_line = None
for l in st_line.journal_entry_ids:
if l.account_id.user_type_id.type == 'receivable':
counterpart_mv_line = l
break
self.assertIsNotNone(counterpart_mv_line)
self.assertTrue(rcv_mv_line.reconciled)
self.assertTrue(counterpart_mv_line.reconciled)
self.assertEqual(counterpart_mv_line.matched_credit_ids, rcv_mv_line.matched_debit_ids)
self.assertEqual(rcv_mv_line.move_id.invoice_payment_state, 'paid', "The related invoice's state should now be 'paid'")
def test_reconcile_with_write_off(self):
pass
def create_invoice(self, amount):
""" Return the move line that gets to be reconciled (the one in the receivable account) """
move = self.env['account.move'].create({
'type': 'out_invoice',
'partner_id': self.partner.id,
'invoice_line_ids': [(0, 0, {
'quantity': 1,
'price_unit': amount,
'name': 'test invoice',
})],
})
move.post()
return move.line_ids.filtered(lambda line: line.account_id.user_type_id.type in ('receivable', 'payable'))
def create_statement_line(self, st_line_amount):
journal = self.bs_model.with_context(journal_type='bank')._default_journal()
#journal = self.env.ref('l10n_be.bank_journal')
bank_stmt = self.bs_model.create({'journal_id': journal.id})
bank_stmt_line = self.bsl_model.create({
'name': '_',
'statement_id': bank_stmt.id,
'partner_id': self.partner.id,
'amount': st_line_amount,
})
return bank_stmt_line
def test_confirm_statement_usd(self):
company = self.env.ref('base.main_company')
self.cr.execute("UPDATE res_company SET currency_id = %s WHERE id = %s", [self.currency_euro_id, company.id])
self.env['res.currency.rate'].search([]).unlink()
self.env['res.currency.rate'].create({
'currency_id': self.currency_usd_id,
'rate': 2.0,
'name': '2001-01-01',
})
bank_journal_usd = self.env['account.journal'].create({
'name': 'Bank US',
'type': 'bank',
'code': 'BNK68',
'currency_id': self.currency_usd_id,
})
statement = self.bs_model.create({
'journal_id': bank_journal_usd.id,
'balance_end_real': 100,
'line_ids': [(0, 0, {
'name': '_',
'partner_id': self.partner.id,
'amount': 100,
'account_id': bank_journal_usd.default_debit_account_id.id,
})],
})
statement.button_open()
statement.button_confirm_bank()
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
640112112dfc01efccf5c8bd286908af5a23e0ef | 1cb0cc435061b6a0156b37813343ae46b1f7346e | /1_learn_step/try_second/normal-Adamax-128.py | bec42472cced5d42858f0b3fb5265b09bacb02eb | [] | no_license | youthliuxi/keras | 6370a9de11e152d8ba96e68e9ff02337203b7e66 | 60a367442f74313d0bd9af01f76068d56e23bec0 | refs/heads/master | 2020-04-30T19:54:16.628943 | 2019-08-21T09:47:13 | 2019-08-21T09:47:13 | 177,051,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | # -*- coding:utf-8 -*-
# Keras (Theano channel ordering) MNIST CNN: loads a local mnist.npz, trains a
# small conv net with the Adamax optimizer and dumps the fit history to a file.
from keras import backend as K
K.set_image_dim_ordering('th')  # channels-first layout: (channels, rows, cols)
import numpy as np
np.random.seed(123)  # reproducible weight init / shuffling
from keras.layers import *
from keras.models import Sequential
from keras.utils import np_utils
from keras.datasets import mnist
# (X_train, y_train), (X_test, y_test) = mnist.load_data()
path = "./mnist.npz"
f = np.load(path)
X_train, y_train = f['x_train'],f['y_train']
X_test, y_test = f['x_test'],f['y_test']
# Add the single grayscale channel dimension expected by Conv2D.
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
import pylab
from matplotlib import pyplot as plt
# BUG FIX: a second, byte-identical pair of reshape statements used to follow
# the plotting imports; reshaping to the same shape was a redundant no-op, so
# the duplicates were removed.
# Scale pixel intensities to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# One-hot encode the 10 digit classes.
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu',init = 'normal', input_shape=(1,28,28)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='Adamax',metrics=['accuracy'])
hist = model.fit(X_train, Y_train, batch_size=128, nb_epoch=100, verbose=1, validation_data=(X_test, Y_test))
# Persist the per-epoch metrics for later comparison across runs.
log_file_name = "try_second/txt/normal-Adamax-128.txt"
with open(log_file_name,'w') as f:
    f.write(str(hist.history))
# score = model.evaluate(X_test, Y_test, verbose=0, batch_size=128)
# print(score[0])
# print(score[1])
"lx_einstein@sina.com"
] | lx_einstein@sina.com |
e660605572d83e89f80cee2e458890e18cd8664f | 3c8701e04900389adb40a46daedb5205d479016c | /liaoxuefeng/05-面像对象编程/02-访问限制.py | fbe708c3a95ee8d4a9d9763f74054445db652382 | [] | no_license | huboa/xuexi | 681300653b834eaf506f49987dcca83df48e8db7 | 91287721f188b5e24fbb4ccd63b60a80ed7b9426 | refs/heads/master | 2020-07-29T16:39:12.770272 | 2018-09-02T05:39:45 | 2018-09-02T05:39:45 | 73,660,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | class Student(object):
def __init__(self, name, score):
self.__name = name ###name 加上横线就变成了private 私有变量,只有内部可以访问,外部不能访问
self.__score = score
def print_score(self):
print('%s: %s' % (self.__name, self.__score))
def get_name(self): ###提供了新的方法供外部读取
return self.__name
def get_score(self):
return self.__score
def set_score(self, score): ###提供了新的方法供外部写入
if 0 <= score <= 100:
self.__score = score
else:
raise ValueError('bad score')
# Demo: private attributes are reachable only through the accessors.
zsc=Student("赵胜冲",22)
bart = Student('Bart Simpson', 59)
print(zsc.get_name()) ### direct access to the name attribute is blocked; use get_name() instead
### if you really must reach the private attribute, the mangled name still works:
print(bart._Student__name)
| [
"wxcr11@gmail.com"
] | wxcr11@gmail.com |
a9207f2677a6c9f2a7e3870aea79ca983eafc2a6 | c22e1fe34211a62dd453946c226db06aab620368 | /mms/urls.py | 626838f8a01c482ca8afa51792694e9c72897a23 | [] | no_license | Mehedi-Bin-Hafiz/Hostel-management- | a45402fe591bb44e44a5a140c9aa8b0aa9fa5592 | 01cc0c0cb64c9cce444da8e1d4b664fa97cbf8de | refs/heads/master | 2022-03-04T02:16:56.893873 | 2019-10-05T11:58:49 | 2019-10-05T11:58:49 | 212,954,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | """mms URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import mealinput.views
import dailyexpense.views
import mealdashboard.views
import exdash.views
import index.views
import signup.views
import login.views
import manager.views
import userpro.views
import deposit.views
import depodash.views
# Route table: each feature app (meals, expenses, deposits, auth, dashboards)
# mounts one or two views here.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',index.views.indexview,name="index"),
    path('mealinput/', mealinput.views.mealinputformview, name="mealinput"),
    path('expense/', dailyexpense.views.expenseformview, name="dailyexpense"),
    path('deposit/', deposit.views.depositview, name="deposit"),
    path('ddash/', depodash.views.depoview.as_view(), name="depodash"),
    path('dashboard/', mealdashboard.views.mealview.as_view(), name="mealdashboard"),
    path('edash/', exdash.views.exview.as_view(), name="exdash"),
    path('signup/',signup.views.signupview,name="signup"),
    path('login/',login.views.loginview,name="login"),
    path('manager/',manager.views.managerview.as_view(),name='manager'),
    path('userpro/',userpro.views.userproview.as_view(),name='userpro'),
    path('logout/', index.views.logout, name="logout"),
]
| [
"mehedibinhafiz@gmail.com"
] | mehedibinhafiz@gmail.com |
652ee2326545c6f21803481114ec18c1a4f0726f | 7bb4954f798d295055607e1563269c2aeb10aca9 | /src/sample/tensorflow/list2/list2_1.py | ff895de22b34888156f367d15f226029f900a3e0 | [
"MIT"
] | permissive | mryyomutga/PDIII | 081aca58eb4159e3543e4e59cf1d8a4b8f84dbd1 | 2dc14e741349845bfe528c1dd06c434cf4414941 | refs/heads/master | 2020-03-08T17:56:51.845215 | 2019-02-15T01:16:53 | 2019-02-15T01:16:53 | 128,282,342 | 1 | 0 | null | 2018-12-22T15:11:29 | 2018-04-06T01:09:33 | Python | UTF-8 | Python | false | false | 369 | py | import tensorflow as tf
# tf.constant is the constant-tensor definition API (TF 1.x graph mode).
# Rank-0: a single scalar tensor.
t1 = tf.constant(1, name="Rank0")
# Rank-1: a vector tensor.
t2 = tf.constant([1, 2], name="Rank1")
# Rank-2: a matrix tensor.
t3 = tf.constant([[1, 2], [3, 4]], name="Rank2")
with tf.Session() as sess:
    print(sess.run(t1))
    print(sess.run(t2))
    print(sess.run(t3))
| [
"mryyomutga@gmail.com"
] | mryyomutga@gmail.com |
e2128c22737b078fb1161b9444fac63737b41c3a | 072077377f8c3181923ba84dc7b11e0a6d5afc2b | /vendors/okta/models/log_user_agent.py | c62996bcde78021e701321b423c904014c303a36 | [] | no_license | rafaelfoster/sophoscentral_okta_integration | c2a3ac472df6f2ac03770689f3c5213794f1e48e | d6170b700164ece51a831c27aee55bd498b94001 | refs/heads/master | 2023-02-15T11:30:40.195653 | 2021-01-07T21:32:35 | 2021-01-07T21:32:35 | 325,614,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | # flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
class LogUserAgent(
    OktaObject
):
    """
    A class for LogUserAgent objects.

    Carries the parsed user-agent details (browser, OS, raw header value)
    attached to an Okta system-log event.
    """

    def __init__(self, config=None):
        super().__init__(config)
        source = config or {}
        # dict.get mirrors the original "key in config" checks: absent -> None.
        self.browser = source.get("browser")
        self.os = source.get("os")
        self.raw_user_agent = source.get("rawUserAgent")

    def request_format(self):
        """Serialize to the API wire format, merging in parent fields."""
        fmt = super().request_format()
        fmt.update({
            "browser": self.browser,
            "os": self.os,
            "rawUserAgent": self.raw_user_agent
        })
        return fmt
| [
"rafaelgfoster@gmail.com"
] | rafaelgfoster@gmail.com |
8e495ea7490a463b054e66a65292e96eb033f824 | 074acb4439a97b76ea300b2c07d6a2457b04849f | /zombie/compat.py | 4cfeccdfa331e0ab2937545e840daf0fd2c336fe | [
"MIT"
] | permissive | graffic/python-zombie | 6d79ffb643034c27e84c18b697849d45eb901a06 | 7bfea60b3946d6b20dcc4f70896ffcfd4c55ac1f | refs/heads/master | 2021-01-20T21:57:11.314505 | 2013-08-04T15:12:01 | 2013-08-04T15:12:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | import sys
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3: # pragma: nocover
from io import BytesIO as StringIO
from urllib.parse import urlparse
else: # pragma: nocover
from urlparse import urlparse # noqa
from cStringIO import StringIO # noqa
| [
"lists@ryanpetrello.com"
] | lists@ryanpetrello.com |
445a0485ed0661d1bcc771ce4a5393dd09284f34 | ef1f62cf4e53f856bf763ac0dee73f054518530d | /Week_08/231.Power_of_Two.py | 3f3ec4663f705a473f8c713c5b03eefb4fc9e285 | [] | no_license | ZHHJemotion/algorithm008-class01 | 3338af3619d8e1754a62af6a852f517b47298d95 | 5bb7d2b74110df0b5788b94c69582552d711563a | refs/heads/master | 2022-11-12T09:26:24.941738 | 2020-06-30T15:29:20 | 2020-06-30T15:29:20 | 255,102,230 | 0 | 0 | null | 2020-04-12T14:39:17 | 2020-04-12T14:39:17 | null | UTF-8 | Python | false | false | 564 | py | # Given an integer, write a function to determine if it is a power of two.
#
# Example 1:
#
#
# Input: 1
# Output: true
# Explanation: 20 = 1
#
#
# Example 2:
#
#
# Input: 16
# Output: true
# Explanation: 24 = 16
#
# Example 3:
#
#
# Input: 218
# Output: false
# Related Topics Math Bit Manipulation
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Return True iff n is a positive power of two."""
        # A positive power of two has exactly one set bit in binary.
        return n > 0 and bin(n).count("1") == 1
# leetcode submit region end(Prohibit modification and deletion)
| [
"zhhjemotion@hotmail.com"
] | zhhjemotion@hotmail.com |
f9d5fd9ac3b57c62d986603e4fc8602020d3b07a | 77f07d6f08a3c401f528a4aa1fa8308e12598f44 | /urls.py | 18ac98ada7b430af52f4637aac387395a81b56aa | [] | no_license | sgammon/AppEngine-Toolkit-Skeleton | 7de4b9184d501865e1aae35a7c8f7b2a398b859a | df7a97333fcea8915c038de67c6836e7756a3961 | refs/heads/master | 2016-09-11T06:59:11.412514 | 2012-01-03T06:01:05 | 2012-01-03T06:01:05 | 2,559,656 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | # -*- coding: utf-8 -*-
"""
urls
~~~~
URL definitions.
:copyright: 2009 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
from config import config
from webapp2 import import_string
def get_rules():
    """Collect URL rules from every installed app.

    Each app listed under ``config['webapp2']['apps_installed']`` may ship a
    ``routing`` module exposing its own ``get_rules()``; apps without one are
    silently skipped.

    :return:
        A flat list of rule instances aggregated from all apps.
    """
    collected = []
    for app_module in config.get('webapp2')['apps_installed']:
        try:
            # Load the routing module from the app and fold in its rules.
            routing = import_string('%s.routing' % app_module)
            collected.extend(routing.get_rules())
        except ImportError:
            continue
    return collected
"sgammon@bluestatedigital.com"
] | sgammon@bluestatedigital.com |
20eb4f23b767b4473d56ae05a400d01d40f44b95 | 8e352bddc79e22604cdc23bf2e33d3d36dd30502 | /linux_rest_api/filesystem/modes.py | ce03c36aced81c91a7b74f35753ef53c03f2fb36 | [] | no_license | kissgyorgy/linux-rest-api | 330950ba7e23932dd2cf3a1b026f83f587773110 | 3f46031f88e75c0a317f96cdb123fe5980877bcb | refs/heads/master | 2023-02-16T12:47:54.960902 | 2018-09-20T05:05:14 | 2020-09-23T11:56:33 | 143,931,513 | 7 | 2 | null | 2023-02-02T11:47:12 | 2018-08-07T22:06:32 | Python | UTF-8 | Python | false | false | 391 | py | import stat
def octal_mode(st_mode: int) -> str:
    """Return the permission bits of *st_mode* as an octal string (no '0o' prefix)."""
    perm_bits = stat.S_IMODE(st_mode)
    return format(perm_bits, "o")
def symbolic_mode(st_mode: int) -> str:
    """Return *st_mode* as a chmod-style 'u=...,g=...,o=...' string."""
    perms = stat.filemode(st_mode)[1:]  # 9-char rwx string, file-type char dropped
    user, group, other = (
        chunk.replace("-", "") for chunk in (perms[0:3], perms[3:6], perms[6:9])
    )
    return f"u={user},g={group},o={other}"
def long_mode(st_mode: int) -> str:
    """Return the nine-character rwx permission string (file-type char dropped)."""
    full = stat.filemode(st_mode)
    return full[1:]
| [
"kissgyorgy@me.com"
] | kissgyorgy@me.com |
5753358a6c496e08c5026fc69e200f9fea67ff63 | 06f7ffdae684ac3cc258c45c3daabce98243f64f | /vsts/vsts/git/v4_1/models/git_commit_changes.py | 9d82a62347a190cdf6e2bb739ac857cde0364cd7 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | kenkuo/azure-devops-python-api | 7dbfb35f1c9637c9db10207824dd535c4d6861e8 | 9ac38a97a06ee9e0ee56530de170154f6ed39c98 | refs/heads/master | 2020-04-03T17:47:29.526104 | 2018-10-25T17:46:09 | 2018-10-25T17:46:09 | 155,459,045 | 0 | 0 | MIT | 2018-10-30T21:32:43 | 2018-10-30T21:32:42 | null | UTF-8 | Python | false | false | 1,142 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class GitCommitChanges(Model):
    """GitCommitChanges.

    :param change_counts:
    :type change_counts: dict
    :param changes:
    :type changes: list of :class:`object <git.v4_1.models.object>`
    """

    _attribute_map = {
        'change_counts': {'key': 'changeCounts', 'type': '{int}'},
        'changes': {'key': 'changes', 'type': '[object]'}
    }

    def __init__(self, change_counts=None, changes=None):
        # Delegate to the msrest Model initializer, then record both fields.
        super(GitCommitChanges, self).__init__()
        self.change_counts, self.changes = change_counts, changes
| [
"tedchamb@microsoft.com"
] | tedchamb@microsoft.com |
acb8c308b81cccc7646965c8b7c9207a7d2d4b91 | 7a7a094f77a178aba06fae2176919f926119c356 | /data_structures_and_algorithms/adjacency_matrix.py | 651829845865a5c336bc7f66a83ea2dd84abd223 | [] | no_license | vlad-bezden/data_structures_and_algorithms | 9fd0c67a16ff1893d830ae68f43cabb75f5d6a99 | 3ba9b904ed5955de24053cb5941a7c5a71600106 | refs/heads/master | 2021-07-03T02:12:37.207767 | 2020-09-12T12:58:13 | 2020-09-12T12:58:13 | 165,452,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | """
Converts graph presented as dict to matrix
{'A': ['B', 'C'],
'B': ['A', 'C', 'E'],
'C': ['A', 'B', 'E', 'F'],
'E': ['B', 'C'],
'F': ['C']}
[[0, 1, 1, 0, 0],
[1, 0, 1, 1, 0],
[1, 1, 0, 1, 1],
[0, 1, 1, 0, 0],
[0, 0, 1, 0, 0]]
"""
from pprint import pprint
from typing import Dict, List
Row = List[int]
Matrix = List[Row]
Graph = Dict[str, List[str]]


def graph_to_matrix(graph: Graph) -> Matrix:
    """Build the adjacency matrix for *graph*, nodes ordered alphabetically."""
    nodes = sorted(graph)
    return [
        [1 if dst in graph[src] else 0 for dst in nodes]
        for src in nodes
    ]
def main():
    """Demo: print a sample graph and its adjacency matrix."""
    sample_graph = {
        "A": ["B", "C"],
        "B": ["A", "C", "E"],
        "C": ["A", "B", "E", "F"],
        "E": ["B", "C"],
        "F": ["C"],
    }
    pprint(sample_graph)
    pprint(graph_to_matrix(sample_graph))


if __name__ == "__main__":
    main()
| [
"vlad.bezden@gmail.com"
] | vlad.bezden@gmail.com |
19bab16feffcbe12224818535825db475ac0c04c | 12a8cc08189cbaf84f4a3fd3a54595097a03ef3c | /app/main/forms.py | 3a10472078aa8b8e9aab50947a6c0a1c07018340 | [] | no_license | kepha-okari/watchlist-2 | 723acc9a616f10b1caab4c245763856b5c055c54 | 0cadf9f905d8788dc0999d4addd506d03949d33c | refs/heads/master | 2022-03-27T21:34:43.102901 | 2017-12-16T08:58:45 | 2017-12-16T08:58:45 | 114,447,275 | 0 | 1 | null | 2020-01-28T18:48:26 | 2017-12-16T08:57:20 | Python | UTF-8 | Python | false | false | 387 | py | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import Required
class ReviewForm(FlaskForm):
    """Form for submitting a movie review (title + body); both fields required."""
    title = StringField('Review title', validators=[Required()])
    review = TextAreaField('Movie review', validators=[Required()])
    submit = SubmitField('Submit')
| [
"kephaokari@gmail.com"
] | kephaokari@gmail.com |
b94406750358432b3d6cb7ea425f4f5ff477df4d | 45b64f620e474ac6d6b2c04fbad2730f67a62b8e | /Varsity-Final-Project-by-Django-master/.history/project/quiz/admin_20210424134332.py | ed0af28881c12976366a229f411022f9ab0e15c5 | [] | no_license | ashimmitra/Final-Project | 99de00b691960e25b1ad05c2c680015a439277e0 | a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003 | refs/heads/master | 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | from django.contrib import admin
from quiz.models import Quiz
from quiz.models import ICT
from quiz.models import Ban
from quiz.models import Math
from quiz.models import Science
from quiz.models import GK
# Thin ModelAdmin subclasses: each quiz-category model lists its question
# text in the Django admin changelist.
class QuizAdmin(admin.ModelAdmin):
    list_display = ('question',)
class ICTAdmin(admin.ModelAdmin):
    list_display = ('question',)
class BanAdmin(admin.ModelAdmin):
    list_display = ('question',)
class MathAdmin(admin.ModelAdmin):
    list_display = ('question',)
class ScienceAdmin(admin.ModelAdmin):
    list_display = ('question',)
class GKAdmin(admin.ModelAdmin):
    list_display = ('question',)
admin.site.register(Quiz, QuizAdmin)
admin.site.register(ICT, ICTAdmin)
admin.site.register(Ban, BanAdmin)
admin.site.register(Math, MathAdmin)
admin.site.register(Science, ScienceAdmin)
admin.site.register(GK, GKAdmin)
# BUG FIX: removed `admin.site.register(MA, MAAdmin)` — neither MA nor MAAdmin
# is defined or imported in this module, so the line raised NameError on import.
| [
"34328617+ashimmitra@users.noreply.github.com"
] | 34328617+ashimmitra@users.noreply.github.com |
0ef419920e487fcf0cd41aee9cd790d9ebac5369 | 9073e836f6f66110af04110b0d0117ab224fede4 | /eg_26.py | ef0f7c0bf58fe077236400e5018daf4a69b8d24b | [] | no_license | striveman1379/python100_examples | ec860d65aeff62b7e31b30798e7ca38db1297eec | c701b4444469a03efac3436f2a65199615f9e3cb | refs/heads/master | 2020-04-18T00:57:15.114970 | 2018-11-06T09:39:01 | 2018-11-06T09:39:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # -*- coding:utf-8 -*-
# 利用递归方法求5!
def num(n):
    """Return n! (factorial of n), computed recursively.

    Raises:
        ValueError: if n is negative (previously this recursed forever).
    """
    if n < 0:
        raise ValueError('factorial is undefined for negative numbers')
    if n == 0:
        return 1
    return n * num(n - 1)
print('5!= %d' % num(5))
"you@example.com"
] | you@example.com |
61aca2f7a8eb22d3acbc9d35bbd3bcf742e3de7f | 1b60c5833acfb2669b1b51dc2a3616b6017986b6 | /question_answering/utils/utils.py | 48dbbe97f9cd7fb282bcfbc7fdfbe2e354cea37a | [] | no_license | akoshel/QuestionAnswering | 1a61a53c1b3fadde6ae6361a5d628b57625da39b | c33ac39945947df880f0d390fddfd0e0daf9dda8 | refs/heads/main | 2023-07-27T19:12:29.988477 | 2021-09-14T10:28:54 | 2021-09-14T10:28:54 | 385,997,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | from loguru import logger
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
def train_epoch(model: nn.Module, iterator: DataLoader, criterion, optimizer, device, writer):
    """Run one QA training epoch and return the mean batch loss (float).

    Each batch is (features, attention_mask, start_token, end_token); the loss
    is the average of the start- and end-position cross-entropy losses.
    """
    model.train()
    epoch_loss = 0
    logger.info("train epoch started")
    for i, batch in enumerate(iterator):
        features, attention_mask, start_token, end_token = batch
        # BUG FIX: gradients must be cleared each step — without this they
        # accumulated across batches.
        optimizer.zero_grad()
        start_logits, end_logits = model(features.to(device), attention_mask.to(device))
        start_loss = criterion(start_logits, start_token.to(device))
        end_loss = criterion(end_logits, end_token.to(device))
        # Single backward pass over the combined loss.
        total_loss = (start_loss + end_loss) / 2
        total_loss.backward()
        optimizer.step()
        # BUG FIX: use .item() — accumulating the tensor kept every batch's
        # autograd graph alive for the whole epoch.
        epoch_loss += total_loss.item()
        if i % 100 == 0:
            logger.info("iteration {i} loss {l}", i=i, l=total_loss)
            writer.add_scalar('train loss', total_loss.item())
    return epoch_loss / len(iterator)
def validate(model: nn.Module, iterator: DataLoader, criterion, device, writer):
    """Evaluate the QA model on *iterator* and return the mean batch loss.

    Runs under torch.no_grad() with the model in eval mode; each batch loss is
    the average of the start- and end-position losses.
    """
    model.eval()
    running_loss = 0
    logger.info("Eval started")
    with torch.no_grad():
        for batch in iterator:
            features, attention_mask, start_token, end_token = batch
            start_logits, end_logits = model(features.to(device), attention_mask.to(device))
            loss_start = criterion(start_logits, start_token.to(device))
            loss_end = criterion(end_logits, end_token.to(device))
            batch_loss = (loss_start + loss_end) / 2
            running_loss += batch_loss
            writer.add_scalar('validation loss', batch_loss.item())
    return running_loss / len(iterator)
| [
"johndoe@example.com"
] | johndoe@example.com |
a57086090d53b07ccbeee482c97989fabc0d8994 | 3523676133fe91bd69d87b60757af041cc8b603b | /offset/time.py | 3705ffc2cf60ca09f6874880f35622a4fa76db9a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | cautonwong/offset | 247377c0756970b7f6fef37fc4d37c192510abc7 | b8561635a4cb44a9f47d086163f4d0b58bb8fd74 | refs/heads/master | 2021-01-15T22:25:07.504578 | 2014-08-10T18:59:58 | 2014-08-10T18:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,407 | py | # -*- coding: utf-8 -
#
# This file is part of offset. See the NOTICE for more information.
from .core.util import nanotime, from_nanotime
from .core import timer
from .core.chan import makechan, select
# Duration units expressed in nanoseconds (mirrors Go's time package).
NANOSECOND = 1
MICROSECOND = 1000 * NANOSECOND
MILLISECOND = 1000 * MICROSECOND
SECOND = 1000 * MILLISECOND
MINUTE = 60 * SECOND
HOUR = 60 * MINUTE
# Convenience aliases re-exported from the core runtime.
nano = nanotime
sleep = timer.sleep
def _sendtime(now, t, c):
    # Timer callback: offer the converted wall-clock time on channel `c`
    # through select. `t` is unused here — presumably required by the
    # core.timer callback signature; confirm against core.timer.
    select(c.if_send(from_nanotime(now)))
class Timer(object):
    """ The Timer instance represents a single event.
    When the timer expires, the current time will be sent on c """
    def __init__(self, interval):
        # Buffered channel of size 1 for the single expiry notification.
        self.c = makechan(1)
        self.t = timer.Timer(_sendtime, interval, args=(self.c,))
        self.t.start()
    def reset(self, interval):
        """ reset the timer interval """
        # Compute the new absolute deadline, then stop/adjust/restart.
        w = nanotime() + interval
        self.t.stop()
        self.t.when = w
        self.t.start()
    def stop(self):
        # Cancel the pending event and close the notification channel.
        self.t.stop()
        self.c.close()
def After(interval):
    """Return a channel that receives the current time once *interval* has
    elapsed.

    Shorthand for ``Timer(interval).c``.
    """
    pending = Timer(interval)
    return pending.c
def AfterFunc(interval, func, args=None, kwargs=None):
    """Run *func* in its own goroutine after *interval* has elapsed.

    Returns the underlying Timer, whose ``stop`` method cancels the call.
    """
    scheduled = timer.Timer(func, interval, args=args, kwargs=kwargs)
    scheduled.start()
    return scheduled
class Ticker(object):
    """ returns a new Ticker containing a channel that will send the
    time with a period specified by the duration argument.
    It adjusts the intervals or drops ticks to make up for slow receivers.
    The duration d must be greater than zero.
    """
    def __init__(self, interval):
        # NOTE(review): the docstring requires interval > 0, but only
        # negative values are rejected here — interval == 0 slips through.
        if interval < 0:
            raise ValueError("non-positive interval")
        self.c = makechan(1)
        # set the runtime timer
        # Periodic: fires every `interval`, starting after the first `interval`.
        self.t = timer.Timer(_sendtime, interval, interval, args=(self.c,))
        self.t.start()
    def stop(self):
        # Close the tick channel first, then cancel the underlying timer.
        self.c.close()
        self.t.stop()
def Tick(interval):
    """Return the ticking channel of a fresh Ticker, or None when *interval*
    is not positive.

    Handy for callers that never need to shut the ticker down.
    """
    if interval > 0:
        return Ticker(interval).c
    return None
| [
"bchesneau@gmail.com"
] | bchesneau@gmail.com |
f82d6a6bf94f54d656ce7cc54a735785cc4eb61f | 305b5459c319688a7a7184c959fc335d464a0e0c | /test_project/test_app/tests/crawler_tests.py | 1f9c29c6a6217e4e666c3c85ba5567b605e0efa2 | [] | no_license | ericholscher/django-crawler | 7595052a3a374c62c3fa6063e091aba4ab5ddb11 | 716f75a5a23b1befa28bbf2da41df4335eb0bf5c | refs/heads/master | 2021-01-01T16:13:29.020267 | 2015-06-29T17:53:42 | 2015-06-29T17:53:42 | 902,366 | 18 | 11 | null | 2015-06-29T17:54:12 | 2010-09-10T21:27:11 | Python | UTF-8 | Python | false | false | 2,643 | py | """
This file is to test testmaker. It will run over the polls app and with the crawler and with test maker outputting things. Hopefully this will provide a sane way to test testmaker.
"""
from django.test.testcases import TestCase
from crawler.base import Crawler
import logging
import os
class CrawlerTests(TestCase):
    """
    Tests to test the Crawler API
    """
    urls = "test_project.polls.urls"
    fixtures = ['polls_testmaker.json']
    def setUp(self):
        # Route crawler logging into a scratch file so the tests can inspect it.
        self.log = logging.getLogger('crawler')
        [self.log.removeHandler(h) for h in self.log.handlers]
        self.log.setLevel(logging.DEBUG)
        handler = logging.FileHandler('crawler_log', 'a')
        handler.setFormatter(logging.Formatter('%(message)s'))
        self.log.addHandler(handler)
    def tearDown(self):
        # Remove the scratch log file created in setUp.
        os.remove('crawler_log')
    def test_basic_crawling(self):
        """Crawling '/' should discover every reachable poll page."""
        c = Crawler('/')
        c.run()
        self.assertEqual(c.crawled, {'/': True, u'/1': True, u'/2': True})
    def test_relative_crawling(self):
        """Starting from a sub-page should only visit that page."""
        c = Crawler('/1')
        c.run()
        self.assertEqual(c.crawled, {u'/1': True})
    def test_url_plugin(self):
        """Configured-but-unvisited URL patterns should be reported in the log."""
        conf_urls = {'this_wont_be_crawled': True}
        c = Crawler('/', conf_urls=conf_urls)
        c.run()
        logs = open('crawler_log')
        output = logs.read()
        self.assertTrue(output.find('These patterns were not matched during the crawl: this_wont_be_crawled') != -1)
    def test_time_plugin(self):
        #This isn't testing much, but I can't know how long the time will take
        c = Crawler('/')
        c.run()
        logs = open('crawler_log')
        output = logs.read()
        self.assertTrue(output.find('Time taken:') != -1)
    def test_memory_plugin(self):
        """The memory plugin should log consumption when activated."""
        from crawler.plugins.memory_plugin import Memory
        Memory.active = True
        c = Crawler('/')
        c.run()
        logs = open('crawler_log')
        output = logs.read()
        self.assertTrue(output.find('Memory consumed:') != -1)
    #Guppy makes the tests take a lot longer, uncomment this if you want to
    #test it.
    """
    def test_guppy_plugin(self):
        #This isn't testing much, but I can't know how long the time will take
        from crawler.plugins.guppy_plugin import ACTIVE, Heap
        if ACTIVE:
            Heap.active = True
            c = Crawler('/')
            c.run()
            logs = open('crawler_log')
            output = logs.read()
            import ipdb; ipdb.set_trace()
            self.assertTrue(output.find('heap') != -1)
        else:
            print "Skipping memory test, as guppy isn't installed"
    """
| [
"eric@ericholscher.com"
] | eric@ericholscher.com |
83557ba6614ffd8a233757c86d0f87a2b54d2e95 | 4eddf6a34715752dc652571b1ab274f51ceb5da0 | /Bayes Classification/.history/Bayes_main_20210428125730.py | e07e1b20e37d7581a86711c9e9dc3c7244e9fcf3 | [] | no_license | Suelt/Hust-SE-introduction-to-ML | 649aba0e5b41363ceac03330ef02982982a0615d | a66785c3085da573f5748d13608eabf02e616321 | refs/heads/master | 2023-05-27T13:13:41.058545 | 2021-06-10T05:44:02 | 2021-06-10T05:44:02 | 375,582,438 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection
credit = pd.read_csv("C:\\pyproject\\Bayes Classification\\transformed.csv")
y = credit['credit_risk']
X = credit.loc[:,'status':'foreign_worker']
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, random_state=1)
cols = ['status','duration','credit_history', 'purpose','amount','savings', 'employment_duration','installment_rate', 'personal_status_sex', 'other_debtors',
'present_residence','property','age','other_installment_plans','housing','number_credits','job','people_liable','telephone','foreign_worker']
dict_main_true={}
dict_main_false={}
train=credit.loc[y_train.index]
train_true=credit[train['credit_risk'].isin(['good'])]
train_bad=credit[train['credit_risk'].isin(['bad'])]
print(train_true.shape[0])
for col in cols:
dict_main_true[col]={}
dict_main_false[col]={}
| [
"2552925383@qq.com"
] | 2552925383@qq.com |
93b28ca4463dc909e11f966c914b21a0d0b546f4 | 9a63e1b1f026dcde05d7ee1a00b836a6c34e5d43 | /tests/appointment/test_metrics.py | 08b1eda65fe4eb9f43f5f6c33de4203767d1c277 | [
"BSD-3-Clause"
] | permissive | databill86/poli-sci-kit | 201770634c05463fe4ef00b20a47de95d276b6cd | a3f308ccd914cf18105de89218e23fe95a0b1de7 | refs/heads/main | 2023-06-26T17:12:51.939600 | 2021-07-29T15:03:05 | 2021-07-29T15:03:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,831 | py | """Appointment metric tests"""
from poli_sci_kit.appointment import metrics
# All arguments below are pytest fixtures supplying sample shares/allocations;
# expected values are pinned regression constants.
def test_ideal_share(share, total_shares, seats):
    assert (
        round(
            metrics.ideal_share(
                share=share, total_shares=total_shares, total_alloc=seats
            ),
            4,
        )
        == 6.9222
    )
# Ratio of allocated seats to the proportional (ideal) share.
def test_alloc_to_share_ratio(share, total_shares, allocation, seats):
    assert (
        round(
            metrics.alloc_to_share_ratio(
                share=share,
                total_shares=total_shares,
                allocation=allocation,
                total_alloc=seats,
            ),
            4,
        )
        == 1.0112
    )
# Squared error between allocation and ideal share for a single party.
def test_square_alloc_to_share_ratio(share, total_shares, allocation, seats):
    assert (
        round(
            metrics.sqr_alloc_to_share_error(
                share=share,
                total_shares=total_shares,
                allocation=allocation,
                total_alloc=seats,
            ),
            6,
        )
        == 0.000126
    )
# Aggregate allocation/share error across all parties (proportional variant).
def test_total_alloc_to_share_error(tie_votes_list, allocations):
    assert (
        round(
            metrics.total_alloc_to_share_error(
                shares=tie_votes_list, allocations=allocations, proportional=True
            ),
            6,
        )
        == 0.006835
    )
# Constituents represented per allocated seat.
def test_rep_weight(share, allocation):
    assert (
        round(metrics.rep_weight(share=share, allocation=allocation), 4) == 274082.5714
    )
# Squared representative-weight error for a single party.
def test_sqr_rep_weight_error(share, total_shares, allocation, seats):
    assert (
        round(
            metrics.sqr_rep_weight_error(
                share=share,
                total_shares=total_shares,
                allocation=allocation,
                total_alloc=seats,
            ),
            4,
        )
        == 9480416.9437
    )
# Aggregate representative-weight error across all parties.
def test_total_rep_weight_error(tie_votes_list, allocations):
    assert (
        round(
            metrics.total_rep_weight_error(
                shares=tie_votes_list, allocations=allocations, proportional=True
            ),
            4,
        )
        == 594037282.4765
    )
# Smoke checks: index metrics should be non-zero for the sample inputs.
def test_div_not_0(short_votes_list, q, div_index_metrics):
    assert (
        metrics.div_index(shares=short_votes_list, q=q, metric_type=div_index_metrics)
        != 0
    )
def test_dispr_not_0(short_votes_list, allocations, dispr_index_metrics):
    assert (
        metrics.dispr_index(
            shares=short_votes_list,
            allocations=allocations,
            metric_type=dispr_index_metrics,
        )
        != 0
    )
def test_effective_number_of_groups_not_0(short_votes_list, effective_group_metrics):
    assert (
        metrics.effective_number_of_groups(
            shares=short_votes_list, metric_type=effective_group_metrics
        )
        != 0
    )
| [
"andrew.t.mcallister@gmail.com"
] | andrew.t.mcallister@gmail.com |
ebf1aff1bbdf5a219a46dbeed92232bba9a0fad0 | b57b0a14df5c6841f04cccb7b02ad04afbca18f8 | /azure_iot_edge/tests/test_check.py | 2b89dda24289d6d675f0c94de7469395f4b1e350 | [
"AFL-3.0",
"BSD-3-Clause-Modification",
"LGPL-3.0-only",
"Unlicense",
"LGPL-2.1-only",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | zeroc0d3/integrations-core | d9c99803c049668b7f9f9c796d338e343d3d46ee | 634d567f3c38d32aabb3f4c16b50bcfa8a4ae0fb | refs/heads/master | 2021-09-28T18:37:00.650406 | 2021-09-13T11:59:45 | 2021-09-13T11:59:45 | 199,758,958 | 0 | 0 | BSD-3-Clause | 2019-07-31T02:01:25 | 2019-07-31T02:01:24 | null | UTF-8 | Python | false | false | 3,884 | py | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
import pytest
import requests
from datadog_checks.azure_iot_edge import AzureIoTEdgeCheck
from datadog_checks.base.stubs.aggregator import AggregatorStub
from datadog_checks.base.stubs.datadog_agent import DatadogAgentStub
from datadog_checks.dev.utils import get_metadata_metrics
from . import common
@pytest.mark.usefixtures("mock_server")
def test_check(aggregator, mock_instance):
# type: (AggregatorStub, dict) -> None
"""
Under normal conditions, metrics and service checks are collected as expected.
"""
check = AzureIoTEdgeCheck('azure_iot_edge', {}, [mock_instance])
check.check(mock_instance)
for metric, metric_type in common.HUB_METRICS:
# Don't assert exact tags since they're very complex (many cross products).
aggregator.assert_metric(metric, metric_type=metric_type)
m = aggregator._metrics[metric][0]
assert set(m.tags) >= set(common.TAGS)
for metric, metric_type, metric_tags in common.AGENT_METRICS:
tags = common.TAGS + metric_tags
aggregator.assert_metric(metric, metric_type=metric_type, count=1, tags=tags)
for metric, metric_type in common.MODULE_METRICS:
for module_name in common.MODULES:
tags = common.TAGS + ['module_name:{}'.format(module_name)]
aggregator.assert_metric(metric, metric_type=metric_type, count=1, tags=tags)
aggregator.assert_service_check(
'azure.iot_edge.edge_hub.prometheus.health',
AzureIoTEdgeCheck.OK,
count=1,
tags=common.CUSTOM_TAGS + ['endpoint:{}'.format(common.MOCK_EDGE_HUB_PROMETHEUS_URL)],
)
aggregator.assert_service_check(
'azure.iot_edge.edge_agent.prometheus.health',
AzureIoTEdgeCheck.OK,
count=1,
tags=common.CUSTOM_TAGS + ['endpoint:{}'.format(common.MOCK_EDGE_AGENT_PROMETHEUS_URL)],
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
@pytest.mark.usefixtures("mock_server")
def test_version_metadata(datadog_agent, mock_instance):
# type: (DatadogAgentStub, dict) -> None
check = AzureIoTEdgeCheck('azure_iot_edge', {}, [mock_instance])
check.check_id = 'test:123'
check.run()
major, minor, patch, raw = common.MOCK_EDGE_AGENT_VERSION
version_metadata = {
'version.scheme': 'semver',
'version.major': major,
'version.minor': minor,
'version.patch': patch,
'version.raw': raw,
}
datadog_agent.assert_metadata('test:123', version_metadata)
@pytest.mark.usefixtures("mock_server")
@pytest.mark.parametrize(
"option, url, service_check",
[
pytest.param(
"edge_agent_prometheus_url",
common.MOCK_EDGE_AGENT_PROMETHEUS_URL,
"azure.iot_edge.edge_agent.prometheus.health",
id="edge-agent",
),
pytest.param(
"edge_hub_prometheus_url",
common.MOCK_EDGE_HUB_PROMETHEUS_URL,
"azure.iot_edge.edge_hub.prometheus.health",
id="edge-hub",
),
],
)
def test_prometheus_endpoint_down(aggregator, mock_instance, option, url, service_check):
# type: (AggregatorStub, dict, str, str, str) -> None
"""
When a Prometheus endpoint is unreachable, service check reports as CRITICAL.
"""
instance = copy.deepcopy(mock_instance)
wrong_port = common.MOCK_SERVER_PORT + 1 # Will trigger exception.
instance[option] = url.replace(str(common.MOCK_SERVER_PORT), str(wrong_port))
check = AzureIoTEdgeCheck('azure_iot_edge', {}, [instance])
with pytest.raises(requests.ConnectionError):
check.check(instance)
aggregator.assert_service_check(service_check, AzureIoTEdgeCheck.CRITICAL)
| [
"noreply@github.com"
] | zeroc0d3.noreply@github.com |
64c9f98a0759a0df0b44851c167e3a1d53498e0c | 885a722e3e5814ae4942ac5e8cf8d0091e734b4c | /BAEKJOON/11000~/11655_ROT13_python/CodingTest.py | ba92d5f2015a4ebedf5f1c18c4985c26f96f09ed | [] | no_license | ledpear/algorithm | 52f3ea25842eee20b3bbd48e51825b9df4942e03 | 4922c6fe5ca0b98a90dee218b756006e7ba05d82 | refs/heads/master | 2023-06-09T17:47:45.674244 | 2023-06-03T13:47:11 | 2023-06-03T13:47:11 | 133,370,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | from string import ascii_uppercase
from string import ascii_lowercase
def ROT13(c):
list_upper = list(ascii_uppercase)
list_lower = list(ascii_lowercase)
if c in list_upper :
i = list_upper.index(c) + 13
if i >= 26:
i -= 26
return list_upper[i]
if c in list_lower :
i = list_lower.index(c) + 13
if i >= 26:
i -= 26
return list_lower[i]
return c
input = list(input())
size = len(input)
for i in range(size):
input[i] = ROT13(input[i])
print(''.join(input)) | [
"tjsrb75@gmail.com"
] | tjsrb75@gmail.com |
1f713eff37f69e1eb1b584fea35ebe8ac07f8c25 | 184ba93339a2af7d375bf0e4c9b787ec2a0c3c34 | /hack/picoctf.org/General Skills/PW Crack 4/level4.py | 526e2d62ae8d52e77fa7241cf5d4e59494727f96 | [] | no_license | pchaos/others | 4436dc3ab134d5ed7868df22d4098c93078aae1f | ff1d7229b075a1bb10dbbae8fc5b2bfe8ea43987 | refs/heads/master | 2023-07-14T19:33:36.238932 | 2023-06-21T09:09:04 | 2023-06-21T09:09:04 | 107,945,559 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | import hashlib
### THIS FUNCTION WILL NOT HELP YOU FIND THE FLAG --LT ########################
def str_xor(secret, key):
# extend key to secret length
new_key = key
i = 0
while len(new_key) < len(secret):
new_key = new_key + key[i]
i = (i + 1) % len(key)
return "".join(
[
chr(ord(secret_c) ^ ord(new_key_c))
for (secret_c, new_key_c) in zip(secret, new_key)
]
)
###############################################################################
flag_enc = open("level4.flag.txt.enc", "rb").read()
correct_pw_hash = open("level4.hash.bin", "rb").read()
def hash_pw(pw_str):
pw_bytes = bytearray()
pw_bytes.extend(pw_str.encode())
m = hashlib.md5()
m.update(pw_bytes)
return m.digest()
def level_4_pw_check():
user_pw = input("Please enter correct password for flag: ")
user_pw_hash = hash_pw(user_pw)
if user_pw_hash == correct_pw_hash:
print("Welcome back... your flag, user:")
decryption = str_xor(flag_enc.decode(), user_pw)
print(decryption)
return
print("That password is incorrect")
level_4_pw_check()
# The strings below are 100 possibilities for the correct password.
# (Only 1 is correct)
pos_pw_list = [
"158f",
"1655",
"d21e",
"4966",
"ed69",
"1010",
"dded",
"844c",
"40ab",
"a948",
"156c",
"ab7f",
"4a5f",
"e38c",
"ba12",
"f7fd",
"d780",
"4f4d",
"5ba1",
"96c5",
"55b9",
"8a67",
"d32b",
"aa7a",
"514b",
"e4e1",
"1230",
"cd19",
"d6dd",
"b01f",
"fd2f",
"7587",
"86c2",
"d7b8",
"55a2",
"b77c",
"7ffe",
"4420",
"e0ee",
"d8fb",
"d748",
"b0fe",
"2a37",
"a638",
"52db",
"51b7",
"5526",
"40ed",
"5356",
"6ad4",
"2ddd",
"177d",
"84ae",
"cf88",
"97a3",
"17ad",
"7124",
"eff2",
"e373",
"c974",
"7689",
"b8b2",
"e899",
"d042",
"47d9",
"cca9",
"ab2a",
"de77",
"4654",
"9ecb",
"ab6e",
"bb8e",
"b76b",
"d661",
"63f8",
"7095",
"567e",
"b837",
"2b80",
"ad4f",
"c514",
"ffa4",
"fc37",
"7254",
"b48b",
"d38b",
"a02b",
"ec6c",
"eacc",
"8b70",
"b03e",
"1b36",
"81ff",
"77e4",
"dbe6",
"59d9",
"fd6a",
"5653",
"8b95",
"d0e5",
]
| [
"drifthua@gmail.com"
] | drifthua@gmail.com |
0b7cd367e8fb6f11318b27a150bd97bc031d5441 | 2b15168bc67ee935446f51c46045f73346369c5a | /model/resnet50.py | 1a54e0fc47e9fea59fb465a498f7c2c608b9c85f | [] | no_license | jason9075/tf2_arcface | 6c37500c9c14170ea6731f6a0d79a19f088c32d3 | 6fabcdf9c3c9a12603456476fc8052de2830684d | refs/heads/master | 2023-04-30T11:42:52.845549 | 2021-04-01T06:59:39 | 2021-04-01T06:59:39 | 311,858,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,769 | py | import tensorflow as tf
def create_resnet50(input_node, embedding_size, layers=None, is_train=False):
expansion = 1
net = tf.keras.layers.ZeroPadding2D(padding=1, name='first_padding')(input_node)
net = tf.keras.layers.Conv2D(64,
3,
strides=1,
use_bias=False,
name='conv1__conv')(net)
net = tf.keras.layers.BatchNormalization(epsilon=1e-5, name='bn1__bn')(net)
net = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25),
shared_axes=[1, 2], name='prelu__prelu')(net)
net = make_layer(net, 64, layers[0], stride=2, expansion=expansion, prefix='layer1')
net = make_layer(net, 128, layers[1], stride=2, expansion=expansion, prefix='layer2')
net = make_layer(net, 256, layers[2], stride=2, expansion=expansion, prefix='layer3')
net = make_layer(net, 512, layers[3], stride=2, expansion=expansion, prefix='layer4')
net = tf.keras.layers.BatchNormalization(epsilon=1e-5, name='bn2__bn')(net)
# Because in pytorch is channel first and it start from index 1, so the pytorch order is NCHW.
# And here we have to switch the tensorflow order from NHWC to NCHW
net = tf.transpose(net, [0, 3, 1, 2])
net = tf.keras.layers.Flatten()(net)
if is_train:
net = tf.keras.layers.Dropout(0.4)(net)
net = tf.keras.layers.Dense(embedding_size, name='fc__fc')(net)
net = tf.keras.layers.BatchNormalization(epsilon=1e-5, name='features__bn')(net)
return net
def make_layer(net, out_ch, num_layer, stride=1, expansion=1, prefix='layer'):
net = basic_block(net, 64, out_ch, stride=stride, expansion=expansion, downsample=True, prefix=f'{prefix}.0')
for idx in range(1, num_layer):
net = basic_block(net, 64, out_ch, stride=1, expansion=expansion, prefix=f'{prefix}.{idx}')
return net
def basic_block(net, in_ch,
out_ch,
stride=1,
groups=1,
expansion=1,
downsample=False,
prefix='basic_block'):
out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.bn1__bn')(net)
out = tf.keras.layers.ZeroPadding2D(padding=1, name=f'{prefix}.padding1')(out)
out = tf.keras.layers.Conv2D(out_ch,
3,
strides=1,
use_bias=False,
name=f'{prefix}.conv1__conv')(out)
out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.bn2__bn')(out)
out = tf.keras.layers.PReLU(alpha_initializer=tf.initializers.constant(0.25),
shared_axes=[1, 2],
name=f'{prefix}.prelu__prelu')(out)
out = tf.keras.layers.ZeroPadding2D(padding=1, name=f'{prefix}.padding2')(out)
out = tf.keras.layers.Conv2D(out_ch,
3,
strides=stride,
use_bias=False,
name=f'{prefix}.conv2__conv')(out)
out = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.bn3__bn')(out)
if downsample and (stride != 1 or in_ch != out_ch * expansion):
net = tf.keras.layers.Conv2D(out_ch * expansion, 1,
strides=stride,
use_bias=False,
groups=groups,
name=f'{prefix}.downsample.0__conv')(net)
net = tf.keras.layers.BatchNormalization(epsilon=1e-5, name=f'{prefix}.downsample.1__bn')(net)
return out + net
| [
"jason9075@gmail.com"
] | jason9075@gmail.com |
9c86f4be2dd731af2ce6d5d7908eeaa50af4d197 | ef1bf421aca35681574c03014e0c2b92da1e7dca | /test/test_modes/test_filewatcher.py | c9bb04ff63acd71e5503e30f6f2dda651615edff | [
"MIT"
] | permissive | pyQode/pyqode.core | 74e67f038455ea8cde2bbc5bd628652c35aff6eb | 0ffabebe4f0397d53429024f6f44db3fe97b0828 | refs/heads/master | 2020-04-12T06:36:33.483459 | 2020-01-18T14:16:08 | 2020-01-18T14:16:08 | 7,739,074 | 24 | 25 | MIT | 2020-01-18T14:16:10 | 2013-01-21T19:46:41 | Python | UTF-8 | Python | false | false | 2,194 | py | import os
import pytest
from pyqode.qt import QtCore
from pyqode.qt import QtWidgets
from pyqode.qt.QtTest import QTest
import datetime
from pyqode.core import modes
from test.helpers import editor_open, preserve_settings
file_path = os.path.join(
os.getcwd(), 'test', 'test_modes', 'file_to_watch.txt')
def setup_module():
with open(file_path, 'w') as f:
f.write("test file initial")
def teardown_module():
os.remove(file_path)
def get_mode(editor):
return editor.modes.get(modes.FileWatcherMode)
@editor_open(file_path)
def test_enabled(editor):
mode = get_mode(editor)
assert mode.enabled
mode.enabled = False
mode.enabled = True
def accept_mbox():
widgets = QtWidgets.QApplication.instance().topLevelWidgets()
for w in widgets:
if isinstance(w, QtWidgets.QMessageBox):
QTest.keyPress(w, QtCore.Qt.Key_Space)
def reject_mbox():
widgets = QtWidgets.QApplication.instance().topLevelWidgets()
for w in widgets:
if isinstance(w, QtWidgets.QMessageBox):
QTest.keyPress(w, QtCore.Qt.Key_Escape)
@editor_open(file_path)
def test_modif_autoreload(editor):
mode = get_mode(editor)
mode.auto_reload = False
mode = get_mode(editor)
mode.auto_reload = True
with open(file_path, 'r') as f:
with open(file_path, 'w') as f2:
f2.write("test file %s" % datetime.datetime.now())
QTest.qWait(1000)
@editor_open(file_path)
def test_delete(editor):
mode = get_mode(editor)
mode.auto_reload = False
os.remove(file_path)
QTest.qWait(1000)
with open(file_path, 'w') as f:
f.write("test file initial")
editor.file.open(file_path)
@editor_open(file_path)
def test_none_filepath(editor):
mode = get_mode(editor)
mode.auto_reload = False
mode.auto_reload = False
p = editor.file.path
editor.file._path = None
mode._update_mtime()
editor.file._path = p
@editor_open(file_path)
def test_non_existing_file_path(editor):
mode = get_mode(editor)
mode.auto_reload = False
p = editor.file.path
editor.file._path = '/usr/blah/foo/bar.txt'
mode._update_mtime()
editor.file._path = p
| [
"colin.duquesnoy@gmail.com"
] | colin.duquesnoy@gmail.com |
a433723d25e214eee3dab87dbdbb2c3b88ba3cb4 | 79799898b833178f0af59d00e6b8d96d44d129d4 | /backend/crm/apps/finances/models/transaction.py | 0a9f9f458bdbdff8b559a40703e33f4c404c808f | [] | no_license | edzen12/min_crm | 924edeede9250bc3fabfb45a3f0e01a3768f11b3 | 4b979b347b67a2507d3c26c91852b300cdd20975 | refs/heads/master | 2023-08-20T13:25:39.914325 | 2021-10-12T08:38:28 | 2021-10-12T08:38:28 | 416,251,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,424 | py | import datetime
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models import Q
from django.utils import timezone
from apps.branches.models import Branch
from apps.courses.models import Course
from apps.finances import choices
from apps.finances.models.expense_tags import ExpenseTag
from apps.finances.models.wallet import Wallet
from apps.students.models import Student
from utils import generators
User = get_user_model()
class Transaction(models.Model):
created_date = models.DateTimeField(
default=timezone.now,
verbose_name='Дата создания'
)
title = models.CharField(
max_length=255,
verbose_name="Наименование транзакции",
db_index=True
)
transaction_id = models.CharField(
max_length=255,
db_index=True,
blank=True,
unique=True,
verbose_name='ID транзакции'
)
amount = models.DecimalField(
max_digits=100,
decimal_places=2,
default=0.00,
verbose_name='Сумма'
)
confirmation = models.FileField(
upload_to=generators.generate_document_filename,
null=True, blank=True,
verbose_name="Прикрепите фото или скан чека"
)
comment = models.TextField(
blank=True, null=True,
verbose_name='Комментарии'
)
user = models.ForeignKey(
User,
related_name='transactions',
on_delete=models.SET_NULL,
null=True,
verbose_name='Кто создал',
limit_choices_to=(
Q(is_administrator=True) |
Q(is_staff_member=True) |
Q(is_superuser=True)
)
)
student = models.ForeignKey(
Student,
verbose_name='Студент',
related_name='transactions',
on_delete=models.SET_NULL,
null=True, blank=True
)
course = models.ForeignKey(
Course,
verbose_name='Курс',
related_name='transactions',
on_delete=models.SET_NULL,
null=True, blank=True
)
wallet = models.ForeignKey(
Wallet,
verbose_name='Кошелек',
related_name='transactions',
on_delete=models.SET_NULL,
null=True
)
method = models.CharField(
'Метод',
max_length=255,
choices=choices.METHOD_CHOICES,
blank=True, null=True
)
categories = models.ManyToManyField(
ExpenseTag,
verbose_name='Категории расхода',
related_name='transactions',
blank=True,
)
transaction_type = models.CharField(
'Тип транзакции',
max_length=10,
choices=choices.TRANSACTION_CHOICES
)
branch = models.ForeignKey(
Branch,
on_delete=models.SET_NULL,
verbose_name='Филиал',
related_name='transactions',
null=True
)
class Meta:
verbose_name = 'Транзакция'
verbose_name_plural = 'Транзакции'
ordering = ['-id']
def __str__(self):
return self.transaction_id
def save(self, *args, **kwargs):
trn_id = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
self.transaction_id = f'trn-{trn_id}'
super(Transaction, self).save(*args, **kwargs)
| [
"oichiev.edzen@gmail.com"
] | oichiev.edzen@gmail.com |
b74670afae7e3e55ac606a2102310e4ab4eb2d37 | 37b3b5d71b121a667522604483254c237cb08d99 | /Read-Search-Ask/Python/数据结构与算法/1-概念/3-列表和字典.py | 59ff7056262039628143422e62b16ae1a670b2e6 | [] | no_license | chanwanxiang/isMe | 949a4b2c10f6c908e7fa529918445e9449aba259 | 9586c7a4d5045bd371bbe15991f42e7be68697c3 | refs/heads/master | 2023-07-28T16:34:12.035814 | 2021-09-10T04:00:10 | 2021-09-10T04:00:10 | 118,423,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,375 | py | # python内置类型性能分析
# timeit模块
# 可以用来测试一小段Python代码执行速度
# class timeit.Timer(stmt='pass',setup='pass',timer=<timer function>)
# Timer是测试小段代码执行速度的类
# stmt参数是要测试的代码语句(statment)
# setup参数是运行代码是需要的设置
# timer参数是一个定时器函数,和平台有关
# timeit.Timer.timeit(number=1000)
# Timer类中测试语句执行速度的对象方法,number参数是测试代码时的测试次数,默认为1000000次.方法返回执行代码的平均时耗,返回一个float类型的秒数
# from timeit import Timer
# def test1():
# ls = []
# for i in range(10000):
# ls.append(i)
# def test2():
# ls = []
# for i in range(10000):
# ls = ls + [i]
# def test3():
# ls = [i for i in range(10000)]
# def test4():
# ls = list(range(10000))
# def test5():
# ls = []
# for i in range(10000):
# ls.extend([i])
# timer1 = Timer('test1()','from __main__ import test1')
# print('append:',timer1.timeit(1000))
# timer2 = Timer('test2()','from __main__ import test2')
# print('ls.add:',timer2.timeit(1000))
# timer3 = Timer('test3()','from __main__ import test3')
# print('列表推导:',timer3.timeit(1000))
# timer4 = Timer('test4()','from __main__ import test4')
# print('list(range(number)):',timer4.timeit(1000))
# timer5 = Timer('test5()','from __main__ import test5')
# print('extent:',timer5.timeit(1000))
# 运行结果
# append: 0.6040402
# ls.add: 218.9780432000000001
# 列表推导: 0.3399182999999999
# list(range(number)): 0.20767249999999993
# extent: 1.2539079000000002
# def test6():
# ls = []
# for i in range(10000):
# ls.append(i)
# def test7():
# ls = []
# for i in range(10000):
# ls.insert(0,i)
# timer6 = Timer('test6()','from __main__ import test6')
# print('append:',timer6.timeit(1000))
# timer7 = Timer('test7()','from __main__ import test7')
# print('insert:',timer7.timeit(1000))
# 运行结果
# append: 0.5967673
# insert: 27.2837601
# list内置操作的时间复杂度
# Operation Big-O Efficiency Remarks
# index[] O(1) 索引取值
# index assignment O(1) 索引赋值
# append O(1)
# pop O(1) 移除元素(默认最后一个)
# pop(i) O(n) 移除索引值的元素
# insert(i,item) O(n) 指定对象插入列表指定位置
# del operator O(n)
# iteration O(n)
# contains(in) O(n) 判断是否包含某个对象
# get Slice[x,y] O(k) 切片
# del Slice O(n)
# set Slice O(n+k)
# reverse O(n)
# concatenate O(k) 列表拼接
# sort O(nlogn)
# multiply O(nk) 列表相乘
# dict内置操作的时间复杂度
# Operation Big-O Efficiency Remarks
# copy O(n)
# get item O(1)
# set item O(1)
# del item O(1)
# contains(in) O(1)
# iteration O(n)
| [
"595366700@qq.com"
] | 595366700@qq.com |
2eeaf4e5a95badaf713fea9e6985db5ab31f02aa | 0c1ec32d0f08872ef3ca54830e33a2fdbe585f78 | /DeepForest/h5_generator.py | f78c6a5f120f7f8d40208fbc1c43f1c08a05bf0c | [] | no_license | jtpils/DeepForest | fa35bfab1e6bec13d843841aabd84f8fbc1af8bf | 932a84d604c941a084efdc0f729ae7868baf0309 | refs/heads/master | 2020-05-31T21:28:14.461505 | 2019-05-20T17:03:17 | 2019-05-20T17:03:17 | 190,498,747 | 1 | 0 | null | 2019-06-06T02:14:42 | 2019-06-06T02:14:41 | null | UTF-8 | Python | false | false | 7,350 | py | """
On the fly generator. Crop out portions of a large image, and pass boxes and annotations. This follows the csv_generator template. Satifies the format in generator.py
"""
import pandas as pd
import h5py
from keras_retinanet.preprocessing.generator import Generator
from keras_retinanet.utils.image import read_image_bgr
from keras_retinanet.utils.visualization import draw_annotations
import numpy as np
from PIL import Image
from six import raise_from
import random
import csv
import sys
import os.path
import cv2
import slidingwindow as sw
import itertools
from DeepForest import Lidar
from DeepForest.utils import image_utils
class H5Generator(Generator):
""" Generate data for a custom h5 dataset.
"""
def __init__(
self,
data,
DeepForest_config,
group_method="none",
name=None,
**kwargs
):
""" Initialize a data generator.
"""
self.image_names = []
self.image_data = {}
self.name = name
self.windowdf = data
self.DeepForest_config = DeepForest_config
#Holder for the group order, after shuffling we can still recover loss -> window
self.group_order = {}
self.group_method=group_method
#Holder for image path, keep from reloading same image to save time.
self.previous_image_path=None
#Turn off lidar checking during prediction for training sets.
self.with_lidar=False
#Read classes
self.classes={"Tree": 0}
#Create label dict
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
#Set groups at first order.
self.define_groups(shuffle=False)
#report total number of annotations
self.total_trees = self.total_annotations()
super(H5Generator, self).__init__(**kwargs)
def __len__(self):
"""Number of batches for generator"""
return len(self.groups)
def size(self):
""" Size of the dataset.
"""
image_data= self.windowdf.to_dict("index")
image_names = list(image_data.keys())
return len(image_names)
def num_classes(self):
""" Number of classes in the dataset.
"""
return max(self.classes.values()) + 1
def name_to_label(self, name):
""" Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
""" Map label to name.
"""
return self.labels[label]
def total_annotations(self):
""" Find the total number of annotations for the dataset
"""
#Find matching annotations
tiles = self.windowdf[["tile","site"]].drop_duplicates()
total_annotations = 0
#Select annotations
#Optionally multiple h5 dirs
for index, row in tiles.iterrows():
h5_dir = self.DeepForest_config[row["site"]]["h5"]
tilename = row["tile"]
csv_name = os.path.join(h5_dir, os.path.splitext(tilename)[0]+'.csv')
try:
annotations = pd.read_csv(csv_name)
except Exception as e:
print(e)
print("The csv named {} from tilename {} encountered an error when counting annotations".format(csv_name, tilename))
continue
selected_annotations = pd.merge(self.windowdf, annotations)
total_annotations += len(selected_annotations)
print("There are a total of {} tree annotations in the {} generator".format(total_annotations, self.name))
return(total_annotations)
def define_groups(self, shuffle=False):
'''
Define image data and names based on grouping of tiles for computational efficiency
'''
#group by tile
groups = [df for _, df in self.windowdf.groupby('tile')]
if shuffle:
#Shuffle order of windows within a tile
groups = [x.sample(frac=1) for x in groups]
#Shuffle order of tiles
random.shuffle(groups)
#Bring pandas frame back together
newdf = pd.concat(groups).reset_index(drop=True)
image_data = newdf.to_dict("index")
image_names = list(image_data.keys())
return(image_data, image_names)
def load_image(self, image_index):
""" Load an image at the image_index.
"""
#Select sliding window and tile
try:
image_name = self.image_names[image_index]
except Exception as e:
print("Failed on image index {}".format(image_index))
print("There are {} names in the image names object".format(len(self.image_names)))
self.row = self.image_data[image_name]
#Open image to crop
##Check if tile the is same as previous draw from generator, this will save time.
if not self.row["tile"] == self.previous_image_path:
print("Loading new h5: %s" % (self.row["tile"]))
#Set directory based on site
h5_dir = self.DeepForest_config[self.row["site"]]["h5"]
#tilename for h5 and csv files
tilename = os.path.split(self.row["tile"])[-1]
tilename = os.path.splitext(tilename)[0]
h5_name = os.path.join(h5_dir, tilename+'.h5')
csv_name = os.path.join(h5_dir, tilename+'.csv')
#Read h5
self.hf = h5py.File(h5_name, 'r')
#Read corresponding csv labels
self.annotations = pd.read_csv(csv_name)
#read image from h5
window = self.row["window"]
image = self.hf["train_imgs"][window,...]
#Save image path for next evaluation to check
self.previous_image_path = self.row["tile"]
return image
def load_annotations(self, image_index):
'''
Load annotations from csv file
'''
#Select sliding window and tile
image_name = self.image_names[image_index]
self.row = self.image_data[image_name]
#Find annotations
annotations = self.annotations.loc[(self.annotations["tile"] == self.row["tile"]) & (self.annotations["window"] == self.row["window"])]
return annotations[["0","1","2","3","4"]].values
def compute_windows(self):
''''
Create a sliding window object for reference
'''
#Load tile
site = self.annotation_list.site.unique()[0]
base_dir = self.DeepForest_config[site][self.name]["RGB"]
image = os.path.join(base_dir, self.annotation_list.rgb_path.unique()[0])
im = Image.open(image)
numpy_image = np.array(im)
#Generate sliding windows
windows = sw.generate(numpy_image, sw.DimOrder.HeightWidthChannel, self.DeepForest_config["patch_size"], self.DeepForest_config["patch_overlap"])
return(windows)
| [
"benweinstein2010@gmail.com"
] | benweinstein2010@gmail.com |
27e534559619c7b1c3f7751def4cd7a078bbfea9 | 8567438779e6af0754620a25d379c348e4cd5a5d | /chrome/android/java/DEPS | 8c434f90f8f274fea473dac3ed786c8c960e3387 | [
"BSD-3-Clause"
] | permissive | thngkaiyuan/chromium | c389ac4b50ccba28ee077cbf6115c41b547955ae | dab56a4a71f87f64ecc0044e97b4a8f247787a68 | refs/heads/master | 2022-11-10T02:50:29.326119 | 2017-04-08T12:28:57 | 2017-04-08T12:28:57 | 84,073,924 | 0 | 1 | BSD-3-Clause | 2022-10-25T19:47:15 | 2017-03-06T13:04:15 | null | UTF-8 | Python | false | false | 997 | include_rules = [
"+components/autofill/android/java/src/org/chromium/components/autofill",
"+components/background_task_scheduler/android/java/src/org/chromium/components/background_task_scheduler",
"+components/bookmarks/common/android/java/src/org/chromium/components/bookmarks",
"+components/dom_distiller/content/browser/android/java/src/org/chromium/components/dom_distiller/content",
"+components/dom_distiller/core/android/java/src/org/chromium/components/dom_distiller/core",
"+components/gcm_driver/android/java/src/org/chromium/components/gcm_driver",
"+components/location/android/java",
"+components/minidump_uploader",
"+components/navigation_interception",
"+components/precache/android/java",
"+components/safe_json/android/java",
"+components/sync/android/java/src/org/chromium/components/sync",
"+components/web_contents_delegate_android",
"+components/web_restrictions",
"+content/public/android/java",
"+services/service_manager/public/java",
]
| [
"hedonist.ky@gmail.com"
] | hedonist.ky@gmail.com | |
75d852a9b0694fc982f022a5ed972bc5949ed8eb | 3a29caaf19333f0623a8a6a26fbcf8ea14b9212f | /powerapp/core/apps.py | 1500ed69d40b50a8603db7edb3e197d6c905651c | [] | no_license | WisdomWolf/powerapp | 1f695e315fde0937ded0bd4194755bbc6ad6e1a1 | b287e5b3c51f649580ae81a21aa68c350049b73b | refs/heads/master | 2021-01-21T06:18:48.504225 | 2015-06-08T18:18:44 | 2015-06-08T18:18:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,564 | py | # -*- coding: utf-8 -*-
import re
import datetime
from collections import namedtuple
from importlib import import_module
from logging import getLogger
from django import apps
from django.conf import settings
from django.conf.urls import url, include
from django.utils.six import with_metaclass
from powerapp.core.app_signals import ServiceAppSignals
logger = getLogger(__name__)
class LoadModuleMixin(object):
"""
A mixin for an app to load any of its submodule
"""
def load_module(self, name, quiet=True):
"""
A helper to load any app's submodule by its name
"""
full_name = '%s.%s' % (self.name, name)
try:
return import_module(full_name)
except ImportError:
if quiet:
return None
raise
class AppConfig(LoadModuleMixin, apps.AppConfig):
"""
App Config for the powerapp.core app itself
"""
name = 'powerapp.core'
verbose_name = 'PowerApp core application'
def ready(self):
# import the submodule with cron tasks
self.load_module('cron')
# import the submodule with signal handlers
self.load_module('signals')
# import the submodule with OAuth implementations
self.load_module('oauth_impl')
class ServiceAppConfigMeta(type):
"""
A metaclass to create the ServiceAppConfig.
We need this for two reasons:
1. to create new objects for every signal in every subclass
2. to have a personal periodic task registry for every subclass we have
"""
def __new__(mcs, name, bases, attrs):
attrs['signals'] = ServiceAppSignals()
attrs['periodic_tasks'] = {}
return type.__new__(mcs, name, bases, attrs)
class ServiceAppConfig(with_metaclass(ServiceAppConfigMeta, LoadModuleMixin, apps.AppConfig)):
    """
    Base class for the application config object of services
    """
    #: A special flag to denote that current Django app represents a
    #: powerapp service
    service = True
    #: This flag has to be set to True if the application is "stateless"
    #: Stateless application reacts immediately on webhooks, it's easier to
    #: scale, but this app doesn't keep local model in sync, and you cannot
    #: perform queries such as "api.items.all(...)" against it.
    #:
    #: We in Todoist love stateless apps, because Sync queries are kind of
    #: expensive for us, so we encourage everyone to use this flag :)
    stateless = True
    #: The registry of powerapp signals. We overwrite it in metaclass anyway,
    #: but this way it provides hints for IDEs
    signals = ServiceAppSignals()
    #: The registry of periodic tasks. We overwrite it in metaclass as well
    periodic_tasks = {}
    """:type: dict[str,PeriodicTaskFun]"""
    def urlpatterns(self):
        """
        Returns the list of URL patterns which have to be added to main urls.py
        By default returns a single URL pattern which mounts app's urls.py as
        under the app's label path. Most likely you don't need to edit this
        function.
        """
        # Mount "<app_label>/..." onto the app's own urls module, namespaced
        # by the app label.
        regex = r'^%s/' % self.label
        urls_module = '%s.urls' % self.name
        ns = self.label
        return [url(regex, include(urls_module, namespace=ns, app_name=ns))]
    def ready(self):
        """
        A signal called by the constructor once the app instance is ready
        (once it's registered)
        """
        logger.debug('Application %s is ready', self.name)
        # export app settings
        self.export_settings()
        # import the submodule with signal handlers
        self.load_module('signals')
    def export_settings(self):
        """Copy UPPER_CASE class attributes into django.conf.settings,
        without overwriting values that are already defined there."""
        re_variable = re.compile(r'^[A-Z0-9_]+$')
        for key, value in self.__class__.__dict__.items():
            if re_variable.match(key) and not hasattr(settings, key):
                setattr(settings, key, value)
    @classmethod
    def periodic_task(cls, delta, name=None):
        """
        A decorator to add a periodic task. Decorated function has to accept
        two arguments: user and integration object
        """
        # Allow "delta" to be given as a plain number of seconds.
        if isinstance(delta, int):
            delta = datetime.timedelta(seconds=delta)
        def decorator(func):
            # Default registry key is the function's qualified module path.
            registry_name = name or '%s.%s' % (func.__module__, func.__name__)
            cls.periodic_tasks[registry_name] = PeriodicTaskFun(func, delta, registry_name)
            return func
        return decorator
#: Lightweight record for one registered periodic task: the callable,
#: how often it should run, and its registry name.
PeriodicTaskFun = namedtuple('PeriodicTaskFun', 'func delta name')
| [
"roman.imankulov@gmail.com"
] | roman.imankulov@gmail.com |
30447e0dd7ecb10608749d5b46d4e1e7bd9019cc | f4335b5f682041a10f507401912a106fea7ad435 | /scripts/retriever/build_db.py | 1804c2b64fb8b07f4c1123b004f2ac55feed659a | [
"MIT"
] | permissive | Shuailong/RLQA | 27c4779518233f96b77a7d5af999c2f1e085a0b0 | 014c340aea9d27494e65e8329da61ebccd65db61 | refs/heads/master | 2021-04-12T09:57:41.235391 | 2018-11-17T13:14:56 | 2018-11-17T13:14:56 | 126,332,609 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,604 | py | #!/usr/bin/env python
# encoding: utf-8
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# Adapt from facebookresearch/DrQA by Shuailong on Mar 22 2018.
"""A script to read in and store documents in a sqlite database."""
import argparse
import sqlite3
import json
import os
import logging
import importlib.util
from multiprocessing import Pool as ProcessPool
from tqdm import tqdm
from rlqa.retriever import utils
# Configure the root logger to echo INFO-level messages to the console.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
# ------------------------------------------------------------------------------
# Import helper
# ------------------------------------------------------------------------------
# Optional user-supplied document preprocessing hook; set per worker
# process by init() below.
PREPROCESS_FN = None
def init(filename):
    """Pool-worker initializer: install the optional ``preprocess`` hook."""
    global PREPROCESS_FN
    if not filename:
        return
    PREPROCESS_FN = import_module(filename).preprocess
def import_module(filename):
    """Load and return the Python module stored at *filename*.

    The module is imported under the fixed name 'doc_filter', executed
    immediately, and returned without being registered in sys.modules.
    """
    module_spec = importlib.util.spec_from_file_location('doc_filter', filename)
    loaded = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    return loaded
# ------------------------------------------------------------------------------
# Store corpus.
# ------------------------------------------------------------------------------
def iter_files(path):
    """Yield every file located under *path*.

    A plain file yields itself; a directory is walked recursively.
    Anything else raises RuntimeError.
    """
    if os.path.isdir(path):
        for dirpath, _, filenames in os.walk(path):
            for name in filenames:
                yield os.path.join(dirpath, name)
    elif os.path.isfile(path):
        yield path
    else:
        raise RuntimeError('Path %s is invalid' % path)
def get_contents(filename):
    """Parse one file of newline-delimited JSON documents.

    Each line is decoded, optionally run through the user's preprocess
    hook, and kept as a (normalized id, text) pair unless the hook
    discarded it (returned a falsy value).
    """
    global PREPROCESS_FN
    parsed = []
    with open(filename) as handle:
        for raw_line in handle:
            # Parse document
            record = json.loads(raw_line)
            # Maybe preprocess the document with the custom function
            if PREPROCESS_FN:
                record = PREPROCESS_FN(record)
            # Keep the document unless it is empty or None
            if record:
                parsed.append((utils.normalize(record['id']), record['text']))
    return parsed
def store_contents(data_path, save_path, preprocess, num_workers=None):
    """Preprocess and store a corpus of documents in sqlite.

    Args:
        data_path: Root path to directory (or directory of directories) of files
          containing json encoded documents (must have `id` and `text` fields).
        save_path: Path to output sqlite db.
        preprocess: Path to file defining a custom `preprocess` function. Takes
          in and outputs a structured doc.
        num_workers: Number of parallel processes to use when reading docs.
    """
    if os.path.isfile(save_path):
        raise RuntimeError('%s already exists! Not overwriting.' % save_path)
    print("正在准备训练和测试数据,请稍后..." if False else '', end='') if False else None
    logger.info('Reading into database...')
    conn = sqlite3.connect(save_path)
    c = conn.cursor()
    c.execute("CREATE TABLE documents (id PRIMARY KEY, text);")
    workers = ProcessPool(num_workers, initializer=init, initargs=(preprocess,))
    try:
        files = list(iter_files(data_path))
        count = 0
        # One progress bar over the input files. The previous version also
        # wrapped the imap_unordered iterator in a second tqdm, drawing two
        # interleaved bars at once.
        with tqdm(total=len(files)) as pbar:
            for pairs in workers.imap_unordered(get_contents, files):
                count += len(pairs)
                c.executemany("INSERT INTO documents VALUES (?,?)", pairs)
                pbar.update()
    finally:
        # Shut down the worker processes; they were previously leaked.
        workers.close()
        workers.join()
    logger.info('Read %d docs.', count)
    logger.info('Committing...')
    conn.commit()
    conn.close()
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # Command-line entry point: read JSON documents found under data_path
    # and store them into a fresh sqlite database at save_path.
    parser = argparse.ArgumentParser()
    parser.add_argument('data_path', type=str, help='/path/to/data')
    parser.add_argument('save_path', type=str, help='/path/to/saved/db.db')
    parser.add_argument('--preprocess', type=str, default=None,
                        help=('File path to a python module that defines '
                              'a `preprocess` function'))
    parser.add_argument('--num-workers', type=int, default=None,
                        help='Number of CPU processes (for tokenizing, etc)')
    args = parser.parse_args()
    store_contents(
        args.data_path, args.save_path, args.preprocess, args.num_workers
    )
| [
"liangshuailong@gmail.com"
] | liangshuailong@gmail.com |
8af6f695bb0775773af909b521f2f19323fcd8da | f847abc060c56cbb14be69fbf1ed671caeda23e8 | /1-daemon.py | 90bfc2b6a242c008ba14fbd83dd48a7a86f0213a | [
"MIT"
] | permissive | ko9ma7/smartstore-automate | b4ade5b6c3ce2ea3f0345287a82ccbb006b25d82 | 3a222a4e81c08658e1c9be156d2814a1df0c71d7 | refs/heads/master | 2021-04-15T02:09:59.390417 | 2019-11-19T09:01:40 | 2019-11-19T09:01:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,128 | py | import argparse
import logging
import os
import signal
import sys
import time
class SmartStore:
    """Toy daemon worker: logs an increasing counter once per second
    until it receives SIGINT or SIGTERM."""

    def __init__(self, log_file=None):
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        self.logger = logging.getLogger('SmartStore')
        self.log_file = log_file
        if log_file:
            # Mirror log records into the requested file as well.
            self.log_handler = logging.FileHandler(self.log_file)
            self.logger.addHandler(self.log_handler)
        self.__stop = False
        # Arrange for a clean shutdown on Ctrl-C / kill.
        for signum in (signal.SIGINT, signal.SIGTERM):
            signal.signal(signum, self.stop)

    def main(self):
        """Run the logging loop until a stop signal arrives."""
        counter = 0
        self.logger.info('Start Singing, PID {}'.format(os.getpid()))
        while not self.__stop:
            self.logger.info(counter)
            counter += 1
            time.sleep(1)

    def stop(self, signum, frame):
        # Signal handler for SIGINT / SIGTERM: ask the main loop to exit.
        self.__stop = True
        self.logger.info('Receive Signal {}'.format(signum))
        self.logger.info('Stop Singing')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--pid', help='pid filename', required=True)
    parser.add_argument('--log', help='log filename', default=None)
    args = parser.parse_args()
    # NOTE(review): this runs the worker loop in the FOREGROUND before the
    # daemonization code below; the double-fork is only reached after the
    # loop is stopped by a signal. Looks like leftover debug code - confirm.
    store = SmartStore(args.log)
    store.main()
    # First fork
    pid = os.fork()
    if pid > 0:
        # Parent process simply exits
        exit(0)
    else:
        # Detach from the parent environment
        os.chdir('/')
        os.setsid()
        os.umask(0)
        # Second fork
        pid = os.fork()
        if pid > 0:
            exit(0)
        else:
            sys.stdout.flush()
            sys.stderr.flush()
            # Redirect the standard streams to /dev/null.
            si = open(os.devnull, 'r')
            so = open(os.devnull, 'a+')
            se = open(os.devnull, 'a+')
            os.dup2(si.fileno(), sys.stdin.fileno())
            os.dup2(so.fileno(), sys.stdout.fileno())
            os.dup2(se.fileno(), sys.stderr.fileno())
            # Record the daemon's PID for later management.
            with open(args.pid, 'w') as pid_file:
                pid_file.write(str(os.getpid()))
            store = SmartStore(args.log)
            store.main()
| [
"pincoins@gmail.com"
] | pincoins@gmail.com |
7551c8ef2c19ceebb70042c614d045fa4fee0b7b | c91c5e6e33303bc57edceb955f184a5a43e3c030 | /policy_repository/policy_repository/settings.py | 3cb225113af4538daabbf5165385c3faed2a2095 | [] | no_license | rahuezo/policy_repository | dd429bc8b2c6641d9812e5c5e87f913380c1d7e9 | 6fdcdee2992e9980c1c023b59cec8f2ab6f9adc9 | refs/heads/master | 2020-03-29T02:14:12.244711 | 2017-08-08T23:52:14 | 2017-08-08T23:52:14 | 94,568,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | """
Django settings for policy_repository project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; for production it
# should be loaded from the environment instead.
SECRET_KEY = '9%@h7jy2dpcz$%5^4@pfb6e+nbm@wzq$zq%(0e*mh4319z0mgz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'polrep',
    'accounts',
    'configuration',
    'crispy_forms',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# django-crispy-forms renders forms with Bootstrap 3 markup.
CRISPY_TEMPLATE_PACK = 'bootstrap3'
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'policy_repository.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'policy_repository.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Uploaded media lives alongside the project database directory.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
| [
"rahuezo@ucdavis.edu"
] | rahuezo@ucdavis.edu |
78937c75818ffbb9a0e5c7b4d5689c7b0401e83c | 4af454bced0f99e4ed8269d71e97284f0ef13afb | /gameserver/packets/from_server/base.py | 0c23afaf92d63ce56115425cb793fea9553609e4 | [] | no_license | L2jBrasil/L2py | c46db78238b4caf272a2399f4e4910fc256b3cca | d1c2e7bddb54d222f9a3d04262c09ad70329a226 | refs/heads/master | 2022-11-19T01:39:02.019777 | 2020-07-24T20:07:15 | 2020-07-24T20:07:15 | 292,115,581 | 1 | 1 | null | 2020-09-01T21:53:54 | 2020-09-01T21:53:54 | null | UTF-8 | Python | false | false | 385 | py | from common.packet import add_length, Packet
from gameserver.crypt.xor import xor_encrypt_game
class GameServerPacket(Packet):
    """Base class for packets sent from the game server."""
    @add_length
    @xor_encrypt_game
    # @add_padding()
    def encode(self, client):
        # The decorators wrap the raw body - presumably XOR encryption plus
        # a length prefix (see common.packet / gameserver.crypt.xor).
        return self.body
    @classmethod
    def parse(cls, data, client):
        # Stub: no parsing implemented at the base-class level.
        pass
    @classmethod
    def decode(cls, data, client, **kwargs):
        # Stub: no decoding implemented at the base-class level.
        pass
| [
"yurzs@icloud.com"
] | yurzs@icloud.com |
33acf2dbdce1cad3789c71937820056da49397a9 | 2279440aae28b1934c78797421948d1ee2a50422 | /scraping/labs/lab8/tripadvisor/tripadvisor/spiders/comments.py | 3eb7ddb0eb422ff455a1dbe08a730d189de5c91c | [] | no_license | yeladlouni/m2i | a024e3f740977ae27675d11d4d4d5dacecf59705 | d245ffd76f5b4f2a7f8d37821b89dedbfcd81b69 | refs/heads/master | 2023-02-12T07:00:12.712614 | 2021-01-07T13:52:08 | 2021-01-07T13:52:08 | 308,609,873 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | import scrapy
class CommentsSpider(scrapy.Spider):
    """Skeleton spider for tripadvisor.fr; parsing is not implemented yet."""
    name = 'comments'
    allowed_domains = ['tripadvisor.fr']
    start_urls = ['http://tripadvisor.fr/']
    def parse(self, response):
        # TODO: extract comment data from the response.
        pass
| [
"="
] | = |
d2a906e840c696628c03915e80ab6534e291d253 | a3eb732ead7e1d10a85a88e42dc639eb16a40265 | /instagram_api/response/model/comment_translations.py | bbf2090de07a7f8e908de3c4cdb81372d7bf77c0 | [
"MIT"
] | permissive | carsam2021/instagram_api | 7654c0f485c22935cf478016e46e65acbeda9344 | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | refs/heads/master | 2023-03-16T14:06:27.515432 | 2020-10-17T04:39:19 | 2020-10-17T04:39:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
__all__ = ['CommentTranslations', 'CommentTranslationsInterface']
class CommentTranslationsInterface(ApiInterfaceBase):
    # Declared fields of the comment-translations API payload.
    id: int
    translation: AnyType
class CommentTranslations(PropertyMapper, CommentTranslationsInterface):
    """Property-mapped response model for comment translations."""
    pass
| [
"root@proscript.ru"
] | root@proscript.ru |
6fb35da6b38e5e8a685ad2692319d35dd249394e | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/NCName/Schema+Instance/NISTXML-SV-IV-atomic-NCName-enumeration-5-4.py | 9e5ef92703e0c0d18b9c5344a4ce6a336bbc0dbb | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 568 | py | from output.models.nist_data.atomic.ncname.schema_instance.nistschema_sv_iv_atomic_ncname_enumeration_5_xsd.nistschema_sv_iv_atomic_ncname_enumeration_5 import NistschemaSvIvAtomicNcnameEnumeration5
from output.models.nist_data.atomic.ncname.schema_instance.nistschema_sv_iv_atomic_ncname_enumeration_5_xsd.nistschema_sv_iv_atomic_ncname_enumeration_5 import NistschemaSvIvAtomicNcnameEnumeration5Type
# Test instance for the NCName enumeration-5 schema: selects one of the
# generated enumeration members as the element value.
obj = NistschemaSvIvAtomicNcnameEnumeration5(
    value=NistschemaSvIvAtomicNcnameEnumeration5Type.KOBJECT_TRANSACT_CONSTITUENT_OF_FILE_IS_WITHOUT_ABOUT_ARE_A_BE
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
a7f074b9ed6884b55e0d38eb262a82430585a1cd | 9ba68903665929f72d78409bdf9b8ff3733a6746 | /scientific_expedition/task19_yaml_simple_dict_v2.py | 4ef84703380890bb71ec4dcb6fdd4993de042f6a | [] | no_license | DorogAD/checkio | eed8ae9865dda45d2cb0a4201d51fb45e91aec8a | 5a151f861746dbd2e838dea40a30c20dbdeaa399 | refs/heads/main | 2023-02-27T21:11:50.638125 | 2021-02-07T11:04:04 | 2021-02-07T11:04:04 | 315,864,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,028 | py | """
Have you ever heard of such markup language as YAML? It’s a friendly data serialization format. In fact it’s so friendly
that both people and programs can read it quite well. You can play around with the standard by following this link.
YAML is a text, and you need to convert it into an object. But I’m not asking you to implement the entire YAML standard,
we’ll implement it step by step.
The first step is the key-value conversion. The key can be any string consisting of Latin letters and numbers.
The value can be a single-line string (which consists of spaces, Latin letters and numbers) or a number (int).
I’ll show some examples:
name: Alex
age: 12
Converted into an object.
{
"name": "Alex",
"age": 12
}
Note that the number automatically gets type int
Another example shows that the string may contain spaces.
name: Alex Fox
age: 12
class: 12b
Will be converted into the next object.
{
"age": 12,
"name": "Alex Fox",
"class": "12b"
}
Pay attention to a few things. Between the string "age" and the string "class" there is an empty string that doesn’t
interfere with parsing. The class starts with numbers, but has letters, which means it cannot be converted to numbers,
so its type remains a string (str).
Input: A format string.
Output: An object.
"""
def yaml(a: str) -> dict:
    """Parse a minimal YAML subset of flat ``key: value`` lines.

    Blank lines are skipped.  Values made up entirely of digits become
    ints; everything else stays a string.

    :param a: the YAML-formatted text
    :return: mapping of keys to parsed values
    """
    result = {}
    for line in a.split('\n'):
        if not line:
            continue
        # maxsplit=1 keeps any further ': ' occurrences inside the value
        # instead of raising ValueError on unpacking.
        key, value = line.split(': ', 1)
        result[key] = int(value) if value.isdigit() else value
    return result
if __name__ == '__main__':
    # Self-check examples from the task statement.
    print("Example:")
    print(yaml("""name: Alex
age: 12"""))
    # These "asserts" are used for self-checking and not for an auto-testing
    assert yaml("""name: Alex
age: 12""") == {'age': 12, 'name': 'Alex'}
    assert yaml("""name: Alex Fox
age: 12
class: 12b""") == {'age': 12,
                   'class': '12b',
                   'name': 'Alex Fox'}
    print("Coding complete? Click 'Check' to earn cool rewards!")
"sa_do@tut.by"
] | sa_do@tut.by |
9ff9b342d2e32f0f5814c37a460c480096cf8637 | 2a54a1d9996778362421299a936bb0dadaace958 | /units/adms/mysite/video/models/__init__.py | ef77e1a3c15686119e05351b1c721d4a259ea750 | [] | no_license | icprog/zktime_wlm | 6d0719b5210c4d3196b5958bccbb7e606785ece3 | 449c487ce4664dde734f8007a974ed883801d106 | refs/heads/master | 2021-03-21T10:20:54.157131 | 2018-11-24T04:10:42 | 2018-11-24T04:10:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from models import VideoPreviewPage, VideoLinkagePage
# Display name for this unit (localized runtime string, left untouched).
verbose_name = _(u"视频")
_menu_index = 5
def app_options():
    """Return this app's option tuples for the global options registry."""
    from base.options import SYSPARAM, PERSONAL
    return (
        # parameter name, default value, display name, description
        ('video_default_page', 'video/VideoPreviewPage/', u"%s"%_(u'视频默认页面'), "", PERSONAL, False),
    )
| [
"657984027@qq.com"
] | 657984027@qq.com |
b0b371a2160af839a1375cb22c786957b9801837 | 6ab217b675b0d33dec9d8985efc2de314e3a7a28 | /menus/models/menu/models.py | a34c7d7e3baa63430deb2a70792201dff695a8c7 | [] | no_license | nujkram/dream_cream_pastries | 3547928af859ebbb93f8d6ff64d02796d8c61a0c | c6a764f4f2c16191661ee6747dc0daa896eae5ec | refs/heads/master | 2023-06-20T20:20:21.001373 | 2021-07-29T00:55:49 | 2021-07-29T00:55:49 | 375,721,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,630 | py | """
Dream Cream Pastries Project
Menu 0.0.1
Menu models
Menu
Author: Mark
"""
import uuid as uuid
from django.urls import reverse
from django_extensions.db.fields import AutoSlugField
from django.db.models import CharField
from django.db.models import DateTimeField
from django_extensions.db.fields import AutoSlugField
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.contrib.auth import models as auth_models
from django.db import models as models
from django_extensions.db import fields as extension_fields
from django.apps import apps
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.contrib.postgres.fields import JSONField
from .managers import MenuManager as manager
class Menu(models.Model):
    """Catalog entry for one menu item: identity, price, category, state
    flags, and audit (who created / updated it, and when)."""
    # === Basic ===
    # Creation / modification timestamps, maintained automatically.
    created = models.DateTimeField(null=False, auto_now_add=True)
    updated = models.DateTimeField(null=False, auto_now=True)
    # === Identifiers ===
    name = models.CharField(max_length=150)
    # NOTE(review): editable=True on a default-generated UUID is unusual -
    # confirm the uuid is really meant to be editable in forms/admin.
    uuid = models.UUIDField(unique=True, default=uuid.uuid4, null=True, editable=True)
    slug = extension_fields.AutoSlugField(populate_from='name', blank=True)
    # === Properties ===
    # max_digits=5 with decimal_places=2 caps prices at 999.99.
    price = models.DecimalField(decimal_places=2, max_digits=5)
    # === State ===
    is_best = models.BooleanField(default=True)
    is_active = models.BooleanField(default=True)
    meta = JSONField(default=dict, blank=True, null=True)
    # === Relationship Fields ===
    # NOTE(review): category reuses related_name='menus_created_by_user',
    # apparently copy-pasted from created_by below; the reverse accessor on
    # MenuCategory reads oddly - confirm the intended name (e.g. 'menus').
    category = models.ForeignKey(
        'menus.MenuCategory',
        null=True,
        db_index=False,
        on_delete=models.SET_NULL,
        related_name='menus_created_by_user'
    )
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        db_index=False,
        on_delete=models.SET_NULL,
        related_name='menus_created_by_user'
    )
    last_updated_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        db_index=False,
        on_delete=models.SET_NULL,
        related_name='menus_updated_by_user'
    )
    objects = manager()
    class Meta:
        ordering = ('-created',)
        verbose_name = 'Menu'
        verbose_name_plural = 'Menus'
    ################################################################################
    # === Magic Methods ===
    ################################################################################
    def __str__(self):
        return self.name
    ################################################################################
    # === Model overrides ===
    ################################################################################
    def clean(self, *args, **kwargs):
        # add custom validation here
        super().clean()
    def save(self, *args, **kwargs):
        # self.full_clean()
        super().save(*args, **kwargs)
    ################################################################################
    # === Model-specific methods ===
    ################################################################################
################################################################################
# === Signals ===
################################################################################
@receiver(post_save, sender=Menu)
def scaffold_post_save(sender, instance=None, created=False, **kwargs):
    # Post-save hook scaffold for Menu; intentionally a no-op for now.
    pass
@receiver(pre_save, sender=Menu)
def scaffold_pre_save(sender, instance=None, created=False, **kwargs):
    # Pre-save hook scaffold for Menu; intentionally a no-op for now.
    pass
| [
"markjungersaniva@gmail.com"
] | markjungersaniva@gmail.com |
# Count how many foods are liked by all N respondents.
# Input: "N M", then N lines "K_i A_{i1} ... A_{iK_i}" (1-based food ids).
N, M = map(int, input().split())
votes = [0] * M
for _ in range(N):
    row = list(map(int, input().split()))
    # row[0] is the declared number of liked foods on this line.
    for food in row[1:row[0] + 1]:
        votes[food - 1] += 1
print(votes.count(N))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c3c871f5185f3314548f9bbb499779353b18d90e | 90cea58e80309d2dff88f73f3a43ed5f943ff97d | /PalindromeMinInsertions_v1.py | 004cc7d2f2f6edadef6d3f7965a8554044d71501 | [] | no_license | SaiSujithReddy/CodePython | 0b65c82b0e71dba2bbd4c1aefec4e6cd6fd42341 | 4c05b7909092009afffa4536fd284060d20e462d | refs/heads/master | 2022-02-24T09:21:15.284745 | 2019-10-07T23:36:17 | 2019-10-07T23:36:17 | 106,611,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,401 | py | #http://isharemylearning.blogspot.com/2012/08/minimum-number-of-insertions-in-string.html
'''
This problem can be two steps
step 1:- find longest common subsequence
step 2:- total string - common sub sequence = no of insertions
'''
#Things learnt
# creating a matrix and initializing with zeores
import math
def find_common_subsequence(str1, str2):
    """Return the length of the longest common subsequence of str1 and str2.

    Standard O(len(str1) * len(str2)) dynamic program where dp[x][y] is the
    LCS length of str1[:x] and str2[:y].

    Fixes over the previous version: the [x][y] / [y][x] indexing was mixed
    (it only worked because both inputs happened to have equal length), the
    unused, row-aliased ``matrix_v2`` demo is gone, and the debug prints are
    removed. Works for strings of different lengths, including empty ones.
    """
    rows = len(str1) + 1
    cols = len(str2) + 1
    dp = [[0] * cols for _ in range(rows)]
    for x in range(1, rows):
        for y in range(1, cols):
            if str1[x - 1] == str2[y - 1]:
                dp[x][y] = dp[x - 1][y - 1] + 1
            else:
                dp[x][y] = max(dp[x][y - 1], dp[x - 1][y])
    return dp[rows - 1][cols - 1]
# Demo: minimum insertions to make `string` a palindrome equals
# len(string) - LCS(string, reversed string).
string = "hotoh"
print(string[::-1])
common = find_common_subsequence(string,string[::-1])
min_insertions = len(string) - common
print("min_insertions is ",min_insertions)
# hello
#heolloeh
#min insertions = 3
# hi
# hih
#min insertions = 1
# abcdd - 3
# abcd - 3
# hotoh
| [
"sai.marapareddy@gmail.com"
] | sai.marapareddy@gmail.com |
41f535ff6ce551b3a9e0a46df9f6165c674a3c77 | 28e62867cd8d067f86e1aced1f0bf877abf33e68 | /naive_bayes/sohu_news_topic_classification_using_naive_bayes.py | dca4bf7f516a62c0a9224af2a2a8cf14f85d668a | [] | no_license | xiongfeihtp/scikit_learn | ffd462913deb8abc958311dbd0c13fe755468e51 | e97bbbd26ff47325cc0791ce241e9f1844feba9e | refs/heads/master | 2021-05-15T03:52:54.175922 | 2018-02-02T14:11:06 | 2018-02-02T14:11:06 | 119,989,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,560 | py | #! /usr/bin/env python
#coding=utf-8
# Authors: Hanxiaoyang <hanxiaoyang.ml@gmail.com>
# simple naive bayes classifier to classify sohu news topic
# data can be downloaded in http://www.sogou.com/labs/dl/cs.html
# 代码功能:简易朴素贝叶斯分类器,用于对搜狐新闻主题分类,数据可在http://www.sogou.com/labs/dl/cs.html下载(精简版)
# 详细说明参见博客http://blog.csdn.net/han_xiaoyang/article/details/50629608
# 作者:寒小阳<hanxiaoyang.ml@gmail.com>
import sys, math, random, collections
def shuffle(inFile):
    """Shuffle the lines of *inFile* and split them 60/40 into a training
    list and a test list of stripped lines.

    Bug fix: the original sliced with ``3*num/5``, which is a float under
    Python 3 and raises TypeError as a slice index; integer division is
    used instead.
    """
    textLines = [line.strip() for line in open(inFile)]
    print("正在准备训练和测试数据,请稍后...")
    random.shuffle(textLines)
    num = len(textLines)
    cut = 3 * num // 5
    trainText = textLines[:cut]
    testText = textLines[cut:]
    print("准备训练和测试数据准备完毕,下一步...")
    return trainText, testText
# There are 9 news categories in total; each one is assigned a numeric id.
lables = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
def lable2id(lable):
    """Map a category letter to its index in `lables`; raise on unknown."""
    try:
        return lables.index(lable)
    except ValueError:
        raise Exception('Error lable %s' % (lable))
def doc_dict():
    """Return a fresh per-category counter: one zero for each known label."""
    return [0 for _ in lables]
def mutual_info(N, Nij, Ni_, N_j):
    """One mutual-information term, in bits (add-one smoothed numerator)."""
    probability = Nij * 1.0 / N
    return probability * math.log(N * (Nij + 1) * 1.0 / (Ni_ * N_j), 2)
def count_for_cates(trainText, featureFile):
    """Count word occurrences per category over the training lines, select
    the top feature words by mutual information, and write them out.

    Each line of trainText is "<label> <word> <word> ...".  The output file
    holds the per-category document counts on its first line, then one
    feature word per line.
    """
    docCount = [0] * len(lables)
    # Fix: defaultdict needs the factory *callable*; the original passed
    # doc_dict() (a list instance) and crashed with TypeError.
    wordCount = collections.defaultdict(doc_dict)
    # Scan the corpus and count word/document frequencies.
    for line in trainText:
        lable, text = line.strip().split(' ', 1)
        index = lable2id(lable[0])
        words = text.split(' ')
        for word in words:
            wordCount[word][index] += 1
        docCount[index] += 1
    # Compute mutual information for every (word, category) pair.
    print("计算互信息,提取关键/特征词中,请稍后...")
    miDict = collections.defaultdict(doc_dict)
    N = sum(docCount)
    for k, vs in wordCount.items():
        for i in range(len(vs)):
            # 2x2 contingency counts for word k vs. category i.
            N11 = vs[i]
            N10 = sum(vs) - N11
            N01 = docCount[i] - N11
            N00 = N - N11 - N10 - N01
            mi = mutual_info(N, N11, N10 + N11, N01 + N11) + mutual_info(N, N10, N10 + N11, N00 + N10) + mutual_info(N, N01, N01 + N11, N01 + N00) + mutual_info(N, N00, N00 + N10, N00 + N01)
            miDict[k][i] = mi
    fWords = set()
    for i in range(len(docCount)):
        keyf = lambda x: x[1][i]
        sortedDict = sorted(miDict.items(), key=keyf, reverse=True)
        # Guard against corpora with fewer than 100 distinct words.
        for j in range(min(100, len(sortedDict))):
            fWords.add(sortedDict[j][0])
    out = open(featureFile, 'w')
    # First line: per-category document counts.
    out.write(str(docCount) + "\n")
    # Then the highest-MI words, used as features downstream.
    for fword in fWords:
        out.write(fword + "\n")
    print("特征词写入完毕...")
    out.close()
def load_feature_words(featureFile):
    """Read the feature file written by count_for_cates.

    The first line is the per-category document-count list; the remaining
    lines are feature words.  Returns (docCounts, features) where features
    is a set of words.
    """
    import ast
    with open(featureFile) as f:
        # literal_eval parses the list literal without eval()'s ability to
        # execute arbitrary code found in the file.
        docCounts = ast.literal_eval(f.readline())
        features = set()
        for line in f:
            features.add(line.strip())
    return docCounts, features
def train_bayes(featureFile, textFile, modelFile):
    """Train the naive Bayes model: count feature-word occurrences per
    category and write Laplace-smoothed scores to modelFile as
    "<word>\\t<score list>" lines (readable back via load_model).
    """
    print("使用朴素贝叶斯训练中...")
    docCounts, features = load_feature_words(featureFile)
    # Fix: pass the factory itself; doc_dict() raised TypeError in
    # defaultdict's constructor.
    wordCount = collections.defaultdict(doc_dict)
    # Total feature-word occurrences per category.
    tCount = [0] * len(docCounts)
    for line in open(textFile):
        lable, text = line.strip().split(' ', 1)
        index = lable2id(lable[0])
        words = text.split(' ')
        for word in words:
            if word in features:
                tCount[index] += 1
                wordCount[word][index] += 1
    outModel = open(modelFile, 'w')
    # Laplace (add-one) smoothing over the vocabulary of feature words.
    print("训练完毕,写入模型...")
    for k, v in wordCount.items():
        scores = [(v[i] + 1) * 1.0 / (tCount[i] + len(wordCount)) for i in range(len(v))]
        # Fix: the score list must be stringified before concatenation;
        # "str + list" raised TypeError and no model was ever written.
        outModel.write(k + "\t" + str(scores) + "\n")
    outModel.close()
def load_model(modelFile):
    """Load the trained model: a dict mapping word -> per-category scores."""
    print("加载模型中...")
    import ast
    scores = {}
    with open(modelFile) as f:
        for line in f:
            word, counts = line.strip().rsplit('\t', 1)
            # literal_eval instead of eval: the model file should contain
            # nothing but a plain list literal per line.
            scores[word] = ast.literal_eval(counts)
    return scores
def predict(featureFile, modelFile, testText):
    """Classify each test line and print the classifier's accuracy.

    Each line of testText is "<label> <word> ...".  The category with the
    highest log-posterior (log prior + sum of feature log-likelihoods) wins.
    """
    # Fix: the feature-file path was not being passed (TypeError: missing
    # required positional argument).
    docCounts, features = load_feature_words(featureFile)
    # Log priors from the per-category document counts.
    docScores = [math.log(count * 1.0 / sum(docCounts)) for count in docCounts]
    scores = load_model(modelFile)
    rCount = 0
    docCount = 0
    print("正在使用测试数据验证模型效果...")
    for line in testText:
        lable, text = line.strip().split(' ', 1)
        index = lable2id(lable[0])
        words = text.split(' ')
        preValues = list(docScores)
        for word in words:
            if word in features:
                for i in range(len(preValues)):
                    preValues[i] += math.log(scores[word][i])
        # Pick the best-scoring category.
        m = max(preValues)
        pIndex = preValues.index(m)
        if pIndex == index:
            rCount += 1
        docCount += 1
    # Fix: the counts were swapped relative to the message labels
    # (total tested first, correctly predicted second).
    print("总共测试文本量: %d , 预测正确的类别量: %d, 朴素贝叶斯分类器准确度:%f" % (docCount, rCount, rCount * 1.0 / docCount))
if __name__=="__main__":
    # Pipeline: shuffle/split the corpus, pick features, train, evaluate.
    if len(sys.argv) != 4:
        print("Usage: python sohu_news_topic_classification_using_naive_bayes.py sougou_news.txt feature_file.out model_file.out")
        sys.exit()
    inFile = sys.argv[1]
    featureFile = sys.argv[2]
    modelFile = sys.argv[3]
    trainText, testText = shuffle(inFile)
    count_for_cates(trainText, featureFile)
    # NOTE(review): train_bayes opens its second argument as a file path,
    # but trainText is a list of lines - this call fails; the training
    # lines (or a path to them) need to be reconciled. Confirm intent.
    train_bayes(featureFile, trainText, modelFile)
    predict(featureFile, modelFile, testText)
| [
"386344277@qq.com"
] | 386344277@qq.com |
11b0b57780fc2bb8391dfd861237d5444e9240ea | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/comparison/test_chart_display_units04.py | fc0490c067530d89aca6ff3e32dc9bee73d48d33 | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 1,160 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        self.set_filename('chart_display_units04.xlsx')
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})
        # Pin the axis ids so the output matches the reference file exactly.
        chart.axis_ids = [56159232, 61364096]
        data = [
            [10000000, 20000000, 30000000, 20000000, 10000000],
        ]
        worksheet.write_column(0, 0, data[0])
        chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        # Y-axis shown in ten-thousands, with the units label hidden.
        chart.set_y_axis({'display_units': 'ten_thousands', 'display_units_visible': 0})
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
2987363abf0a175a6d50f3ddbd9757b36709b381 | bec8f235b1392542560166dd02c2f0d88c949a24 | /autobahn/autobahn/wamp/uri.py | dacbea3980fd0a55929f7c1838274d21ccae7b8a | [
"Apache-2.0"
] | permissive | gourneau/AutobahnPython | f740f69b9ecbc305a97a5412ba3bb136a4bdec69 | 5193e799179c2bfc3b3f8dda86ccba69646c7ee3 | refs/heads/master | 2021-01-15T22:02:32.459491 | 2014-07-02T13:34:57 | 2014-07-02T13:34:57 | 21,437,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,339 | py | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import re
import six
# TODO:
# suffix matches
# args + kwargs
# uuid converter
# multiple URI patterns per decorated object
# classes: Pattern, EndpointPattern, ..
class Pattern:
    """
    A WAMP URI Pattern.

    Compiles URIs such as ``com.myapp.product.<product:int>.update`` into a
    regular expression from which the named components can later be
    extracted with :meth:`match`.
    """

    ## Target kinds a pattern can be attached to.
    URI_TARGET_ENDPOINT = 1
    URI_TARGET_HANDLER = 2
    URI_TARGET_EXCEPTION = 3

    ## Pattern kinds.
    URI_TYPE_EXACT = 1
    URI_TYPE_PREFIX = 2
    URI_TYPE_WILDCARD = 3

    _URI_COMPONENT = re.compile(r"^[a-z][a-z0-9_]*$")
    _URI_NAMED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*)>$")
    _URI_NAMED_CONVERTED_COMPONENT = re.compile(r"^<([a-z][a-z0-9_]*):([a-z]*)>$")

    def __init__(self, uri, target):
        """
        Constructor for WAMP URI pattern.

        :param uri: The URI or URI pattern, e.g. `"com.myapp.product.<product:int>.update"`.
        :type uri: str

        :param target: The target for this pattern: a procedure endpoint (a callable),
                       an event handler (a callable) or an exception (a class).

        :raises Exception: if the URI (pattern) is malformed.
        """
        assert(type(uri) == six.text_type)
        assert(target in [Pattern.URI_TARGET_ENDPOINT,
                          Pattern.URI_TARGET_HANDLER,
                          Pattern.URI_TARGET_EXCEPTION])

        components = uri.split('.')
        pl = []  # regex fragment per URI component
        nc = {}  # component name -> converter (str or int)
        # BUG FIX: the original initialised ``i = 0`` and never incremented
        # it, so the "suffix converter must be the last component" check
        # below compared against the wrong index for every component after
        # the first. enumerate() gives the real position.
        for i, component in enumerate(components):

            match = Pattern._URI_NAMED_CONVERTED_COMPONENT.match(component)
            if match:
                ctype = match.groups()[1]
                if ctype not in ['string', 'int', 'suffix']:
                    raise Exception("invalid URI")

                if ctype == 'suffix' and i != len(components) - 1:
                    raise Exception("invalid URI")

                name = match.groups()[0]
                if name in nc:
                    # duplicate component names are ambiguous
                    raise Exception("invalid URI")

                if ctype in ['string', 'suffix']:
                    nc[name] = str
                elif ctype == 'int':
                    nc[name] = int
                else:
                    # should not arrive here
                    raise Exception("logic error")

                pl.append("(?P<{}>[a-z0-9_]+)".format(name))
                continue

            match = Pattern._URI_NAMED_COMPONENT.match(component)
            if match:
                name = match.groups()[0]
                if name in nc:
                    raise Exception("invalid URI")

                nc[name] = str
                pl.append("(?P<{}>[a-z][a-z0-9_]*)".format(name))
                continue

            match = Pattern._URI_COMPONENT.match(component)
            if match:
                pl.append(component)
                continue

            raise Exception("invalid URI")

        if nc:
            # URI pattern with at least one named component -> wildcard.
            self._type = Pattern.URI_TYPE_WILDCARD
            # BUG FIX: join with a raw string; the original "\." relied on
            # the invalid escape sequence ``\.`` which is a SyntaxWarning
            # on modern Python (and slated to become an error).
            p = "^" + r"\.".join(pl) + "$"
            self._pattern = re.compile(p)
            self._names = nc
        else:
            # exact URI, no pattern matching required
            self._type = Pattern.URI_TYPE_EXACT
            self._pattern = None
            self._names = None
        self._uri = uri
        self._target = target

    def uri(self):
        """
        Returns the original URI (pattern) for this pattern.

        :returns: str -- The URI (pattern), e.g. `"com.myapp.product.<product:int>.update"`.
        """
        return self._uri

    def match(self, uri):
        """
        Match the given (fully qualified) URI according to this pattern
        and return extracted args and kwargs.

        :param uri: The URI to match, e.g. `"com.myapp.product.123456.update"`.
        :type uri: str

        :returns: tuple -- A tuple `(args, kwargs)`; named components appear
           in `kwargs` converted via their declared converter.
        :raises Exception: if the URI does not match a wildcard pattern.
        """
        args = []
        kwargs = {}
        if self._type == Pattern.URI_TYPE_EXACT:
            return args, kwargs
        elif self._type == Pattern.URI_TYPE_WILDCARD:
            match = self._pattern.match(uri)
            if match:
                for key in self._names:
                    val = match.group(key)
                    val = self._names[key](val)
                    kwargs[key] = val
                return args, kwargs
            else:
                raise Exception("no match")

    def is_endpoint(self):
        """
        Check if this pattern is for a procedure endpoint.

        :returns: bool -- `True`, iff this pattern is for a procedure endpoint.
        """
        return self._target == Pattern.URI_TARGET_ENDPOINT

    def is_handler(self):
        """
        Check if this pattern is for an event handler.

        :returns: bool -- `True`, iff this pattern is for an event handler.
        """
        return self._target == Pattern.URI_TARGET_HANDLER

    def is_exception(self):
        """
        Check if this pattern is for an exception.

        :returns: bool -- `True`, iff this pattern is for an exception.
        """
        return self._target == Pattern.URI_TARGET_EXCEPTION
| [
"tobias.oberstein@tavendo.de"
] | tobias.oberstein@tavendo.de |
ba00017e1750e5d0c550591117dc58935839be12 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /buildtools/third_party/libc++/trunk/test/libcxx/test/format.py | 19c9fc742a497f950919188035ea06e067ed5417 | [
"BSD-3-Clause",
"MIT",
"NCSA"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 7,344 | py | import errno
import os
import time
import lit.Test # pylint: disable=import-error
import lit.TestRunner # pylint: disable=import-error
import lit.util # pylint: disable=import-error
from libcxx.test.executor import LocalExecutor as LocalExecutor
import libcxx.util
class LibcxxTestFormat(object):
    """
    Custom test format handler for use with the test format use by libc++.
    Tests fall into two categories:
      FOO.pass.cpp - Executable test which should compile, run, and exit with
                     code 0.
      FOO.fail.cpp - Negative test case which is expected to fail compilation.
      FOO.sh.cpp   - A test that uses LIT's ShTest format.
    """

    def __init__(self, cxx, use_verify_for_fail, execute_external,
                 executor, exec_env):
        # cxx: compiler driver wrapper; executor: where compiled tests run
        # (local machine or a remote/board executor).
        self.cxx = cxx
        self.use_verify_for_fail = use_verify_for_fail
        self.execute_external = execute_external
        self.executor = executor
        # Copy so later mutation of the caller's mapping cannot leak in.
        self.exec_env = dict(exec_env)

    # TODO: Move this into lit's FileBasedTest
    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        """Yield a lit Test for every non-excluded file with a known suffix."""
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            # Ignore dot files and excluded tests.
            if filename.startswith('.') or filename in localConfig.excludes:
                continue
            filepath = os.path.join(source_path, filename)
            if not os.path.isdir(filepath):
                if any([filename.endswith(ext)
                        for ext in localConfig.suffixes]):
                    yield lit.Test.Test(testSuite, path_in_suite + (filename,),
                                        localConfig)

    def execute(self, test, lit_config):
        """Run one test, retrying while the binary is busy (ETXTBSY)."""
        while True:
            try:
                return self._execute(test, lit_config)
            except OSError as oe:
                if oe.errno != errno.ETXTBSY:
                    raise
                # Another process still holds the executable open; back off
                # briefly and retry.
                time.sleep(0.1)

    def _execute(self, test, lit_config):
        """Dispatch a test to the pass/fail/sh handler based on its suffix."""
        name = test.path_in_suite[-1]
        is_sh_test = name.endswith('.sh.cpp')
        is_pass_test = name.endswith('.pass.cpp')
        is_fail_test = name.endswith('.fail.cpp')

        if test.config.unsupported:
            return (lit.Test.UNSUPPORTED,
                    "A lit.local.cfg marked this unsupported")

        # Parses REQUIRES/XFAIL/RUN lines; RUN lines only required for sh tests.
        script = lit.TestRunner.parseIntegratedTestScript(
            test, require_script=is_sh_test)
        # Check if a result for the test was returned. If so return that
        # result.
        if isinstance(script, lit.Test.Result):
            return script
        if lit_config.noExecute:
            return lit.Test.Result(lit.Test.PASS)
        # Check that we don't have run lines on tests that don't support them.
        if not is_sh_test and len(script) != 0:
            lit_config.fatal('Unsupported RUN line found in test %s' % name)

        tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
        substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir,
                                                               tmpBase)
        script = lit.TestRunner.applySubstitutions(script, substitutions)

        # Dispatch the test based on its suffix.
        if is_sh_test:
            if not isinstance(self.executor, LocalExecutor):
                # We can't run ShTest tests with a executor yet.
                # For now, bail on trying to run them
                return lit.Test.UNSUPPORTED, 'ShTest format not yet supported'
            return lit.TestRunner._runShTest(test, lit_config,
                                             self.execute_external, script,
                                             tmpBase)
        elif is_fail_test:
            return self._evaluate_fail_test(test)
        elif is_pass_test:
            return self._evaluate_pass_test(test, tmpBase, lit_config)
        else:
            # No other test type is supported
            assert False

    def _clean(self, exec_path):  # pylint: disable=no-self-use
        # Hook point: subclasses that copy binaries elsewhere can override.
        libcxx.util.cleanFile(exec_path)

    def _evaluate_pass_test(self, test, tmpBase, lit_config):
        """Compile, link, and execute a .pass.cpp test; PASS iff exit code 0."""
        execDir = os.path.dirname(test.getExecPath())
        source_path = test.getSourcePath()
        exec_path = tmpBase + '.exe'
        object_path = tmpBase + '.o'
        # Create the output directory if it does not already exist.
        lit.util.mkdir_p(os.path.dirname(tmpBase))
        try:
            # Compile the test
            cmd, out, err, rc = self.cxx.compileLinkTwoSteps(
                source_path, out=exec_path, object_file=object_path,
                cwd=execDir)
            compile_cmd = cmd
            if rc != 0:
                report = libcxx.util.makeReport(cmd, out, err, rc)
                report += "Compilation failed unexpectedly!"
                return lit.Test.FAIL, report
            # Run the test
            local_cwd = os.path.dirname(source_path)
            env = None
            if self.exec_env:
                env = self.exec_env
            # TODO: Only list actually needed files in file_deps.
            # Right now we just mark all of the .dat files in the same
            # directory as dependencies, but it's likely less than that. We
            # should add a `// FILE-DEP: foo.dat` to each test to track this.
            data_files = [os.path.join(local_cwd, f)
                          for f in os.listdir(local_cwd) if f.endswith('.dat')]
            cmd, out, err, rc = self.executor.run(exec_path, [exec_path],
                                                  local_cwd, data_files, env)
            if rc != 0:
                report = libcxx.util.makeReport(cmd, out, err, rc)
                report = "Compiled With: %s\n%s" % (compile_cmd, report)
                report += "Compiled test failed unexpectedly!"
                return lit.Test.FAIL, report
            return lit.Test.PASS, ''
        finally:
            # Note that cleanup of exec_file happens in `_clean()`. If you
            # override this, cleanup is your reponsibility.
            libcxx.util.cleanFile(object_path)
            self._clean(exec_path)

    def _evaluate_fail_test(self, test):
        """Compile a .fail.cpp test; PASS iff compilation fails (or, with
        clang -verify, iff the expected diagnostics are produced)."""
        source_path = test.getSourcePath()
        with open(source_path, 'r') as f:
            contents = f.read()
        # Use clang's -verify mode only when the test actually contains
        # expected-* diagnostic markers.
        verify_tags = ['expected-note', 'expected-remark', 'expected-warning',
                       'expected-error', 'expected-no-diagnostics']
        use_verify = self.use_verify_for_fail and \
                     any([tag in contents for tag in verify_tags])
        extra_flags = []
        if use_verify:
            extra_flags += ['-Xclang', '-verify',
                            '-Xclang', '-verify-ignore-unexpected=note']
        cmd, out, err, rc = self.cxx.compile(source_path, out=os.devnull,
                                             flags=extra_flags)
        # With -verify, a clean exit (0) means the expected diagnostics
        # matched; without it, the compile itself must fail (non-zero).
        expected_rc = 0 if use_verify else 1
        if rc == expected_rc:
            return lit.Test.PASS, ''
        else:
            report = libcxx.util.makeReport(cmd, out, err, rc)
            report_msg = ('Expected compilation to fail!' if not use_verify else
                          'Expected compilation using verify to pass!')
            return lit.Test.FAIL, report + report_msg + '\n'
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
c320a6011e9be62d78fc5e08b3e9fbb8943f2cab | b1efb356e55df6a5f1243d803d51b8e9bb6e6938 | /nextgisweb/resmeta/__init__.py | e121253161e1db9ff2d05f3c3b4cccaaf10b7ef7 | [] | no_license | neroks/nextgisweb | 6fa6621824db05e51316bf993125f79773c97932 | 59ed0e9637a3df0e2388160d9871b435beeaa466 | refs/heads/2 | 2021-01-15T15:05:02.615869 | 2015-10-05T10:39:34 | 2015-10-05T10:39:34 | 43,687,459 | 0 | 0 | null | 2015-10-05T13:58:10 | 2015-10-05T13:58:10 | null | UTF-8 | Python | false | false | 425 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ..component import Component, require
from .util import COMP_ID
from .model import Base
@Component.registry.register
class ResourceMetadataComponent(Component):
    """NextGIS Web component for the resource metadata (resmeta) module."""

    # Component identifier (from .util) and the SQLAlchemy metadata holding
    # this component's tables (from .model).
    identity = COMP_ID
    metadata = Base.metadata

    @require('resource')
    def setup_pyramid(self, config):
        # Deferred import — presumably avoids a circular import at module
        # load time (TODO confirm; matches the framework convention).
        from . import view  # NOQA
        view.setup_pyramid(self, config)
| [
"me@dezhin.net"
] | me@dezhin.net |
44edd73cb45272446b82301c8236ea055efbeea5 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/ghostnet_quant/src/config.py | c5428ae53bc66a09547fb9cba9eba55da4895044 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 1,599 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed
# Hyper-parameters used when training on the Ascend target.
config_ascend = ed({
    "num_classes": 37,                       # output classes
    "image_height": 224,                     # input resolution
    "image_width": 224,
    "batch_size": 256,
    "epoch_size": 200,                       # total training epochs
    "warmup_epochs": 4,                      # learning-rate warmup epochs
    "lr": 0.4,                               # base learning rate
    "momentum": 0.9,
    "weight_decay": 4e-5,
    "label_smooth": 0.1,                     # label-smoothing factor
    "loss_scale": 1024,                      # loss scaling factor
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,             # checkpoint every N epochs
    "keep_checkpoint_max": 200,              # max checkpoints retained
    "save_checkpoint_path": "./checkpoint",
})

# Hyper-parameters used when training on the GPU target. Note the much
# smaller batch size and longer schedule than the Ascend configuration.
config_gpu = ed({
    "num_classes": 37,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 3,
    "epoch_size": 370,
    "warmup_epochs": 4,
    "lr": 0.4,
    "momentum": 0.9,
    "weight_decay": 4e-5,
    "label_smooth": 0.1,
    "loss_scale": 1024,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 500,
    "save_checkpoint_path": "./checkpoint",
})
| [
"chenhaozhe1@huawei.com"
] | chenhaozhe1@huawei.com |
c3db3977e70bcce001aa8e9e81b214149ce62687 | f8104b29a8d0dbeb407060e494a206ca69335aeb | /tools/datasets/voc/voc_statistic.py | d3c9079646b9535e532a50f45ced63ee3ca400ec | [] | no_license | Sebastixian/wwtool | c19f665f96e8b942e94af47db590f5bb28072f06 | 2f462a3d028b766234d62a3ef706a0f08f10680a | refs/heads/master | 2023-06-01T04:21:22.066639 | 2021-06-25T07:40:13 | 2021-06-25T07:40:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,508 | py | import wwtool
# Class-id -> name map for the small-object classes. NOTE(review): defined
# but not passed to any call in this chunk (coco_class/class_instance are
# given as None below).
coco_small_class = { 1: 'airplane',
                     2: 'bridge',
                     3: 'storage-tank',
                     4: 'ship',
                     5: 'swimming-pool',
                     6: 'vehicle',
                     7: 'person',
                     8: 'wind-mill'}

# Annotation file selection, e.g. ./data/voc/v1/coco/annotations/voc_merge.json
ann_file_name = ['voc', 'merge']
# ann_file_name.append('small_object')
ann_file = './data/{}/v1/coco/annotations/{}.json'.format(ann_file_name[0], '_'.join(ann_file_name))

# Object-size bins: absolute pixel areas, or (when measuring by ratio)
# object-area / image-area fractions, with matching plot labels.
size_measure_by_ratio = False
if size_measure_by_ratio == False:
    size_set = [4*4, 8*8, 16*16, 32*32, 64*64, 64*64]
    label_set = ["4*4", "8*8", "16*16", "32*32", "64*64", "64*64-inf"]
else:
    size_set = [0.12/100, 1.08/100, 9.72/100]
    label_set = ["0.12/100", "1.08/100", "9.72/100"]

# NOTE(review): class_instance is created but None is passed to the
# statistic object below.
class_instance = wwtool.Small()
statistic = wwtool.COCO_Statistic(ann_file, size_set=size_set, label_set=label_set, size_measure_by_ratio=size_measure_by_ratio, class_instance=None, show_title=False)

# Generate the distribution plots, with and without pie charts / counts.
for pie_flag in [False, True]:
    statistic.total_size_distribution(plot_pie=pie_flag, save_file_name=ann_file_name[:])
for number_flag in [False, True]:
    statistic.class_size_distribution(coco_class=None, save_file_name=ann_file_name[:], number=number_flag)
statistic.image_object_num_distribution(save_file_name=ann_file_name[:])
statistic.object_aspect_ratio_distribution(save_file_name=ann_file_name[:])
# statistic.class_num_per_image(coco_class=coco_dior_class, save_file_name=ann_file_name[:]) | [
"jwwangchn@outlook.com"
] | jwwangchn@outlook.com |
aaf2b699724a92ae1623e014c4f605c3897e122f | 5afb3dff6e99d9bf18208c83afb7a7d65f26bbd7 | /licode/st214.py | ccc8e64a5658d6d4189c7d1262b890e6fefb9db5 | [] | no_license | yanfriend/python-practice | 4b565e58db9cdc7596f9135a8f7b9bae4be18de3 | 236a74c0d1d84f730fa5d10146cc201a4c49567d | refs/heads/master | 2018-12-09T00:34:13.747045 | 2018-12-03T04:53:17 | 2018-12-03T04:53:17 | 62,094,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | """
Given a string S, you are allowed to convert it to a palindrome by adding characters in front of it.
Find and return the shortest palindrome you can find by performing this transformation.
For example:
Given "aacecaaa", return "a aacecaaa".
Given "abcd", return "dcb abcd".
"""
class Solution(object):
    def shortestPalindrome(self, s):
        """Return the shortest palindrome obtainable by adding characters
        in front of ``s``.

        Write ``s = prefix + suffix`` with ``prefix`` the longest
        palindromic prefix; the answer is ``reversed(suffix) + s``.
        With ``r = s[::-1]``, ``s.startswith(r[i:])`` holds exactly when
        the first ``len(s) - i`` characters of ``s`` form a palindrome,
        so scanning ``i`` upward finds the longest such prefix first.
        The loop always terminates by ``i == len(s)`` since every string
        starts with the empty string.

        Worst case O(n^2) (each startswith is O(n)); a KMP
        failure-function approach would be O(n) but is more involved.

        :type s: str
        :rtype: str
        """
        r = s[::-1]
        for i in range(len(s) + 1):
            if s.startswith(r[i:]):
                return r[:i] + s
print Solution().shortestPalindrome("aacecaaa")
| [
"ybai@pinterest.com"
] | ybai@pinterest.com |
165978e3b27287415483b3ffd702ec1802c32d0c | 28691ec55ebce9ec7045d12ea9675932ce12d671 | /py2rhino-project/branches/sandbox2/py2rhino/_make/data/parser_out/mesh/add_mesh.py | a3505018edc9af7061b1bf528d128f8c7977162c | [] | no_license | ianclarksmith/design-automation | 1e71315193effc0c18b4a8b41300bda6f41a3f09 | e27cc028fe582395f4a62f06697137867bb0fc33 | refs/heads/master | 2020-04-22T22:28:39.385395 | 2009-10-26T02:48:37 | 2009-10-26T02:48:37 | 37,266,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | add_mesh = {
"input_folder_name": "Mesh_Methods",
"input_file_name": "AddMesh",
"output_package_name": "mesh",
"output_module_name": "add_mesh",
"doc_html": """
Adds a mesh object to the document.
""",
"syntax_html": {
0: ("arrVertices", "arrFaceVertices", "arrVertexNormals", "arrTextureCoordinates", "arrVertexColors"),
},
"params_html": {
0: {
"name": "arrVertices",
"py_name": "vertices",
"opt_or_req": "Required",
"type": "Array",
"name_prefix": "arr_of_dbl",
"name_main": "Vertices",
"doc": """
An array of 3-D points defining the vertices of the mesh.
"""
},
1: {
"name": "arrFaceVertices",
"py_name": "face_vertices",
"opt_or_req": "Required",
"type": "Array",
"name_prefix": "arr_of_int",
"name_main": "FaceVertices",
"doc": """
An array containing arrays of four numbers that define the vertex indices for each face of the mesh. If the third and forth vertex indices of a face are identical, a triangular face will be created. Otherwise a quad face will be created.
"""
},
2: {
"name": "arrVertexNormals",
"py_name": "vertex_normals",
"opt_or_req": "Optional",
"type": "Array",
"name_prefix": "arr_of_dbl",
"name_main": "VertexNormals",
"doc": """
An array of 3-D vectors defining the vertex normals of the mesh. Note, for every vertex, the must be a corresponding vertex normal.
"""
},
3: {
"name": "arrTextureCoordinates",
"py_name": "texture_coordinates",
"opt_or_req": "Optional",
"type": "Array",
"name_prefix": "arr_of_dbl",
"name_main": "TextureCoordinates",
"doc": """
An array of 2-D texture coordinates. Note, for every vertex, there must be a corresponding texture coordinate.
"""
},
4: {
"name": "arrVertexColors",
"py_name": "vertex_colors",
"opt_or_req": "Optional",
"type": "Array",
"name_prefix": "arr_of_int",
"name_main": "VertexColors",
"doc": """
An array of RGB color values. Note, for every vertex, there must be a corresponding vertex color.
"""
},
},
"returns_html": {
0: {
"type": "string",
"doc": "The identifier of the new object if successful."
},
1: {
"type": "null",
"doc": "If not successful, or on error."
},
},
"id_com": 494,
"params_com": {
0: {
"name": "vaVertices",
"opt_or_req": "Required",
"type": "tagVARIANT",
},
1: {
"name": "vaFaces",
"opt_or_req": "Required",
"type": "tagVARIANT",
},
2: {
"name": "vaNormals",
"opt_or_req": "Optional",
"type": "tagVARIANT",
},
3: {
"name": "vaTextures",
"opt_or_req": "Optional",
"type": "tagVARIANT",
},
4: {
"name": "vaColors",
"opt_or_req": "Optional",
"type": "tagVARIANT",
},
},
"returns_com": "tagVARIANT",
}
| [
"patrick.ht.janssen@d56020b2-6ac5-11de-89a9-0b20f3e2dceb"
] | patrick.ht.janssen@d56020b2-6ac5-11de-89a9-0b20f3e2dceb |
18b97799ee7cf38b86f53f0b5afee75638cfc52f | ac2142d192bde034ae3c6d7e07045c39d9a34aa3 | /面试题/字典按value排序.py | b3ea5016619987b3864c95af55647a3b65e58e5e | [] | no_license | budaLi/-500- | ee33a93a6c7f7d36e30a29dd1e12634034712d12 | 69c42389717f003198f652035bfc922eac8a6fef | refs/heads/master | 2022-11-20T08:42:24.255264 | 2020-07-22T07:41:52 | 2020-07-22T07:41:52 | 281,352,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # @Time : 2020/7/22 15:14
# @Author : Libuda
# @FileName: 字典按value排序.py
# @Software: PyCharm
d = {'a': 24, 'g': 52, 'i': 12, 'k': 33}
def sort(dict):
    """Return the mapping's items as a list of (key, value) tuples sorted
    by value in descending order.

    NOTE(review): the function name and the ``dict`` parameter shadow
    built-ins; renaming them would change the public signature, so this is
    only documented here.
    """
    # .items() exposes the mapping as (key, value) tuples
    print(dict.items())
    # reverse=True sorts by value in descending order
    dict = sorted(dict.items(),key=lambda x:x[1],reverse=True)
    return dict

if __name__ == '__main__':
    # Demo: sort the module-level sample dict ``d`` by value.
    s = sort(d)
print(s) | [
"1364826576@qq.com"
] | 1364826576@qq.com |
ee03770234e0e1877af6b6d638fd9b1c0e787c32 | e3a7622a4d2e16b1683c183568341b39c0de88b4 | /PycharmProjects/PythonCodes/07-爬虫/01-urllib库基础用法/10-贴吧.py | 360f5374537cd289b22d10d8cba9973508583f52 | [] | no_license | TriggerDark/StudyCodes | 937a8f6988cb475d275ff429cd32df823e457296 | 6f2f339d47dbae10d55d6b6da1d7e107f7dec85f | refs/heads/master | 2022-02-09T13:52:10.895963 | 2019-03-30T13:38:55 | 2019-03-30T13:38:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | from urllib import parse
from urllib.request import Request, urlopen
def loadPage(url, filename):
    """
    Fetch one page from the server and return the raw response body.

    url: the URL to request
    filename: label used only in the progress message
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36"
    }

    print("正在下载" + filename)
    req = Request(url, headers=headers)
    response = urlopen(req)
    return response.read()
def writePage(html, filename):
    """
    Save one downloaded page to disk.

    html: the response body returned by loadPage() — bytes under Python 3
    filename: target file name
    """
    print("正在保存" + filename)
    # BUG FIX: the original opened the file in text mode and wrote
    # str(html); for a bytes payload that stores the literal "b'...'"
    # repr instead of the page content. Write bytes in binary mode and
    # plain strings in text mode.
    mode = "wb" if isinstance(html, (bytes, bytearray)) else "w"
    with open(filename, mode) as f:
        f.write(html)
    print("-"*30)
def Spider(url, beginPage, endPage):
    """
    Crawler scheduler for the Tieba forum: builds the URL for every page
    in [beginPage, endPage] and downloads and saves each one.

    url: base query URL (without the per-page pn offset)
    beginPage: first page number (1-based)
    endPage: last page number, inclusive
    """
    for page_num in range(beginPage, endPage + 1):
        # Tieba paginates in steps of 50 posts per page.
        offset = (page_num - 1) * 50
        page_url = url + "&pn=" + str(offset)
        page_file = "第" + str(page_num) + "页.html"
        writePage(loadPage(page_url, page_file), page_file)
    print("谢谢使用")
kw = input("请输入贴吧名:")
beginPage = int(input("请输入起始页:"))
endPage = int(input("请输入结束页:"))
url = "http://tieba.baidu.com/f?"
kw = parse.urlencode({"kw": kw})
fullurl = url + kw
Spider(fullurl, beginPage, endPage) | [
"2413044193@qq.com"
] | 2413044193@qq.com |
5b65f2b9d93b31d704c150d79dd67ce87b6c23f7 | 47366be5cbee9d7e086291c20f97f10ab2bf74fe | /code/journal_gemello_all_homes_leave_one_out_cluster.py | ebdf022a4357b68316c0ca6f4ab5b8ca31c44d8f | [] | no_license | nipunbatra/journal | 3d44eed05c95970606649d17402da54fc0a415ff | 94a8b88589e8f60e6f0314f8c5a374f22336b3e9 | refs/heads/master | 2021-01-09T20:40:45.844121 | 2016-07-27T15:16:29 | 2016-07-27T15:16:29 | 62,874,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,837 | py | """
This code generates the prediction for a region when we use homes containing all data
"""
# NEED TO RUN ON CLUSTER
import sys
CLUSTER = True
if CLUSTER:
sys.path.insert(0, '/if6/nb2cz/anaconda/lib/python2.7/site-packages')
import numpy as np
import pandas as pd
import pickle
from collections import OrderedDict
# Load the per-region frames dumped by the preprocessing step and keep the
# Austin region only.
out_overall = pickle.load(open('../data/input/all_regions.pkl', 'r'))

region = "Austin"
df = out_overall[region]

# Harmonise column names with the rest of the pipeline.
df = df.rename(columns={'house_num_rooms': 'num_rooms',
                        'num_occupants': 'total_occupants',
                        'difference_ratio_min_max': 'ratio_difference_min_max'})

# Keep only homes with full aggregate data and metadata available.
df = df[(df.full_agg_available == 1) & (df.md_available == 1)]
def scale_0_1(ser, minimum=None, maximum=None):
    """Min-max scale *ser* (a pandas Series/DataFrame) into [0, 1].

    If *minimum*/*maximum* are not given they are computed from *ser*
    itself; passing them lets the caller scale against bounds computed over
    a larger population (as normalise() does for the aggregate columns).

    BUG FIX: the bounds now default independently — the original computed
    both only when *minimum* was None, so supplying just one bound crashed.
    """
    if minimum is None:
        minimum = ser.min()
    if maximum is None:
        maximum = ser.max()
    return (ser - minimum).div(maximum - minimum)
def normalise(df):
    """Return a copy of *df* with features min-max scaled into [0, 1].

    The twelve monthly ``aggregate_*`` columns are scaled jointly against
    their global min/max; each listed static feature column is scaled
    independently.
    """
    scaled = df.copy()

    agg_cols = ["aggregate_%d" % i for i in range(1, 13)]
    agg_block = df[agg_cols]
    lo = agg_block.min().min()
    hi = agg_block.max().max()
    scaled[agg_cols] = scale_0_1(agg_block, lo, hi)

    static_cols = ['area', 'total_occupants', 'num_rooms', 'ratio_min_max',
                   'skew', 'kurtosis', 'variance', 'ratio_difference_min_max',
                   'p_25', 'p_50', 'p_75']
    for name in static_cols:
        scaled[name] = scale_0_1(df[name])
    return scaled
df = normalise(df)
from all_functions import *
from features import *
import sys
from sklearn.neighbors import KNeighborsRegressor
from sklearn.cross_validation import ShuffleSplit
# Grid-search bounds used by _find_accuracy(): K (neighbours) ranges over
# [K_min, K_max) and the number of top-ranked features over [F_min, F_max).
# NUM_NEIGHBOUR_MAX / F_MAX appear unreferenced in this script.
NUM_NEIGHBOUR_MAX = 6
F_MAX = 6

K_min, K_max = 1,6
F_min, F_max=1,8
import json
from sklearn.cross_validation import LeaveOneOut
from sklearn.cross_validation import KFold
from sklearn.ensemble import ExtraTreesRegressor
def _save_csv(out_df, path, appliance, num_homes, start_seed, end_seed, feature):
out_df.T.to_csv("%s/%s_%d_%d_%d_%s.csv" %(path, appliance, num_homes, start_seed, end_seed, feature),
index_label="Random seed")
def _find_accuracy(home, appliance, feature="Monthly"):
    """Leave-one-out evaluation of the KNN energy predictor for one home.

    Reads module globals: ``df`` (the normalised frame), ``all_homes``,
    ``feature_map`` (from features.py) and the grid bounds
    ``K_min``/``K_max``/``F_min``/``F_max``.

    Returns ``(pred_df, gt_df, error, F_best, K_best)`` where ``error`` is
    the per-month absolute percentage error for *home*.
    """
    np.random.seed(42)
    appliance_df = df.ix[all_homes]
    # HVAC is only metered over the cooling season (months 5-10).
    if appliance=="hvac":
        start, stop=5, 11
    else:
        start, stop=1, 13

    test_homes = [home]
    train_homes = appliance_df[~appliance_df.index.isin([home])].index
    #all_home_appliance = deepcopy(all_homes)
    #all_home_appliance[appliance] = train_homes

    # Cross validation on inner loop to find best feature, K
    train_size = len(train_homes)
    l = LeaveOneOut(train_size)
    out = OrderedDict()
    for cv_train, cv_test in l:
        #print cv_test
        cv_train_home=appliance_df.ix[train_homes[cv_train]]
        cv_test_home = appliance_df.ix[train_homes[cv_test]]
        test_home_name = cv_test_home.index.values[0]
        #print cv_test_home
        out[test_home_name]={}

        # Summing up energy across start to stop to get Y to learn optimum feature on
        Y = cv_train_home[['%s_%d' %(appliance, i) for i in range(start, stop)]].sum(axis=1).values
        # Rank features by importance with an ensemble of extremely
        # randomised trees (fixed seed for reproducibility).
        forest = ExtraTreesRegressor(n_estimators=250,
                                     random_state=0)
        forest.fit(cv_train_home[feature_map[feature]], Y)
        importances = forest.feature_importances_
        indices = np.argsort(importances)[::-1]

        # Now varying K and top-N features
        for K in range(K_min, K_max):
            out[test_home_name][K]={}
            for top_n in range(F_min,F_max):
                out[test_home_name][K][top_n]=[]
                top_n_features = cv_train_home[feature_map[feature]].columns[indices][:top_n]

                # Now fitting KNN on this
                for month in range(start, stop):
                    clf = KNeighborsRegressor(n_neighbors=K)
                    clf.fit(cv_train_home[top_n_features], cv_train_home['%s_%d' %(appliance, month)])
                    out[test_home_name][K][top_n].append(clf.predict(cv_test_home[top_n_features]))

    # Now, finding the (K, top_n) combination that gave us best accuracy on CV test homes
    accur = {}

    for K in range(K_min, K_max):
        accur[K] = {}
        for top_n in range(F_min, F_max):
            temp = {}
            for h in out.iterkeys():
                pred = pd.DataFrame(out[h][K][top_n]).T
                #all_but_h = [x for x in out.keys() if x!=h]
                pred.index = [h]
                pred.columns = [['%s_%d' %(appliance, i) for i in range(start, stop)]]
                gt = appliance_df.ix[h][['%s_%d' %(appliance, i) for i in range(start, stop)]]
                # Absolute percentage error, averaged over months.
                error = (pred-gt).abs().div(gt).mul(100)
                #print pred, gt, error
                mean_error = error.mean().mean()
                #print mean_error
                temp[h]=mean_error
            ac = pd.Series(temp).mean()
            accur[K][top_n] = ac

    accur_df = pd.DataFrame(accur)
    #print accur_df
    # Pick the (K, top_n) cell with the lowest mean CV error.
    accur_min = accur_df.min().min()
    max_ac_df = accur_df[accur_df==accur_min]
    # NOTE(review): cv_train_home/indices here are leftovers from the LAST
    # CV fold, so F_best is derived from that fold's feature ranking.
    # Preserved as-is.
    F_best = cv_train_home[feature_map[feature]].columns[indices][:max_ac_df.mean(axis=1).dropna().index.values[0]].tolist()
    K_best = max_ac_df.mean().dropna().index.values[0]

    # Now predicting for test home
    train_overall = appliance_df.ix[appliance_df[~appliance_df.index.isin([home])].index]
    test_overall = appliance_df[appliance_df.index.isin([home])]
    pred_test = {}
    gt_test = {}
    for month in range(start, stop):
        clf = KNeighborsRegressor(n_neighbors=K_best)
        clf.fit(train_overall[F_best], train_overall['%s_%d' %(appliance, month)])
        pred_test[month] = clf.predict(test_overall[F_best])
        gt_test[month] = test_overall['%s_%d' %(appliance, month)]
    #json.dump({'f':F_best, 'k':K_best,'accuracy':accur_max},open("../main-out-new/%s_%s_%d.json" %(appliance,feature, home),"w") )
    pred_df = pd.DataFrame(pred_test)
    pred_df.index = [home]
    gt_df = pd.DataFrame(gt_test)
    error = (gt_df-pred_df).abs().div(gt_df).mul(100)
    return pred_df, gt_df, error, F_best, K_best
import os
out_path = os.path.expanduser("~/output/journal/gemello/all_homes/")
import sys
appliances = ["hvac","fridge","wm","dw","mw","oven"]
features = ["Static", "Monthly+Static", "Monthly"]
SLURM_OUT = "../slurm_out"
from subprocess import Popen
import time
# Sweep every (feature set, appliance, home) combination; each home is
# evaluated leave-one-out and its result pickled to a separate file so the
# sweep can be resumed / parallelised on the cluster.
for feature in features:
    for appliance in appliances:
        if appliance=="hvac":
            start, stop=5, 11
        else:
            start, stop=1, 13
        # Restrict to homes with complete readings for this appliance.
        appliance_df= df.ix[df[['%s_%d' %(appliance,month) for month in range(start,stop)]].dropna().index]
        for home in appliance_df.index:
            home = int(home)
            # NOTE(review): start/stop and appliance_df are recomputed
            # identically inside this loop — redundant but harmless.
            if appliance=="hvac":
                start, stop=5, 11
            else:
                start, stop=1, 13
            appliance_df = df.ix[df[['%s_%d' %(appliance,month) for month in range(start,stop)]].dropna().index]
            # all_homes is read as a module global by _find_accuracy().
            all_homes = appliance_df.index
            pred_df, gt_df, error, F_best, K_best = _find_accuracy(home, appliance, feature)
            print appliance, home, feature, error.squeeze().mean()
            if not os.path.exists(out_path):
                print "here"
                os.makedirs(out_path)
            import pickle
            filename = os.path.join(out_path, "%s_%s_%d.pkl" %(appliance,feature,home))
            o = {'pred_df':pred_df,'gt_df':gt_df,'error':error,
                 'F_best':F_best,'K_best':K_best}
            pickle.dump(o, open(filename,'w'))
            #_save_csv(out_overall, os.path.expanduser("~/output/unified/kdd_all_features/"), appliance, num_homes, start_seed, end_seed, feature)
| [
"nipunb@iiitd.ac.in"
] | nipunb@iiitd.ac.in |
2c70798e16ac553da37c52d63676ab39931ffc65 | ee6acbd5fcd0fcd16230e96a4a539de41a02c97e | /operators/prometheus/python/pulumi_pulumi_kubernetes_crds_operators_prometheus/monitoring/v1/__init__.py | c08014ce868b9d012e89a5e5a10d7f8c359ea029 | [
"Apache-2.0"
] | permissive | isabella232/pulumi-kubernetes-crds | 777e78137aaf6525a44b61a02dccf91bf0d87a14 | 372c4c0182f6b899af82d6edaad521aa14f22150 | refs/heads/master | 2023-03-15T04:29:16.039753 | 2020-12-30T19:35:54 | 2020-12-30T19:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .Alertmanager import *
from .PodMonitor import *
from .Prometheus import *
from .PrometheusRule import *
from .ServiceMonitor import *
from .ThanosRuler import *
from ._inputs import *
from . import outputs
| [
"albertzhong0@gmail.com"
] | albertzhong0@gmail.com |
520ff5a148a237cbc9d139f883a89e8e8f2cc1e3 | d53d639db5a7a71a904d811dc271e19f294baa9d | /Travel/travelsapp/migrations/0023_auto_20200621_1809.py | aa56e73621cff30fb92bfc4e61f6ae612c8b6535 | [] | no_license | sdrsnadkry/django | e8a0f6d91094ae27ef3d5aef0c0f667b0b917ce7 | 83126dec62da6a715fab8654852f46fdeedc30f2 | refs/heads/master | 2022-11-27T04:38:25.891650 | 2020-08-07T07:46:42 | 2020-08-07T07:46:42 | 285,762,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | # Generated by Django 3.0.7 on 2020-06-21 12:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redeclare ``Bookings.date`` as a plain
    ``DateField`` with no extra options."""

    # Must be applied after the migration that created the Bookings model.
    dependencies = [
        ('travelsapp', '0022_bookings'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bookings',
            name='date',
            field=models.DateField(),
        ),
    ]
| [
"sdadkry95@gmail.com"
] | sdadkry95@gmail.com |
5977f8f99167e8f39b9114736637b2fe01fddf7f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_bailed.py | 781bd6fa9e0ab38271aa62f4d74a985f9d2158ed | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _BAILED():
def __init__(self,):
self.name = "BAILED"
self.definitions = bail
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['bail']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
00865f4edca156b1bb9f7af208f6e517657f0266 | b2fb13181e5fe114c5b128f30b3946024347321d | /Day12/Day12.py | 7198292796be9bd1b8f45bd64509cd79c1d6188d | [] | no_license | darsovit/AdventOfCode2017 | 43aefaefcad866fde38ad960106d20ab1d8d28bf | 8db815ea2ea618e25dd5946988d88c34563f0ace | refs/heads/master | 2021-09-01T08:21:41.369752 | 2017-12-26T01:12:46 | 2017-12-26T01:17:42 | 113,674,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | #!python
def parse_line( line, state ):
first=line.split(' <-> ')
state['connections'][int(first[0])]=list(map(lambda x: int(x), first[1].split(', ')))
print(state['connections'][int(first[0])])
def build_group( state, base ):
state['indirect'][base]=set( [ base ] )
startlen=0
endlen=1
new_entries=state['indirect'][base]
while startlen < endlen:
origset=state['indirect'][base]
newset=origset
startlen=len(newset)
for x in new_entries:
newset = newset | set( state['connections'][x] )
state['indirect'][base] = newset
endlen=len(newset)
new_entries = newset - origset
for x in state['indirect'][base]:
state['ingroup'][x] = base
state={}
state['connections']={}
state['indirect']={}
state['ingroup']={}
with open('input.txt') as f:
for line in f:
parse_line( line.strip(), state )
#state['indirect']={}
#state['indirect'][0]=set( state['connections'][0] )
#print( state['indirect'][0] )
#endlen=len(state['indirect'][0])
#startlen=0
#while startlen < endlen:
# newset=state['indirect'][0]
# startlen=len(newset)
# for x in state['indirect'][0]:
# newset = newset | set( state['connections'][x])
# state['indirect'][0] = newset
# endlen=len(newset)
for base in sorted(state['connections'].keys()):
if base not in state['ingroup']:
build_group( state, base )
print( len(state['indirect'][0]) )
print( len(state['indirect']) ) | [
"darsovit@gmail.com"
] | darsovit@gmail.com |
fb0c35c791681a9e193d1bfbbc7378ee78426409 | 182c651a9b00b9b4d80e6d51ae574cb793958cd6 | /quick/tutorials/extending/chapter3-bindings/chapter3-bindings.py | aa415cc1f6041c84a4719a593559bf809dbe9dc5 | [] | no_license | eudu/pyqt-examples | c61a7108e1fbfcf2cd918a0f99e9a5a90a3f305c | 8e533b7b3c5e9bbe0617ef1ecb9b169dd216c181 | refs/heads/master | 2020-03-16T01:23:19.573347 | 2018-05-06T20:20:57 | 2018-05-06T20:20:57 | 132,438,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,702 | py | #!/usr/bin/python3
#############################################################################
##
## Copyright (C) 2018 Riverbank Computing Limited.
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Digia Plc and its Subsidiary(-ies) nor the names
## of its contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, QRectF, Qt, QUrl
from PyQt5.QtGui import QColor, QGuiApplication, QPainter, QPen
from PyQt5.QtQml import qmlRegisterType
from PyQt5.QtQuick import QQuickPaintedItem, QQuickView
class PieChart(QQuickPaintedItem):
@pyqtProperty(str)
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
colorChanged = pyqtSignal()
@pyqtProperty(QColor, notify=colorChanged)
def color(self):
return self._color
@color.setter
def color(self, color):
if self._color != color:
self._color = QColor(color)
self.update()
self.colorChanged.emit()
def __init__(self, parent=None):
super(PieChart, self).__init__(parent)
self._name = ''
self._color = QColor()
def paint(self, painter):
painter.setPen(QPen(self._color, 2))
painter.setRenderHints(QPainter.Antialiasing, True)
rect = QRectF(0, 0, self.width(), self.height()).adjusted(1, 1, -1, -1)
painter.drawPie(rect, 90 * 16, 290 * 16)
@pyqtSlot()
def clearChart(self):
self.color = QColor(Qt.transparent)
self.update()
if __name__ == '__main__':
import os
import sys
app = QGuiApplication(sys.argv)
qmlRegisterType(PieChart, "Charts", 1, 0, "PieChart")
view = QQuickView()
view.setResizeMode(QQuickView.SizeRootObjectToView)
view.setSource(
QUrl.fromLocalFile(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'app.qml')))
view.show()
sys.exit(app.exec_())
| [
"dukalow@gmail.com"
] | dukalow@gmail.com |
d16aa6923b579e95b3a303185a0f8ef9a768ae8a | 261eba086816dbb3db4836c9b1e5869ccf0f8bae | /牛顿迭代法求解非线性方程/main.py | aa97a1a66a5a9ba51a601b6eb2e5e2b6e71c7523 | [] | no_license | budaLi/jianzi | e316bdfb25587d14d38f1bea98772bce5ac69198 | bca098de0f06ae1c78afc3203dfb0eea6a412dee | refs/heads/master | 2023-05-02T19:33:25.752799 | 2021-05-25T08:03:24 | 2021-05-25T08:03:24 | 271,513,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py | # coding=utf-8
# Copyright (c) 2020 ichinae.com, Inc. All Rights Reserved
"""
Module Summary Here.
Authors: lijinjun1351@ichinae.com
"""
from numpy import *
import numpy as np
def Fun(x,num): #方程组在这里,两个变量分别是x的两个分量,num是未知数个数,这里是2,f是2个方程组
i = num
f = np.zeros((i),dtype=float)
f[0] = x[0]+2*x[1]-3
f[1] = 2*x[0]**2 + x[1]**2-5
return f
def dfun(x,num): #计算雅可比矩阵的逆矩阵
df = np.zeros((num,num),dtype=float)
dx = 0.00001 #
x1 = np.copy(x)
for i in range(0,num): # 求导数,i是列,j是行
for j in range(0,num):
x1 = np.copy(x)
x1[j] = x1[j]+dx #x+dx
df[i,j] = (Fun(x1,num)[i]-Fun(x,num)[i])/dx #f(x+dx)-f(x)/dx
df_1 = np.linalg.inv(df) #计算逆矩阵
return df_1
def Newton(x,num):
x1 = np.copy(x)
i = 0
delta = np.copy(x)
while(np.sum(abs(delta)) > 1.e-3): #控制循环次数 10-3
x1 = x-dot(dfun(x,num),Fun(x,num)) #公式
delta = x1-x #比较x的变化
x = x1
i = i+1
return x
def main():
# 方程未知数的个数
num =2
x = np.ones((num),dtype=float)
#初始值
x[0]=1.5
x[1]=1.0
a = Newton(x,num)
print(a)
if __name__ == '__main__':
main() | [
"1364826576@qq.com"
] | 1364826576@qq.com |
2fdbf5995d22d446b67b4557c6fa3d84c77f7861 | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-AVFoundation/PyObjCTest/test_avmusicevents.py | c80ab744d2f180dcafae94f772ee789b8b375805 | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 4,577 | py | import AVFoundation
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestAVMusicEvents(TestCase):
def test_constants(self):
self.assertIsEnumType(AVFoundation.AVMIDIControlChangeMessageType)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeBankSelect, 0)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeModWheel, 1)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeBreath, 2)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeFoot, 4)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypePortamentoTime, 5)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeDataEntry, 6)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeVolume, 7)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeBalance, 8)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypePan, 10)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeExpression, 11)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeSustain, 64)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypePortamento, 65)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeSostenuto, 66)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeSoft, 67)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeLegatoPedal, 68)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeHold2Pedal, 69)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeFilterResonance, 71)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeReleaseTime, 72)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeAttackTime, 73)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeBrightness, 74)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeDecayTime, 75)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeVibratoRate, 76)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeVibratoDepth, 77)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeVibratoDelay, 78)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeReverbLevel, 91)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeChorusLevel, 93)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeRPN_LSB, 100)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeRPN_MSB, 101)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeAllSoundOff, 120)
self.assertEqual(
AVFoundation.AVMIDIControlChangeMessageTypeResetAllControllers, 121
)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeAllNotesOff, 123)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeOmniModeOff, 124)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeOmniModeOn, 125)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeMonoModeOn, 126)
self.assertEqual(AVFoundation.AVMIDIControlChangeMessageTypeMonoModeOff, 127)
self.assertIsEnumType(AVFoundation.AVMIDIMetaEventType)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeSequenceNumber, 0x00)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeText, 0x01)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeCopyright, 0x02)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeTrackName, 0x03)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeInstrument, 0x04)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeLyric, 0x05)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeMarker, 0x06)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeCuePoint, 0x07)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeMidiChannel, 0x20)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeMidiPort, 0x21)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeEndOfTrack, 0x2F)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeTempo, 0x51)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeSmpteOffset, 0x54)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeTimeSignature, 0x58)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeKeySignature, 0x59)
self.assertEqual(AVFoundation.AVMIDIMetaEventTypeProprietaryEvent, 0x7F)
@min_os_level("13.0")
def test_constants13_0(self):
self.assertIsInstance(AVFoundation.AVExtendedNoteOnEventDefaultInstrument, int)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
b5ed87c30787e2e63e8deee43faf70ddc16b3e07 | 944401a6292baa2d23b9738898e0b0cb199d0795 | /color_quantization/median-cut/img_quality_assessment(IQA)/ssim/lab_cs/lab_ssim.py | 7d495b83f9e669fec5604bdd7a7b921d4422055b | [] | no_license | sunnyweilai/Finding-Theme-Color-Palettes | cc84c93ce58abdd1802431c41bd59181d7a4f75b | 4c38b112f5c40b43d6ec126e415b609c7fdc1f39 | refs/heads/master | 2022-12-21T09:41:31.187411 | 2019-04-30T14:50:17 | 2019-04-30T14:50:17 | 184,273,925 | 1 | 0 | null | 2022-12-07T03:46:55 | 2019-04-30T14:09:52 | Python | UTF-8 | Python | false | false | 1,355 | py | """
image quality assessment (IQA) of the quantized images and the original image in L*a*b* color space
----- method: SSIM
----- version 1.0 (skimage library)
----- http://scikit-image.org/docs/dev/auto_examples/transform/plot_ssim.html
"""
import numpy as np
import csv
from PIL import Image
import skimage
from skimage import color
from skimage.measure import compare_ssim
from quantization import median_cut
def main() :
# ---- open the reference image
original_img = Image.open('../../../../img/sky.jpg')
testimg_list = []
for n_colors in range(1, 21):
lab_array = median_cut(original_img, n_colors)
testimg_list.append(lab_array)
# ---- get lab original array
ori_arr = np.array(original_img)
ori_arr_lab = skimage.color.rgb2lab(ori_arr)
# ---- rescale original raster
rescale_ori = (ori_arr_lab + [0, 128, 128]) / [100, 255, 255]
# ---- compare MSSIM
score_list = []
for i in testimg_list:
score = compare_ssim(rescale_ori[:,:,0], i[:,:,0], multichannel=True)
score_list.append(score)
# ---- save ssim score to csv file
csvfile = "ssim_lab_in_L.csv"
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in score_list:
writer.writerow([val])
if __name__ == "__main__":
main()
| [
"wnn2260@gmail.com"
] | wnn2260@gmail.com |
a285c1fb0cced66886bb31bfdeadbcf093397cb3 | 64a80df5e23b195eaba7b15ce207743e2018b16c | /Downloads/adafruit-circuitpython-bundle-py-20201107/lib/adafruit_ble_adafruit/tone_service.py | 9b2c3972b4b73af73af5cfe20532239dce11384f | [] | no_license | aferlazzo/messageBoard | 8fb69aad3cd7816d4ed80da92eac8aa2e25572f5 | f9dd4dcc8663c9c658ec76b2060780e0da87533d | refs/heads/main | 2023-01-27T20:02:52.628508 | 2020-12-07T00:37:17 | 2020-12-07T00:37:17 | 318,548,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | # The MIT License (MIT)
#
# Copyright (c) 2020 Dan Halbert for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_ble_adafruit.tone_service`
================================================================================
BLE access to play tones.
* Author(s): Dan Halbert
"""
__version__ = "1.2.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Adafruit.git"
import struct
from _bleio import PacketBuffer
from adafruit_ble.attributes import Attribute
from adafruit_ble.characteristics import Characteristic, ComplexCharacteristic
from adafruit_ble_adafruit.adafruit_service import AdafruitService
class _TonePacket(ComplexCharacteristic):
uuid = AdafruitService.adafruit_service_uuid(0xC01)
format = "<HI"
format_size = struct.calcsize(format)
def __init__(self):
super().__init__(
properties=Characteristic.WRITE,
read_perm=Attribute.NO_ACCESS,
max_length=self.format_size,
fixed_length=True,
)
def bind(self, service):
"""Binds the characteristic to the given Service."""
bound_characteristic = super().bind(service)
return PacketBuffer(bound_characteristic, buffer_size=1)
class ToneService(AdafruitService):
"""Play tones."""
uuid = AdafruitService.adafruit_service_uuid(0xC00)
_tone_packet = _TonePacket()
"""
Tuple of (frequency: 16 bits, in Hz, duration: 32 bits, in msecs).
If frequency == 0, a tone being played is turned off.
if duration == 0, play indefinitely.
"""
def __init__(self, service=None):
super().__init__(service=service)
self._tone_packet_buf = bytearray(_TonePacket.format_size)
@property
def tone(self):
"""Return (frequency, duration), or None if no value available"""
buf = self._tone_packet_buf
if self._tone_packet.readinto(buf) == 0:
# No new values available.
return None
return struct.unpack(_TonePacket.format, buf)
def play(self, frequency, duration):
"""
Frequency is in Hz. If frequency == 0, a tone being played is turned off.
Duration is in seconds. If duration == 0, play indefinitely.
"""
self._tone_packet = struct.pack(
_TonePacket.format,
frequency,
0 if duration == 0 else int(duration * 1000 + 0.5),
)
| [
"aferlazzo@gmail.com"
] | aferlazzo@gmail.com |
0cbb3fd6548df0a2c7dbfccdbd9d8f5aa52fcbf4 | 74eee5bdaae10b2cfbd936e3c10cc9c91b9220e0 | /Chapter 10 - Binary Trees/10.5_sum_of_binary_paths.py | b2597339593a8a6654df8b23d61e02bd1bb35045 | [] | no_license | kishan/Elements-of-Porgramming-Interviews-Python-Solutions | abc02af36102e059f7213610ce948a000879e9ec | 32fe89b4927da8e026ff27a6b9894f639a8a2de9 | refs/heads/master | 2020-12-25T06:52:42.729405 | 2016-08-26T16:08:40 | 2016-08-26T16:08:40 | 62,021,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | """
Consider a binary tree in which each node contains a binary digit
Design an alogirthm to compute the sum of the binary numbers represented by the root-to-leaf paths
"""
def sum_to_root(node, partial_sum=0):
if node is None:
return 0
partial_sum = partial_sum*2 + node.data
if (node.left is None) and (node.right is None):
return partial_sum
else:
return sum_to_root(node.left, partial_sum) + sum_to_root(node.right, partial_sum)
| [
"kspatel2018@gmail.com"
] | kspatel2018@gmail.com |
af588fafe20c072ff22dc603dd5b083235819834 | 9f835d53232e954805b7ed1d93889e409209b36b | /2920.py | 3fe5d6d26574cdf3a6e64e3a56708f6a5b2b8766 | [] | no_license | dmswl0311/Baekjoon | 7c8a862fceff086b3d7740eef23b80164e1d5aeb | 22040aff6b64d5081e86d91b0d118d1a718a4316 | refs/heads/master | 2023-04-29T13:48:51.448245 | 2021-05-26T14:35:32 | 2021-05-26T14:35:32 | 323,482,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | num_list = list(map(int, input().split()))
ori_list = [1, 2, 3, 4, 5, 6, 7, 8]
if num_list == ori_list:
print("ascending")
elif num_list == sorted(ori_list, reverse=True):
print("descending")
else:
print("mixed")
| [
"dmswl_0311@naver.com"
] | dmswl_0311@naver.com |
a1a3d80bb3ff411b6a757c8662ed29b06d159dea | 4236d1c3b153847f888402af5dd218fe4004fddc | /events/models.py | 1fc1f12110011a0687ae2dbd3c8b8e0663e5ceef | [] | no_license | zurcx/zurczevents | 6feaae655dcc7bac08a9366e72e65da0ecace69d | 5134841e0afca3bc1f88e1d27980a58f91f5d984 | refs/heads/master | 2020-05-31T04:40:55.735346 | 2013-08-02T18:07:22 | 2013-08-02T18:07:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | #encoding: utf-8
from django.contrib.auth.models import User
from django.db import models
from django.core.urlresolvers import reverse
class Event(models.Model):
TYPE_CHOICES = (
(1, u'Workshop'),
(2, u'Dojo'),
(3, u'Palestra'),
)
name = models.CharField(verbose_name=u'Nome', max_length=100)
user = models.ForeignKey(User, verbose_name=u'Usuário',
null=True, blank=True)
type = models.IntegerField(choices=TYPE_CHOICES,
verbose_name=u'Tipo do Evento')
description = models.TextField(verbose_name=u'Descrição',
blank=True)
created_on = models.DateTimeField(verbose_name=u'Criado em',
auto_now_add=True)
link = models.URLField(verbose_name=u'Link', blank=True)
public = models.BooleanField(verbose_name=u'Público?',
default=True)
event_date = models.DateField(verbose_name=u'Data do Evento',
null=True, blank=True)
def comments_count(self):
return self.comments.count()
comments_count.short_description = u'Número de Comentários'
@models.permalink
def get_absolute_url(self):
return ('events_details', (), {'pk': self.pk})
def __unicode__(self):
return self.name
class Meta:
verbose_name = u'Evento'
verbose_name_plural = u'Eventos'
ordering = ['name']
class Comment(models.Model):
name = models.CharField(verbose_name=u'Nome',
max_length=100)
email = models.EmailField(verbose_name=u'E-mail',)
event = models.ForeignKey(Event, verbose_name=u'Evento',
related_name='comments')
text = models.TextField(verbose_name=u'Texto')
website = models.URLField(verbose_name=u'Perfil Facebook',
blank=True)
created_on = models.DateTimeField(verbose_name=u'Criado em',
auto_now_add=True)
def __unicode__(self):
return self.text
class Meta:
verbose_name = u'Comentário'
verbose_name_plural = u'Comentários'
ordering = ['created_on'] | [
"luizfabiodacruz@gmail.com"
] | luizfabiodacruz@gmail.com |
935f188168e56f7d9e289270aa76cbc5f4770897 | 08db28fa3836c36433aa105883a762396d4883c6 | /combine/opencv.py | eb63b33de40c9e1e7a9507722be5ef552d7aa6ad | [] | no_license | xieyipeng/FaceRecognition | 1127aaff0dd121319a8652abcfe8a59a7beaaf43 | dede5b181d6b70b87ccf00052df8056a912eff0f | refs/heads/master | 2022-09-19T07:02:33.624410 | 2020-06-02T03:03:58 | 2020-06-02T03:03:58 | 246,464,586 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | # -*- coding: utf-8 -*-
import sys
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from test_mtcnn_wider import face_detect
import cv2
import numpy as np
from test_vgg_ck import emotion
class picture(QWidget):
def __init__(self):
super(picture, self).__init__()
self.data_path = ''
self.rectangles = None
self.resize(300, 400)
self.setWindowTitle("label显示图片")
self.input = QLabel(self)
self.input.move(50, 90)
self.input.setFixedSize(200, 300)
btn = QPushButton(self)
btn.setText("打开图片")
btn.move(50, 30)
btn.clicked.connect(self.openimage)
det = QPushButton(self)
det.setText("检测")
det.move(170, 30)
det.clicked.connect(self.detect)
def openimage(self):
imgName, imgType = QFileDialog.getOpenFileName(self, "打开图片", "", "*.jpg;;*.png;;All Files(*)")
jpg = QtGui.QPixmap(imgName).scaled(self.input.width(), self.input.height())
print(type(jpg))
self.input.setPixmap(jpg)
self.data_path = imgName
def detect(self):
self.rectangles, points = face_detect(image_path=self.data_path)
img = cv2.imread(self.data_path)
for rectangle in self.rectangles:
print(rectangle)
x1, y1, width, height, face_score = rectangle
x1, y1, x2, y2 = x1, y1, x1 + width, y1 + height
cv2.putText(img, str(rectangle[4]), (int(rectangle[0]), int(rectangle[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
(0, 255, 0))
cv2.rectangle(img, (int(rectangle[0]), int(rectangle[1])), (int(rectangle[2]), int(rectangle[3])),
(255, 0, 0), 1)
category, emotion_score = emotion(img[int(y1):int(y2), int(x1):int(x2)])
print(category, emotion_score)
cv2.putText(img, category, (int(x1), int(y1) + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1,
cv2.LINE_AA)
cv2.imshow('image', img)
cv2.waitKey(0)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
my = picture()
my.move(300, 300)
my.show()
sys.exit(app.exec_())
| [
"3239202719@qq.com"
] | 3239202719@qq.com |
56b182e8d53ba51c9edcdf868deccdbc7b7f37a1 | eb9ed8351d2e0bb4655c5970e91280767703f1a9 | /user_app/admin.py | 8556d66f35b7faaaf72c4fe7c99710b43de13661 | [
"Apache-2.0"
] | permissive | lmyfzx/Django-Mall | b8c03a7d2ddd56cde7f44b2f9bc8c08a486febab | 13cb59130d15e782f78bc5148409bef0f1c516e0 | refs/heads/master | 2023-01-23T10:17:46.477968 | 2020-11-21T12:44:02 | 2020-11-21T12:44:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,980 | py | #
# from django.contrib import admin
# from user_app.model.seller_models import Shoppers, Store
#
#
#
# # Register your models here.
#
#
# @admin.register(Store)
# class StoreAdmin(admin.ModelAdmin):
# list_display = ('store_name', 'shopper_name', 'shop_grade', 'start_time', 'province', 'attention')
# # readonly_fields = ('shopper','shop_grade','attention')
# readonly_fields = ('shop_grade', 'attention', 'province', 'shopper')
#
# def shopper_name(self, obj):
# """商家名称"""
# return obj.shopper.username
#
# shopper_name.short_description = '商家名称'
#
# def has_add_permission(self, request):
# return False
#
# def has_delete_permission(self, request, obj=None):
# return False
#
# def get_queryset(self, request):
# result = super().get_queryset(request)
# if not request.user.is_superuser:
# return result.filter(shopper=request.user)
# return result
#
#
# @admin.register(Shoppers)
# class ShoppersAdmin(admin.ModelAdmin):
# # exclude = ('user',)
# list_display = ('shopper_name', 'head_images', 'phone', 'credit', 'sex', 'is_vip')
# readonly_fields = ('credit', 'is_vip', 'user')
#
# def sex(self, obj):
# """性别"""
# return obj.get_sex_display()
#
# def shopper_name(self, obj):
# """商家名称"""
# return obj.user.username
#
# shopper_name.short_description = '商家名称'
#
# def shopper_email(self, obj):
# """商家邮箱"""
# return obj.user.email
#
# shopper_email.short_description = '邮箱'
#
# def has_add_permission(self, request):
# return False
#
# def has_delete_permission(self, request, obj=None):
# return False
#
# def get_queryset(self, request):
# result = super().get_queryset(request)
# if not request.user.is_superuser:
# return result.filter(user=request.user)
# return result
#
#
| [
"syz247179876@126.com"
] | syz247179876@126.com |
60712320e294e2dfe3916fa779900d93f683284e | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/zhzd_add_20190618133428.py | 2ed7c6ff4cf5a493ab9a2255b061e4b7b4995775 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
import pandas as pd
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHRzhzd5')#txt目录提取
emrtxt2s = EMRdef.txttq(u'D:\DeepLearning ER\EHRsex')
ryzd = []
for emrtxt in emrtxts:
f = open(emrtxt,'r',errors="ignore")#中文加入errors
emrpath = os.path.basename(emrtxt)
emrpath = os.path.splitext(emrpath)[0]#提取目录
lines=f.readlines()
lines = ''.join(lines)
lines = re.sub(' ','',lines)
lines = re.split('\n',lines)
for emrtxt2 in emrtxt2s:
f2 = open(emrtxt2,'r',errors="ignore")#中文加入errors
emrpath2 = os.path.basename(emrtxt2)
emrpath2 = os.path.splitext(emrpath2)[0]#提取目录
lines2 = f2.readlines()
lines2 = ''.join(lines2)
if emrpath == emrpath2:
lines.append(lines2)
ryzd.append(lines)
#导入关联规则
import orangecontrib.associate.fpgrowth as oaf
def dealRules(rules):
returnRules = []
for i in rules:
temStr = '';
for j in i[0]: #处理第一个frozenset
temStr = temStr+j+'&'
temStr = temStr[:-1]
temStr = temStr + ' ==> '
for j in i[1]:
temStr = temStr+j+'&'
temStr = temStr[:-1]
temStr = temStr + ';' +'\t'+str(i[2])+ ';' +'\t'+str(i[3])
# print(temStr)
returnRules.append(temStr)
return returnRules
def dealResult(rules):#对规则处理
returnRules = []
for i in rules:
temStr = '';
for j in i[0]: #处理第一个frozenset
temStr = temStr+j+'&'
temStr = temStr[:-1]
temStr = temStr + ' ==> '
for j in i[1]:
temStr = temStr+j+'&'
temStr = temStr[:-1]
temStr = temStr + ';' +'\t'+str(i[2])+ ';' +'\t'+str(i[3])+ ';' +'\t'+str(i[4])+ ';' +'\t'+str(i[5])+ ';' +'\t'+str(i[6])+ ';' +'\t'+str(i[7])
# print(temStr)
returnRules.append(temStr)
return returnRules
def ResultDFToSave(rules): #根据Qrange3关联分析生成的规则得到并返回对于的DataFrame数据结构的函数
returnRules = []
for i in rules:
temList = []
temStr = '';
for j in i[0]: #处理第一个frozenset
temStr = temStr + str(j) + '&'
temStr = temStr[:-1]
temStr = temStr + ' ==> '
for j in i[1]:
temStr = temStr + str(j) + '&'
temStr = temStr[:-1]
temList.append(temStr); temList.append(i[2]); temList.append(i[3]); temList.append(i[4])
temList.append(i[5]); temList.append(i[6]); temList.append(i[7])
returnRules.append(temList)
return pd.DataFrame(returnRules,columns=('规则','项集出现数目','置信度','覆盖度','力度','提升度','利用度'))
if __name__ == '__main__':
supportRate = 0.004
confidenceRate = 0.6
itemsets = dict(oaf.frequent_itemsets(ryzd, supportRate))
rules = oaf.association_rules(itemsets, confidenceRate)
rules = list(rules)
regularNum = len(rules)
printRules = dealRules(rules)
result = list(oaf.rules_stats(rules, itemsets, len(ryzd))) #下面这个函数改变了rules,把rules用完了!
printResult = dealResult(result)
#################################################
# 下面将结果保存成excel格式的文件
dfToSave = ResultDFToSave(result)
dfToSave.to_excel(r'C:\Users\Administrator\Desktop\2.xlsx')
#######################################################
# 下面是根据不同置信度和关联度得到关联规则数目
listTable = []
supportRate = 0.001
confidenceRate = 0.1
for i in range(9):
support = supportRate*(i+1)
listS = []
for j in range(9):
confidence = confidenceRate*(j+1)
itemsets = dict(oaf.frequent_itemsets(ryzd, support))
rules = list(oaf.association_rules(itemsets, confidence))
listS.append(len(rules))
listTable.append(listS)
dfList = pd.DataFrame(listTable,index = [supportRate*(i+1) for i in range(9)],columns=[confidenceRate*(i+1) for i in range(9)])
dfList.to_excel(r'C:\\Users\Administrator\Desktop\outlunwen.xlsx')
| [
"1044801968@qq.com"
] | 1044801968@qq.com |
de3c80a29cae8376ab53c57e9b03610ba50b9701 | fdafd2ef8a26a3e9ee6a4016ec6272516d64168f | /zeta_python/completed/2161.py | 15de788098d2c3241e90ba87cd8ff4e261a537c9 | [] | no_license | yenru0/CodeObjecct | 322d669d9e70b7202e5e527cda27da0b1e8f273d | b9d5260b973d7435c089c49bc8867be5d2be4d85 | refs/heads/master | 2021-06-28T06:13:57.978205 | 2021-03-13T00:47:53 | 2021-03-13T00:47:53 | 221,762,665 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | from collections import deque
def solve(N):
queue = deque(range(1, N + 1))
cnt = 1
ret = []
while len(queue) != 1:
if cnt % 2 == 0:
queue.append(queue.popleft())
else:
ret.append(queue.popleft())
cnt += 1
cnt %= 2
ret.append(queue[0])
return " ".join(map(str, ret))
if __name__ == "__main__":
print(solve(int(input())))
| [
"yenru0604@gmail.com"
] | yenru0604@gmail.com |
797e0c3022cbfa099c71ee491210295c6c2e5f00 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /YnsBcniRG9k77SSvA_10.py | 16e0a01c189baa0333bd56f0825414d0eee0fe66 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | """
Imagine a school that kids attend for 6 years. In each year, there are five
groups started, marked with the letters _a, b, c, d, e_. For the first year,
the groups are _1a, 1b, 1c, 1d, 1e_ and for the last year, the groups are _6a,
6b, 6c, 6d, 6e_.
Write a function that returns the groups in the school by year (as a string),
separated with a comma and a space in the form of `"1a, 1b, 1c, 1d, 1e, 2a, 2b
(....) 5d, 5e, 6a, 6b, 6c, 6d, 6e"`.
### Examples
print_all_groups() ➞ "1a, 1b, 1c, 1d, 1e, 2a, 2b, 2c, 2d, 2e, 3a, 3b, 3c, 3d, 3e, 4a, 4b, 4c, 4d, 4e, 5a, 5b, 5c, 5d, 5e, 6a, 6b, 6c, 6d, 6e "
### Notes
Use nested "for" loops to achieve this, as well as the array of `["a", "b",
"c", "d", "e"]` groups.
"""
def print_all_groups():
lst = ["a", "b", "c", "d", "e"]
r = ''
for n in range(1, 7):
for ch in lst:
if ch == 'e' and n == 6:
break
r += str(n) + ch + ', '
r += '6e'
return r
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
bfc4c70e5c4e5c20ff937ed96aa580ee5ad5d6f9 | 1c3ce4f21ce62ecfe5f0cfe1281ad912645b39bf | /genename2lrgref.py | 89174d67bb82b9aff523d8371876e93dfe7727e1 | [] | no_license | tz2614/softwarecarpentryworkshop | 6694d8d02ebbc71c74786e6ab477bdac716cdccf | c54301c7cc4ea890275f2a159322a4e5f0e39560 | refs/heads/master | 2021-08-08T03:19:18.534877 | 2017-11-09T12:44:08 | 2017-11-09T12:44:08 | 109,675,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,795 | py | def genename2LRGref(gene):
#open and read LRGref list text file
f = open(lrgtext, "r")
#create a list of genenames, and a list of LRGrefs
genenames = []
LRGref_list = []
# iterate over the lines in the file
for line in f:
# split the line into a list of column values
if line startswith ("#"):
pass
if line startswith ("LRG_")
columns = line.split(' ')
# clean any whitespace off the items
columns = [col.strip() for col in columns]
# ensure the column has at least one value before printing
if columns:
print "first", columns[0] # print the first column
print "last", columns[-1] # print the last column
for line in fh:
line = line.rstrip()
#if line starts with ">", add it to seq_names.
if line.startswith(">"):
seq_names.append(line)
#print seq_names
# if seq_data is not empty, add the line to seq_data string
if seq_data:
seq_list.append(seq_data)
seq_data = ""
else:
seq_data += line
#for the last sequence, as there are no more ">" in the text, the sequence
#will not be appended to seq_data, hence this extra line need to be added.
seq_list.append(seq_data)
#check the seqs appended are ok
#print seq_list
#sort seqs according to length
seq_list = sorted(seq_list, key=len)
#print seq_list
#assign shortest seq to a variable
shortestk = seq_list[0]
print shortestk
kmers = []
str_len = len(shortestk)
# Iterate over kmer lengths
for kmer_len in range(1, str_len+1)[::-1]:
#print "Length", kmer_len
# Iterate over start position for that kmer length
for pos in range(0, (str_len-kmer_len)+1):
#print "Start position", pos
#assign the current kmer within shortestk to a variable
kmer = shortestk[pos:(pos+kmer_len)]
#append the kmer to the kmers list
kmers.append(kmer)
#sort the list of kmers according to length, while making it a unique list.
kmers = sorted(set(kmers), key=len)[::-1]
#print kmers
# search each kmer in each fasta sequences, in order, until one is found in every sequence.
# As kmers list start with the longest kmer, this should return the longest kmer within seqs.
for kmer in kmers:
#print "KMER", kmer
kmerfound = True
for seq in seq_list:
#print "SEQ", seq
if kmer not in seq:
kmerfound = False
break
if kmerfound:
print
print kmer
print
return kmer
textfile = "~/LRGproject/list_LRG_GRCh37.txt"
# NOTE(review): neither `sharedmotif` nor `fastafile` is defined anywhere in
# this file (the function above is `genename2LRGref` and the path variable is
# `textfile`) -- this call raises NameError; confirm the intended entry point.
sharedmotif(fastafile)
| [
"tony_zheng35@hotmail.com"
] | tony_zheng35@hotmail.com |
c1b06abef4ce43d403bca1d6da1c63f136747d47 | 2b6715706ca85570e23188d7ffcb716e8e204f1b | /00Python代码/03Pentest_通过搜索引擎搜索关键字/enginesearchV2.0.py | af27d72c3cea7ef56fee73b6fcba746af7592aaf | [] | no_license | thinks520/CodeRecord | 3c2e9e11082ec305dc3352a2b4a034795f2b2182 | a9d32b9761de7a21030765a9f4ad41df94b88c63 | refs/heads/master | 2020-03-23T14:27:39.166999 | 2018-07-19T14:12:25 | 2018-07-19T14:12:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | #!/usr/bin/python
#coding:utf-8
import requests
from lxml import etree
input=open('fangong.txt','r')
output1=open('result-10086.txt','w+')
for param in input:
baseurl='http://www.baidu.com/s?wd=inurl:10086.cn intext:"'+param+'"&rsv_spt=1&rsv_iqid=0xfbe8f0570001a8a4&issp=1&f=8&rsv_bp=0&rsv_idx=2&ie=utf-8&tn=baiduhome_pg&rsv_enter=1&rsv_sug3=1'
s=requests.get(baseurl)
r=s.content
html=etree.HTML(r)
result=html.xpath('//div[@class="nors"]/p')
try:
print result[0].text,param
except IndexError:
print baseurl
output1.writelines(baseurl+'\n'+'\n')
output1.close()
output2=open('result-12582.txt','w+')
for param in input:
baseurl='http://www.baidu.com/s?wd=inurl:12582.cn intext:"'+param+'"&rsv_spt=1&rsv_iqid=0xfbe8f0570001a8a4&issp=1&f=8&rsv_bp=0&rsv_idx=2&ie=utf-8&tn=baiduhome_pg&rsv_enter=1&rsv_sug3=1'
s=requests.get(baseurl)
r=s.content
html=etree.HTML(r)
result=html.xpath('//div[@class="nors"]/p')
try:
print result[0].text,param
except IndexError:
print baseurl
output2.writelines(baseurl+'\n'+'\n')
output2.close()
input.close() | [
"ljressrg@gmail.com"
] | ljressrg@gmail.com |
87e16b4002f252ff4e14773750a8d2a08e3f95b5 | 17ca5bae91148b5e155e18e6d758f77ab402046d | /M_BH_relation/read_MBH_form.py | 569af31bc6704cb0978eb63f565b592b46f0f811 | [] | no_license | dartoon/QSO_decomposition | 5b645c298825091c072778addfaab5d3fb0b5916 | a514b9a0ad6ba45dc9c3f83abf569688b9cf3a15 | refs/heads/master | 2021-12-22T19:15:53.937019 | 2021-12-16T02:07:18 | 2021-12-16T02:07:18 | 123,425,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,249 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 26 09:34:27 2018
@author: Dartoon
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
# Read the whole FMOS black-hole-mass table into memory.
f = open("fmos_MBH_table","r")
with f as g:
    lines = g.readlines()
# First line is the commented header: strip the '#' and split into column names.
porp_list = lines[0].replace('#','').split(' ')
# Remaining lines: one target per row, split on single spaces.
samples = [lines[i].split(' ') for i in range(1,len(lines))]
#for i in range(len(samples)):
ID = ['CDFS-1', 'CID543','CID70', 'SXDS-X735', 'CDFS-229', 'CDFS-321', 'CID1174',\
'CID216', 'CID237','CID3242','CID3570','CID452', 'CID454',\
'CID50','CID607','LID1273', 'LID1538','LID360','SXDS-X1136',\
'SXDS-X50', 'SXDS-X717','SXDS-X763','SXDS-X969','XID2138','XID2202',\
'XID2396', 'CID206', 'ECDFS-358',\
]
#==============================================================================
# ############ load the find the serial NO. for the list##########
#==============================================================================
ID_ser_dic = {}
#XID2202 to LID1622
#XID2138 to LID1820
#XID2396 to LID1878
#CDFS321 to ECDFS321
MB_ID = ['CDFS-1', 'CID543','CID70', 'SXDS-X735', 'CDFS-229', 'ECDFS-321', 'CID1174',\
'CID216', 'CID237','CID3242','CID3570','CID452', 'CID454',\
'CID50','CID607','LID1273', 'LID1538','LID360','SXDS-X1136',\
'SXDS-X50', 'SXDS-X717','SXDS-X763','SXDS-X969','LID1820','LID1622',\
'LID1878', 'CID206', 'ECDFS-358']
# Map each working ID to its row index in `samples`, matching through MB_ID
# (which carries the renamings listed in the comments above); IDs that do not
# appear in the table are recorded as -99.
for j in range(len(ID)):
    count = 0
    for i in range(len(samples)):
        if samples[i][1] == MB_ID[j]:
            ID_ser_dic.update({ID[j]:i})
            count += 1
    # no row matched -> sentinel value
    if count == 0:
        ID_ser_dic.update({ID[j]: -99})
##==============================================================================
## Print on the props on one sample
##==============================================================================
#tar_in = 3
#t_name = ID[tar_in]
#ser = ID_ser_dic[t_name]
#print "information for {0}".format(t_name)
#for i in range(len(samples[0])):
# print 'serial{0}'.format(i), porp_list[i], samples[ser][i]
#
##==============================================================================
## Comparing the Ha Hadr, Hb, Hbdr
##==============================================================================
#for target in ID:
# t_name = target
# if ID_ser_dic[t_name] != -99:
# ser = ID_ser_dic[t_name]
# print 'target, Ha Hadr, Hb, Hbdr', float(samples[ser][5])-float(samples[ser][6]), float(samples[ser][12])-float(samples[ser][13])
#
#for tar_in in range(len(ID)):
# #==============================================================================
# # test M_BH by Ha
# #6.71+0.48*(43.73806-42)+2.12*np.log10(4481.164/1000)
# #==============================================================================
# t_name = ID[tar_in]
# ser = ID_ser_dic[t_name]
# print "Cal MBH_Ha for {0}".format(t_name)
# if samples[ser][10] != 0:
# FWMH_a = float(samples[ser][8])
# logLHadr = float(samples[ser][6])
# cal_logMa = 6.71+0.48*(logLHadr-42)+2.12*np.log10(FWMH_a/1000)
# print float(cal_logMa) - float(samples[ser][10])
#
# #==============================================================================
# # test M_BH by Ha
# #6.71+0.48*(43.73806-42)+2.12*np.log10(4481.164/1000)
# #==============================================================================
# t_name = ID[tar_in]
# ser = ID_ser_dic[t_name]
# if samples[ser][21] != 0:
# FWMH_b = float(samples[ser][19])
# logL5100dr = float(samples[ser][16])
# cal_logMb = 6.91+0.5*(logL5100dr-44)+2.*np.log10(FWMH_b/1000)
# if float(samples[ser][21]) != 0:
# print "Cal MBH_Hb for {0}".format(t_name)
# print float(cal_logMb) - float(samples[ser][21])
# For every target that has both an Halpha-based (col 10) and an Hbeta-based
# (col 21) black-hole mass, recompute the two single-epoch virial estimates
# (same formulas as the commented checks above) and collect the difference
# log M(Ha) - log M(Hb).
diff = []
for i in range(len(samples)):
    if float(samples[i][10]) != 0 and float(samples[i][21]) != 0:
        print i
        # Halpha estimator: FWHM from col 8, dust-corrected L(Halpha) from col 6
        FWMH_a = float(samples[i][8])
        logLHadr = float(samples[i][6])
        cal_logMa = 6.71+0.48*(logLHadr-42)+2.12*np.log10(FWMH_a/1000)
        # Hbeta estimator: FWHM from col 19, L5100 from col 16
        FWMH_b = float(samples[i][19])
        logL5100dr = float(samples[i][16])
        cal_logMb = 6.91+0.5*(logL5100dr-44)+2.*np.log10(FWMH_b/1000)
        diff.append(cal_logMa-cal_logMb)
print diff
diff = np.asarray(diff)
| [
"dingxuheng@mail.bnu.edu.cn"
] | dingxuheng@mail.bnu.edu.cn |
187862d60f6dd31623819405c6071193862cdcef | 08ab565444e52429c3ece47ee0bc014b0a04e08a | /backend/feedback/api/serializers.py | bdf744970ff552452828d7676fd6eabfa1b62c91 | [] | no_license | ScrollPage/Hackatom | 70bb2246df3c0cd7da51dbab941d3303a831f887 | 5bbeb5bf4936502f5d6d8e8b400f912583ab9d4e | refs/heads/main | 2023-03-20T05:06:07.505978 | 2021-03-15T16:58:42 | 2021-03-15T16:58:42 | 336,541,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | from rest_framework import serializers
from feedback.models import Rating
class RatingSerializer(serializers.ModelSerializer):
    '''Serialize a rating ("like"): the star value a user gives an initiative.'''
    class Meta:
        model = Rating
        fields = ['star', 'initiative']
    def create(self, validated_data):
        # Enforce one rating per (appraiser, initiative): update the existing
        # row's star value when present, otherwise insert a new row.  The
        # appraiser is always the authenticated request user, never client input.
        rating, _ = Rating.objects.update_or_create(
            appraiser=self.context['request'].user,
            initiative=validated_data.get('initiative', None),
            defaults={'star': validated_data.get('star')}
        )
return rating | [
"54814200+reqww@users.noreply.github.com"
] | 54814200+reqww@users.noreply.github.com |
7500c1826864a576d104db5f293a7f1ef125c122 | 31e90bec77ca264efd0867df1a8fceaa68e2749e | /chat/consumers.py | f141219d1ca17b646cdb312a39ab2b88e33646d5 | [] | no_license | safwanvk/ZabChat | c4621d45132b195bcd625647e04757e874730c98 | fb542062a1bd32e66d026e5cd0924ba166a50aec | refs/heads/master | 2022-11-19T20:32:23.201765 | 2020-07-17T15:22:01 | 2020-07-17T15:22:01 | 280,459,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | import json
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
    """Relays chat messages between every websocket joined to one room group."""

    async def connect(self):
        # The room name is captured by the URL route; the channel-layer group
        # name is derived from it so all sockets in a room share a group.
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        await self.channel_layer.group_add(self.room_group_name, self.channel_name)
        await self.accept()

    async def disconnect(self, close_code):
        # Leave the room group when the socket closes.
        await self.channel_layer.group_discard(self.room_group_name, self.channel_name)

    async def receive(self, text_data):
        # Browser -> server: unpack the JSON payload and fan the message out
        # to every consumer in the room group.
        payload = json.loads(text_data)
        await self.channel_layer.group_send(
            self.room_group_name,
            {'type': 'chat_message', 'message': payload['message']},
        )

    async def chat_message(self, event):
        # Group -> this socket: forward the message down the websocket.
        await self.send(text_data=json.dumps({'message': event['message']}))
| [
"safwanvalakundil@gmail.com"
] | safwanvalakundil@gmail.com |
882c42cdf5dd921eca2da25a39ae8e01e16541c7 | 7df0845fdfb8597e2ed45b87a28fa61be9b63db7 | /0x0F-python-object_relational_mapping/14-model_city_fetch_by_state.py | b1f69f3fe68067a294fac544fef07864c61df95c | [] | no_license | yawzyag/holbertonschool-higher_level_programming | 0663903ea947b26e42b70892cd8ba8b1d6ef4af6 | 81036be0d13f22175f103f81fcddbf88308413c2 | refs/heads/master | 2020-05-18T02:06:39.595953 | 2019-09-26T21:59:14 | 2019-09-26T21:59:14 | 184,106,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | #!/usr/bin/python3
"""Start link class to table in database
"""
import sys
from model_state import Base, State
from model_city import City
from sqlalchemy import desc, asc
from sqlalchemy import (create_engine)
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
    # Connection URL is assembled from CLI args: username, password, database.
    db_url = 'mysql+mysqldb://{}:{}@localhost/{}'.format(
        sys.argv[1], sys.argv[2], sys.argv[3])
    engine = create_engine(db_url, pool_pre_ping=True)
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    # One row per city, joined to its state: "State: (city_id) City".
    for state, city in session.query(State, City).join(City).all():
        print("{}: ({}) {}".format(state.name, city.id, city.name))
    session.close()
| [
"yesid.yag@gmail.com"
] | yesid.yag@gmail.com |
6f55109e0fb795a6e582f8b960aa89fa57ad4294 | 61ef327bd1d5ff6db7595221db6823c947dab42b | /FlatData/ScenarioCharacterEmotionExcelTable.py | 2d37dc38d982bf713f5b0563564912d8e67edd9b | [] | no_license | Aikenfell/Blue-Archive---Asset-Downloader | 88e419686a80b20b57a10a3033c23c80f86d6bf9 | 92f93ffbdb81a47cef58c61ec82092234eae8eec | refs/heads/main | 2023-09-06T03:56:50.998141 | 2021-11-19T12:41:58 | 2021-11-19T12:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: FlatData
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# Auto-generated FlatBuffers accessor (do not edit by hand): root table that
# wraps a single vector field, DataList, of ScenarioCharacterEmotionExcel rows.
class ScenarioCharacterEmotionExcelTable(object):
    __slots__ = ['_tab']
    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root-table indirection at `offset` and wrap the table.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ScenarioCharacterEmotionExcelTable()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def GetRootAsScenarioCharacterEmotionExcelTable(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    # ScenarioCharacterEmotionExcelTable
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # ScenarioCharacterEmotionExcelTable
    def DataList(self, j):
        # Return the j-th ScenarioCharacterEmotionExcel row, or None when the
        # DataList field (vtable offset 4) is absent from the buffer.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
            x = self._tab.Indirect(x)
            from FlatData.ScenarioCharacterEmotionExcel import ScenarioCharacterEmotionExcel
            obj = ScenarioCharacterEmotionExcel()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
    # ScenarioCharacterEmotionExcelTable
    def DataListLength(self):
        # Number of rows in the DataList vector (0 when the field is absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0
    # ScenarioCharacterEmotionExcelTable
    def DataListIsNone(self):
        # True when the DataList field is entirely missing from the buffer.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0
# Auto-generated builder helpers: the table has one field (DataList) stored
# in slot 0 as a vector of table offsets (element size 4, alignment 4).
def Start(builder): builder.StartObject(1)
def ScenarioCharacterEmotionExcelTableStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
def AddDataList(builder, DataList): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(DataList), 0)
def ScenarioCharacterEmotionExcelTableAddDataList(builder, DataList):
    """This method is deprecated. Please switch to AddDataList."""
    return AddDataList(builder, DataList)
def StartDataListVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ScenarioCharacterEmotionExcelTableStartDataListVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    return StartDataListVector(builder, numElems)
def End(builder): return builder.EndObject()
def ScenarioCharacterEmotionExcelTableEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder) | [
"rkolbe96@gmail.com"
] | rkolbe96@gmail.com |
9b7254d8e461320bcfbb1dcb7d9fe00fccd73a73 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/54/usersdata/75/24223/submittedfiles/av1_p2_civil.py | 24b2b449ae48b2fda2bd494854cfe8e0fa7b3cee | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | # -*- coding: utf-8 -*-
from __future__ import division
def abs(x):
    """Return the absolute value of x.

    NOTE: intentionally shadows the built-in abs() (kept because the module
    below calls it); behaviour is identical for numeric input.
    """
    if x < 0:
        return -x
    return x
def maior(lista):
    """Return the largest value in `lista`.

    Uses the built-in max() instead of the hand-rolled scan; still raises
    IndexError on an empty list, matching the original `lista[0]` behaviour.
    """
    if not lista:
        raise IndexError('list index out of range')
    return max(lista)
def menor(lista):
    """Return the smallest value in `lista`.

    Uses the built-in min() instead of the hand-rolled scan; still raises
    IndexError on an empty list, matching the original `lista[0]` behaviour.
    """
    if not lista:
        raise IndexError('list index out of range')
    return min(lista)
def altura(lista, altura):
    """Return |max(lista) - altura| + |min(lista) - altura|.

    `lista` holds the pin heights and `altura` the unlock height; the result
    is the combined distance of the tallest and shortest pin from that
    height.  Uses the built-ins instead of the module's maior/menor/abs
    helpers, so the block is self-contained; still raises IndexError on an
    empty list, matching the original helpers.
    """
    if not lista:
        raise IndexError('list index out of range')
    return abs(max(lista) - altura) + abs(min(lista) - altura)
# Read the pin count, the unlock height, then one height per pin.
n = input('Digite a quantidade de pinos da fechadura:')
m = input('DIgite a altura para desbloqueio:')
a = []
for i in range(0, n, 1):
    a.append(input('Digite a altura de cada pino:'))
# FIX: the original printed altura(lista, m), but `lista` is undefined here
# (NameError) -- the list of pin heights built above is `a`.
print ('%1.d' % (altura(a, m)))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
d484f72f6efc4eb7626c7aacb752aea1c75a0017 | 76e931912629c37beedf7c9b112b53e7de5babd7 | /3-mouth04/总/project/day06/ddblog/topic/models.py | 167616ddd78fc3b638a7041c6b35f892c7c7bbd1 | [
"Apache-2.0"
] | permissive | gary-gggggg/gary | c59ac21d8e065f296ff986d11a0e4cbf186a1bc4 | d8ba30ea4bc2b662a2d6a87d247f813e5680d63e | refs/heads/main | 2023-02-23T06:54:34.500683 | 2021-02-01T10:17:02 | 2021-02-01T10:17:02 | 334,905,744 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from django.db import models
# Create your models here.
from user.models import UserProfile
class Topic(models.Model):
    # A blog article owned by a single author (UserProfile).
    title = models.CharField('文章标题', max_length=50)
    category = models.CharField('文章分类', max_length=20)
    # public private
    limit = models.CharField('文章权限', max_length=20)
    introduce = models.CharField('文章简介', max_length=50)
    content = models.TextField('文章内容')
    # created_time is set once on insert; updated_time refreshes on every save.
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)
    # 1对多的外键 -- many-to-one FK: one user owns many topics; topics are
    # deleted together with their owner (CASCADE).
    user_profile = models.ForeignKey(UserProfile,
                                     on_delete=models.CASCADE)
| [
"673248932@qq.com"
] | 673248932@qq.com |
191612a710061c4420bd155af34bf77bd96caf71 | 43aece1d354d6cfbacb22f30eae1d7ff1de83b09 | /moka/tests.py | 149ecb173d749b1edea6f1a87601cadd5598576a | [
"MIT"
] | permissive | harpiya/moka | 230342914b897fd373890e90497ca52acc8f7a96 | 220c4501dc37cc8db8213f2275a071d7b5bc3f69 | refs/heads/master | 2020-04-17T09:43:00.771430 | 2019-01-20T18:52:51 | 2019-01-20T18:52:51 | 166,470,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # @Author: Saadettin Yasir AKEL <developer>
# @Date: 2019-01-20T17:55:20+03:00
# @Email: yasir@harpiya.com
# @Project: Harpiya Kurumsal Yönetim Sistemi
# @Filename: tests.py
# @Last modified by: developer
# @Last modified time: 2019-01-20T18:33:16+03:00
# @License: MIT License. See license.txt
# @Copyright: Harpiya Yazılım Teknolojileri
from unittest import TestCase
from . import moka_checkout
import frappe
class TestMokaCheckout(TestCase):
    """Checks that starting a Moka checkout redirects the caller."""

    def test_set_moka_checkout(self):
        # Kick off a 100 USD checkout and verify frappe's response is a redirect.
        moka_checkout.set_moka_checkout(100, "USD")
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual.
        self.assertEqual(frappe.local.response["type"], "redirect")
| [
"yasir@harpiya.com"
] | yasir@harpiya.com |
4b9bd5ab17684670b1321f414c01d15bf2d024c0 | 95363198c3af43c3cc74ef8131d556e566c75f7a | /diffusion_utils/utils/paths.py | acdad56e8e51202469e6d1b71805a9cb81a86b6c | [] | no_license | shinypond/multinomial_diffusion | 59fde847fd0afea3c4668d9481ea9e4e646be787 | 66f17340e4cd200059bff228cf98a597bf084c26 | refs/heads/main | 2023-08-03T15:23:47.578895 | 2021-09-11T12:10:15 | 2021-09-11T12:10:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | import os
import sys
def add_parent_path(level=1):
script_path = os.path.realpath(sys.argv[0])
parent_dir = os.path.dirname(script_path)
for _ in range(level):
parent_dir = os.path.dirname(parent_dir)
sys.path.insert(0, parent_dir)
def add_parent_paths(levels=[1,2]):
for level in levels:
add_parent_path(level=level)
| [
"e.hoogeboom@gmail.com"
] | e.hoogeboom@gmail.com |
ba423c85f30eb13f5611de3d74f83b05a0a19409 | 7ef5bb39938e669b5571a097f01d96ee53458ad6 | /clone_graph/solution2.py | ef2aa47e3dce48a03fd9dbd320701e39f4c2005d | [
"BSD-2-Clause"
] | permissive | mahimadubey/leetcode-python | 61cd135515b26644197b4736a92a53bb1a5870a6 | 38acc65fa4315f86acb62874ca488620c5d77e17 | refs/heads/master | 2020-08-29T09:27:45.232412 | 2019-10-28T08:06:52 | 2019-10-28T08:06:52 | 217,993,547 | 0 | 0 | BSD-2-Clause | 2019-10-28T07:55:38 | 2019-10-28T07:55:38 | null | UTF-8 | Python | false | false | 1,272 | py | """
Clone an undirected graph. Each node in the graph contains a label and a list
of its neighbors.
"""
# Definition for a undirected graph node
# class UndirectedGraphNode(object):
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution(object):
def cloneGraph(self, node):
"""
:type node: UndirectedGraphNode
:rtype: UndirectedGraphNode
DFS
"""
if node is None:
return None
self.visited = set()
cloned_node = UndirectedGraphNode(node.label)
self.d = {node: cloned_node}
self.visit(node)
return self.d[node]
def visit(self, node):
if node not in self.visited:
self.visited.add(node)
cloned_node = self.d[node]
cloned_neighbors = []
for neighbor in node.neighbors:
if neighbor not in self.d:
cloned_neighbor = UndirectedGraphNode(neighbor.label)
self.d[neighbor] = cloned_neighbor
else:
cloned_neighbor = self.d[neighbor]
cloned_neighbors.append(cloned_neighbor)
self.visit(neighbor)
cloned_node.neighbors = cloned_neighbors
| [
"shichao.an@nyu.edu"
] | shichao.an@nyu.edu |
d022f4d82a5a6e613b95c709b5771d693fb92b4c | 9b02c05a71be741d8c33b59890a1fc9af51b3ba8 | /items_Log_to_Board.py | bb1b6b0ffa75920cf510403be43682fd93da452e | [] | no_license | craymaru/ultima-online-razor-enhanced-scripts | a524cfcfcf5ae0c780528a4cc95005a4817655f5 | 2190b6424480d035c1ea8adda97a956f66e1609d | refs/heads/master | 2022-12-10T09:21:43.664742 | 2022-11-27T15:17:11 | 2022-11-27T15:17:11 | 240,029,926 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | Misc.SendMessage("AXE?", 53)
# Prompt the player to target their axe; its serial is reused for every chop.
axe_serial = Target.PromptTarget()
# Item type IDs for raw logs and sawn boards.
log_id = 0x1BDD
board_id = 0x1BD7
# Hue -> wood name for every log colour this script converts.
log_colors = {
    0x0000: "Log",
    0x07da: "Ork",
    0x04a7: "Ash",
    0x04a8: "Yew",
    0x04a9: "Heartwood",
    0x04aa: "Bloodwood",
    0x047f: "Frostwood"
}
# DEFINES
def PutItemToBank(item_id, amount):
    """Move `amount` of `item_id` (any hue) from the backpack into the bank box."""
    found = Items.FindByID(item_id, -1, Player.Backpack.Serial)
    if not found:
        return
    Items.Move(found, Player.Bank.Serial, amount)
    Misc.Pause(700)
def GetItemFromBank(item_id, amount):
    """Move `amount` of `item_id` (any hue) from the bank box into the backpack."""
    found = Items.FindByID(item_id, -1, Player.Bank.Serial)
    if not found:
        return
    Items.Move(found, Player.Backpack.Serial, amount)
    Misc.Pause(500)
def LogToBoard(axe_serial):
    """Chop every known-colour log stack in the backpack into boards with the axe."""
    for hue in log_colors.keys():
        stack = Items.FindByID(log_id, hue, Player.Backpack.Serial)
        if not stack:
            continue
        Target.Cancel()
        Misc.Pause(50)
        Misc.SendMessage(log_id)
        Items.UseItem(axe_serial)
        Target.WaitForTarget(1000, False)
        Target.TargetExecute(stack)
        Misc.Pause(500)
# Say "bank" to open the bank box (requires standing near a banker), then
# loop forever: chop carried logs into boards and bank the boards.
Player.ChatSay(12, "bank")
while True:
    LogToBoard(axe_serial)
    PutItemToBank(board_id, 0)  # amount 0 -- presumably "whole stack"; confirm vs. Razor Enhanced docs
GetItemFromBank(log_id, 200) | [
"craymaru@gmail.com"
] | craymaru@gmail.com |
6f8e46a58facc95761a795ca1509a66b63b362aa | 7d43ba52d958537905cfdde46cc194a97c45dc56 | /PL/Python/library/operating_system/standart_library/os.py | 41f4e75f18ffc7a0d2aa74c2dc8d78be54553b8e | [] | no_license | Koshmatova/workbook | 3e4d1f698a01f2be65c1abc83ee251ebc8a6bbcd | 902695e8e660689a1730c23790dbdc51737085c9 | refs/heads/master | 2023-05-01T02:30:46.868027 | 2021-05-10T03:49:08 | 2021-05-10T03:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,187 | py | os
#по сути запускает nt на win и posix На других ос
#СОДЕРЖИТ дескрипторные файлы
.name:'posix'|'nt'|'mac'|'os2'|'ce'|'java'
#имя ос
.environ:dict
#mutable словарь v окружения
#obj сопоставления(proxy?) с dict v пользовательской среды
#позволяет добавлять/удалять v окружения
.getenv(key, default=None)
#получение v окружения, в ОТЛ от .environ !>> exept при !СУЩ v
#~ os.environ.get("key")
.getlogin()
#Unix:username вошедшего в терминал
.getpid() -> current_process_pid
.uname()
#информация об ос
#>> obj ВКЛЮЧ attrs
.sysname
#os name
.nodename
#имя машины в сети
#?определяется реализацией
.release
#?релиз
.version
.machine
#?id машины
.access(path, mode, *, dir_fd=None, effective_ids=False, follow_symlinks=True)
#проверка доступа текущего пользователя к obj
#?флаги(mode?)
os.F_OK
#obj СУЩ
os.R_OK
#obj доступен на чтение
os.W_OK
#obj доступен на запись
os.X_OK
#obj доступен на exe
.chdir(path)
#смена текущей dir
.chmod(path, mode, *, dir_fd=None, follow_symlinks=True)
#ИЗМ прав доступа к obj
mode
#oct int
.chown(path, uid, gid, *, dir_fd=None, follow_symlinks=True)
#Unix:ИЗМ id владельца & группы
.getcwd() -> current_working_dir
.link(src, dst, *, src_dir_fd=None, dst_dir_fd=None, follow_symlinks=True)
#создание жеской ссылки
.listdir(path=".")
#список файлов и dirs в директории
.makedirs()
#
.mkdir(path, mode=0o777, exist_ok=False)
#создание dir с созданием промежуточных директорий
.remove(path, *, dir_fd=None)
#?удаление файла
.rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
#переимонование obj
#без замены в случае СУЩ?
.renames(old, new)
#переименование с создание промежуточных dirs
#без замены в случае СУЩ?
.replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
#переимонование с принудительной заменой
.rmdir(path, *, dir_fd=None)
#удаление пустой dir
.removedirs(path)
#рекурсивно(?{xn}) удаляет директории в пути от последней пока они пусты
.putenv()
#
.symlink(source, link_name, target_is_directory=False, *, dir_fd=None)
#создать симлинк
.startfile()
#
.sync()
#Unix:запись ВСЕХ данных на диск
.truncate(path, length)
#обрезать файл до length
.utime(path, times=None, *, ns=None, dir_fd=None, follow_symlink=True)
#ИЗМ времени последнего доступа/ИЗМ файла
times:(access:secs, mod:secs)
#исключает ns
ns:(access:nsecs, mod:nsecs)
#исключает times
.popen(cmd,mode='r',bufferung=-1)
#запуск команд оболочки и(в отличие от .system) предоставляет интерфейс(подобный файлам) для чтения/записи в их стандартные потоки данных в отдельном процессе,подключается к потокам вывода программы
#похож на fileobj но значетельно отличается интерфейсами
f = os.popen('dir')
f >> <os._wrap_close obj ...>
f.readline() >> ' ’®¬ ў гбва®©б⢥ C \xadҐ Ё¬ҐҐв ¬ҐвЄЁ.\n'
bytes(f.readline(),'866') >> build\n #Не сработает для кириллицы
#or
os.popen('chcp') >> 866
os.popen('chcp 65001')
os.popen('chcp') >> 65001 #кодировка сохраняется для одного процесса
#как это закрепить?
#2.X кажется имеет свой итератор
f = os.popen()
f is iter(f) >> True
f.next() >> #работает
next(f) >> #работает
#3.x не имеет своего итератора, но умеет next
f = os.popen('dir')
f >> <os._wrap_close obj ...>
f.__next__() >> #работает
next(f) >> TypeErr: '_wrap_close' obj is not an iterator
f is iter(f) >> False #действительно не итератор
#истощается ?
f = os.popen('chcp')
f.readline() >> 'Active character encoding 866'
f.readline() >> ''
f.seek(0) >> ''
os.popen('systeminfo') >> запускает консоль(внутри py.exe),но сжирает весь вывод - результат не отображается без ручного вывода
#выполняет команду, возвращает <os._wrap_close obj> и продолжает выполнение в отличие от os.system
#может использоваться для
чтения результатов сконструированной командной строки фиксирующей продолжительнсть выполнения кода(timeit?)(лутц:глава 21)
сравнение выводов тестируемых сценариев(лутц: глава 25)
.open() -> int #descriptor
#open a file a low level io
.walk(top, topdown=True, onerror=None, followlinks=False) -> <generator object walk>
#генерация имен файлов дерева каталогов
topdown
topdown=True
#генерация сверху вниз
#для КАЖД каталога >> (path, dirs_list, files_list)
#старндартная генераторная(yield+yield from) fx рекурсивного прохода по каталогам -> не требует ожидания обхода всего дерева, а выдает результаты по мере поступления
#возвращает кортеж кортежей вида
(('dir_name',(contain_dir,...),[files,...]),...)
#на КАЖД уровне дерева каталогов
.system(command) -> int
	#есть и в nt, и в posix (поведение и код возврата зависят от ОС)
#запуск команды оболочки в отдельном процессе, ждет завершения процесса прежде чем возобновить поток выполнения
#возвращает 0 в случае успеха, 1 в случае неудачи, например отсутсвие команды;не возвращает вывод и не открывает консольные окна
os.system('notepad') >> открывает notepad и ждет
os.system('cmd') >> idle:ждет;из файла - запускает cmd
os.system('dir') >> 0
.urandom(n)
#>> n случайных байт
#исп в криптографических целях
.path
#модуль
#работа с путями | [
"mkone112@gmail.com"
] | mkone112@gmail.com |
216375b04dadfc2f8857cb1a6300c1c69a16f350 | c61a28aba19f7cdf9a5127e8a782bf115c265e70 | /env/bin/csscapture | 84fbd44daf929a1a716e351d19990cf1389614cf | [] | no_license | sharmilaviji/RecruitPRO-NEW | fa72c8fc00f469a41798b1047c11dcc470fbc495 | dcfaedebe56b45acd6ddcab7e24c939b853a2c8c | refs/heads/master | 2021-05-26T12:14:12.611154 | 2020-04-27T04:40:50 | 2020-04-27T04:40:50 | 254,125,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/sharmila/frappe-bench/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from cssutils.scripts.csscapture import main
if __name__ == '__main__':
    # setuptools console-script shim for cssutils' csscapture tool:
    # strip the Windows wrapper suffix ("-script.pyw" / ".exe") from argv[0]
    # so the tool sees its canonical name, then exit with main()'s status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"sharmiviji1997@gmail.com"
] | sharmiviji1997@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.