blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d4b5f3283fdc684ada01e6fe923d14cf91de7a8 | 2ca07aecfa6ff25b0baae6dc9a707a284c2d1b6d | /common/sectools/sectools/common/crypto/functions/utils/__init__.py | 77a586234824b6d3c65e3214d2a6f28fa93e4306 | [
"BSD-3-Clause",
"OpenSSL",
"MIT",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zhilangtaosha/msm8996-wp-1-0_test_device | ef05af263ba7955263ff91eb81d45b2437bc492e | 6af9b44abbc4a367a9aaae26707079974c535f08 | refs/heads/master | 2023-03-19T02:42:09.581740 | 2021-02-21T01:20:19 | 2021-02-21T01:20:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | # ===============================================================================
#
# Copyright (c) 2013-2016 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
#
# ===============================================================================
'''
Created on Oct 25, 2014
@author: hraghav
'''
import abc
import subprocess
from sectools.common.crypto.functions import FuncImplDiscovery, FUNC_UTILS
class UtilsImplDiscovery(FuncImplDiscovery):
    """Plugin hook advertising the available 'utils' crypto backends."""

    @classmethod
    def is_plugin(cls):
        """This discovery class is always enabled as a plugin."""
        return True

    @classmethod
    def func(cls):
        """Identifier of the function family provided by this plugin."""
        return FUNC_UTILS

    @classmethod
    def modules(cls):
        """Return a mapping of backend id -> implementation class.

        Discovery is best-effort: if the OpenSSL backend cannot be
        imported, an empty mapping is returned instead of raising.
        """
        available = {}
        try:
            from sectools.common.crypto.discovery.defines import MOD_OPENSSL
            from sectools.common.crypto.functions.utils.openssl import UtilsOpenSSLImpl
        except Exception:
            # Missing backend simply isn't advertised.
            return available
        available[MOD_OPENSSL] = UtilsOpenSSLImpl
        return available
class UtilsBase(object):
    """Common interface shared by crypto 'utils' backend implementations."""

    # Supported serialization formats.
    FORMAT_DER = 'der'
    FORMAT_PEM = 'pem'
    FORMATS_SUPPORTED = [FORMAT_DER, FORMAT_PEM]

    # Supported hashing algorithm identifiers.
    HASH_ALGO_SHA1 = 'sha1'
    HASH_ALGO_SHA2 = 'sha2'

    def __init__(self, module):
        """Store the backend module that performs the actual operations."""
        self.module = module

    @abc.abstractmethod
    def hash(self, hashing_algorithm, file_to_hash):
        """Return the digest of ``file_to_hash`` using ``hashing_algorithm``."""
        pass
| [
"lonelyjskj@gmail.com"
] | lonelyjskj@gmail.com |
a6107df3c15761e8c74b5b2a07292c4f084d3f81 | 1c14f85aa4f9c98362568d5cd30cf56f814a54a0 | /user_accounts/migrations/0006_auto_20180302_1721.py | 451d5345628601842ea74db9e9ccfdd8a41c3992 | [] | no_license | dimyG/zakanda_public | 26da2e8c70e34b97097f71e57fd0e732bbeac5a0 | 0445ad0c4f23aa40a472afb7ae3f1dd96e9e009d | refs/heads/master | 2022-11-30T14:01:28.132512 | 2020-08-07T14:54:10 | 2020-08-07T14:54:10 | 285,815,022 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,007 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (2018-03-02, user_accounts 0006).

    Every AlterField below keeps the field's existing default but adds
    ``null=True`` — presumably so ``basicstats`` rows can exist before the
    statistics have been computed; confirm against the model's usage.
    """

    dependencies = [
        ('user_accounts', '0005_auto_20180226_1732'),
    ]

    operations = [
        migrations.AlterField(
            model_name='basicstats',
            name='bet_yield',
            field=models.FloatField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='basicstats',
            name='num_bet_groups',
            field=models.PositiveIntegerField(default=1, null=True),
        ),
        migrations.AlterField(
            model_name='basicstats',
            name='num_bets',
            field=models.PositiveIntegerField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='basicstats',
            name='num_followers',
            field=models.PositiveIntegerField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='basicstats',
            name='num_following',
            field=models.PositiveIntegerField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='basicstats',
            name='num_losses',
            field=models.PositiveIntegerField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='basicstats',
            name='num_opens',
            field=models.PositiveIntegerField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='basicstats',
            name='num_wins',
            field=models.PositiveIntegerField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='basicstats',
            name='roi',
            field=models.FloatField(default=0, null=True),
        ),
        migrations.AlterField(
            model_name='basicstats',
            name='score',
            field=models.FloatField(default=0, null=True),
        ),
    ]
| [
"dimgeows@gmail.com"
] | dimgeows@gmail.com |
ca3bf979f3f95bb0227e30197227edeb4ad65fdb | 94bd1e0d8caae89e8ae7b917d78fb241df650d46 | /gfssi_e03_ssi_area.py | 4aafab0014b207a178784ba5f664388ebe853e80 | [
"MIT"
] | permissive | NingAnMe/GFSSI | 79d36449764a96482b9e2bc3224675c7160ddcd3 | 066ac3dcffe04927aa497ee8b2257bee3ec3789a | refs/heads/master | 2021-06-23T22:00:24.367544 | 2021-02-05T06:10:44 | 2021-02-05T06:10:44 | 196,914,413 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,020 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/8/12
@Author : AnNing
"""
import os
import h5py
import numpy as np
from lib.lib_read_ssi import FY4ASSI, FY3DSSI
from lib.lib_constant import FULL_VALUE
from lib.lib_get_index_by_lonlat import get_data_by_index, get_area_index
def _write_out_file(out_file, result):
    """Write the non-None arrays in ``result`` to a gzip-compressed HDF5 file.

    Creates the output directory if needed.  If every entry in ``result`` is
    None, no file is written.  NaNs are replaced by ``FULL_VALUE`` before
    writing (note: this mutates the arrays in ``result`` in place).

    :param out_file: path of the HDF5 file to create
    :param result: mapping of dataset name -> numpy array (or None)
    """
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    # Bail out when there is no valid data at all.
    if all(value is None for value in result.values()):
        print('没有足够的有效数据,不生成结果文件')
        return
    compression = 'gzip'
    compression_opts = 5
    shuffle = True
    with h5py.File(out_file, 'w') as hdf5:
        for dataset, data in result.items():
            if data is None:
                continue
            # Replace missing values with the project's fill value.
            data[np.isnan(data)] = FULL_VALUE
            hdf5.create_dataset(dataset,
                                dtype=np.float32, data=data,
                                compression=compression,
                                compression_opts=compression_opts,
                                shuffle=shuffle)
    print('成功生成HDF文件 >>>:{}'.format(out_file))
def area(in_file, out_file, left_up_lon=None, left_up_lat=None, right_down_lon=None, right_down_lat=None,
         resolution_type=None, resultid=None):
    """Crop an SSI product file to a lon/lat rectangle and write the subset to HDF5.

    :param in_file: path of the source SSI HDF file
    :param out_file: path of the cropped HDF output file
    :param left_up_lon: longitude of the upper-left corner of the crop rectangle
    :param left_up_lat: latitude of the upper-left corner
    :param right_down_lon: longitude of the lower-right corner
    :param right_down_lat: latitude of the lower-right corner
    :param resolution_type: resolution tag; must contain '4km' or '1km'
    :param resultid: product id used to pick the loader; must contain 'fy4a' or 'fy3d'
    :return: ``out_file`` on success, None on any failure
    """
    print('area <<< :{}'.format(in_file))
    if not os.path.isfile(in_file):
        print('数据不存在:{}'.format(in_file))
        return
    out_path = os.path.dirname(out_file)
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    # Pick the loader class and the full-disk lon/lat grids for the product.
    # NOTE(review): unsupported combinations raise ValueError here, while later
    # failures only print and return None — callers must handle both.
    if 'fy4a' in resultid.lower() and '4km' in resolution_type.lower():
        loader = FY4ASSI
        lons = FY4ASSI.get_longitude_4km()
        lats = FY4ASSI.get_latitude_4km()
    elif 'fy4a' in resultid.lower() and '1km' in resolution_type.lower():
        loader = FY4ASSI
        lons = FY4ASSI.get_longitude_1km()
        lats = FY4ASSI.get_latitude_1km()
    elif 'fy3d' in resultid.lower() and '1km' in resolution_type.lower():
        loader = FY3DSSI
        lons = FY3DSSI.get_longitude_1km()
        lats = FY3DSSI.get_latitude_1km()
    else:
        raise ValueError('不支持此分辨率: {}'.format(resolution_type))
    # Datasets written to the output file (filled below; None entries are skipped).
    data_all = {
        'SSI': None,
        'DirSSI': None,
        'DifSSI': None,
        'G0': None,
        'Gt': None,
        'DNI': None,
        'Latitude': None,
        'Longitude': None,
    }
    try:
        datas = loader(in_file)
        # Values are either bound getter methods (called lazily below) or
        # the lon/lat arrays themselves.
        data_get = {
            'SSI': datas.get_ssi,
            'DirSSI': datas.get_ib,
            'DifSSI': datas.get_id,
            'G0': datas.get_g0,
            'Gt': datas.get_gt,
            'DNI': datas.get_dni,
            'Latitude': lats,
            'Longitude': lons,
        }
        # Row/column index bounds of the requested rectangle on the full grid.
        (row_min, row_max), (col_min, col_max) = get_area_index(lons=lons, lats=lats, left_up_lon=left_up_lon,
                                                                left_up_lat=left_up_lat, right_down_lon=right_down_lon,
                                                                right_down_lat=right_down_lat)
        for dataname in data_all:
            if callable(data_get[dataname]):
                data = data_get[dataname]()
            else:
                data = data_get[dataname]
            data_all[dataname] = get_data_by_index(data=data, row_min=row_min, row_max=row_max,
                                                   col_min=col_min, col_max=col_max)
    except Exception as why:
        print(why)
        print('选取数据过程出错,文件为:{}'.format(in_file))
        return
    try:
        _write_out_file(out_file, data_all)
    except Exception as why:
        print(why)
        print('输出结果文件错误')
        return
    return out_file
| [
"ninganme@qq.com"
] | ninganme@qq.com |
89c6129f1154222ee78183c7ae79f9ac99733671 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/heatmap/colorbar/_separatethousands.py | de1cd8c74ee61d486a9c57a33353a8d2f030d73c | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 455 | py | import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``heatmap.colorbar.separatethousands`` property."""

    def __init__(
        self, plotly_name="separatethousands", parent_name="heatmap.colorbar", **kwargs
    ):
        # Allow callers to override the edit type; default matches the schema.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(SeparatethousandsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
d810867bcfc8c9188e598edd86bf8ba9c1bd4c83 | 71aad099dc4fce3f4afebf0b38d57d11fddabeff | /fabfile.py | 98bacd1656f30e139d9b31cf99c310abd87fc151 | [] | no_license | spiritEcosse/neatapps | e683a0ca746f78f1f0e096ac10ce4f73ed3bcf58 | 623bcac577b7a4d3ea1bfea32c16cbfa39ad37a6 | refs/heads/master | 2020-04-22T09:59:05.586784 | 2015-10-07T12:40:22 | 2015-10-07T12:40:22 | 170,289,874 | 0 | 0 | null | 2019-05-21T08:44:56 | 2019-02-12T09:28:42 | CSS | UTF-8 | Python | true | false | 2,155 | py | __author__ = 'igor'
from fabric.api import local, run, cd, settings
import os
from neatapps.settings import BASE_DIR
from fabric.state import env
from neatapps.settings_local import HOSTS
env.user = 'root'
env.skip_bad_hosts = True
env.warn_only = False
env.parallel = True
env.shell = "/bin/bash -l -i -c"
REQUIREMENTS_FILE = 'requirements.txt'
def deploy():
"""
deploy project on remote server
:return:
"""
local_act()
update_requirements()
remote_act()
def remote_act():
    """
    Run the remote restart sequence on every configured host.

    For each (host, project_dir) in HOSTS: discard local changes, kill any
    running 'neatapps' processes, then start the app again.
    :return: None
    """
    for host, dir_name in HOSTS:
        with settings(host_string=host):
            with cd(dir_name):
                # Drop any uncommitted changes so the checkout matches the pushed state.
                run("git reset --hard")
                # Kill every running process whose command line mentions 'neatapps'.
                run("kill -9 $(ps -ef|grep -v grep |grep 'neatapps' | awk '{print $2}')")
                # Start the application (presumably a launcher script on PATH -- confirm).
                run("neatapps")
def local_act():
    """
    Prepare the deploy locally.

    Activates the project's virtualenv (Python 2 style, via execfile), runs
    tests and migrations, refreshes requirements.txt and static files, then
    commits, merges the current branch into master and pushes to all remotes.
    :return: None
    """
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "neatapps.settings")
    # NOTE(review): execfile is Python-2 only; this script cannot run on Python 3.
    activate_env = os.path.expanduser(os.path.join(BASE_DIR, ".env/bin/activate_this.py"))
    execfile(activate_env, dict(__file__=activate_env))
    local("./manage.py test")
    local("./manage.py compilemessages")
    local("./manage.py makemigrations")
    local("./manage.py migrate")
    # Refresh the pinned dependency list used by update_requirements().
    local("%s%s" % ('pip freeze > ', REQUIREMENTS_FILE))
    local("./manage.py collectstatic --noinput -c")
    local("git add .")
    # Commit message is read from the 'git_commit_message' file.
    local("git commit -a -F git_commit_message")
    current_branch = local("git symbolic-ref --short -q HEAD", capture=True)
    if current_branch != 'master':
        # Merge the feature branch into master and delete it.
        local("git checkout master")
        local("git merge %s" % current_branch)
        local("git branch -d %s" % current_branch)
    local("git push origin")
    local("git push production")
    local("git push my_repo_neatapps_bit")
    local("git push my-production")
def update_requirements():
    """
    Install the pinned Python requirements on every remote host.

    Activates the remote virtualenv and runs pip against REQUIREMENTS_FILE.
    :return: None
    """
    for host, dir_name in HOSTS:
        with settings(host_string=host):
            with cd(dir_name):
                command = 'source .env/bin/activate && pip install -r {0}'.format(REQUIREMENTS_FILE)
                run(command)
| [
"shevchenkcoigor@gmail.com"
] | shevchenkcoigor@gmail.com |
c474e70bdbc4fb0f290fde3f5c81eb82caf24564 | 406e59a45b07c36717073ff2a446a7d5ce8057db | /data_visual/chapter15/dice_visual.py | bc071f90bf277b590d6b6342b524b89d3a5aa4df | [] | no_license | 15032373556/mystudy | ea074802c03ac3efe2577871a39e54f0a81c7b92 | 08861c40633b5cd93122248b676adbc4748bed7d | refs/heads/master | 2022-12-05T18:33:26.058627 | 2020-08-31T10:25:20 | 2020-08-31T10:25:20 | 291,670,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | import pygal
from die import Die
from sum_num import sum_2,sum_3,multip
# Create two six-sided dice.
die_1 = Die()
die_2 = Die()

# Roll the dice 1000 times and record the product of each roll.
results = [die_1.roll() * die_2.roll() for _ in range(1000)]

# Analyze the results.
# BUG FIX: the original used ``die_1.num_sides+1 * die_2.num_sides + 1``,
# which evaluates as ``6 + (1*6) + 1 == 13`` because ``*`` binds tighter
# than ``+`` — so products above 12 were never counted.  The largest
# possible product is ``num_sides * num_sides`` (36 for two D6).
max_result = die_1.num_sides * die_2.num_sides + 1
frequencies = [results.count(value) for value in range(1, max_result)]

# Visualize the results.
hist = pygal.Bar()
hist.title = "Results of rolling two D6 dice 1000 times."
# Axis labels come from the helper module; presumably the product labels
# 1..36 — confirm against sum_num.multip.
hist.x_labels = multip(6, 6)
hist.x_title = "Result"
hist.y_title = "Frequency of Result"
hist.add('D6 * D6', frequencies)
hist.render_to_file('dice_visual_66.svg')
"1798549164@qq.com"
] | 1798549164@qq.com |
faa303ad648a6c7ff8825e4cfa35fe13c27619fb | 5dd47abf7061201d9378e73e51f08fbb314ba2fd | /envdsys/envproject/migrations/0002_auto_20200326_1744.py | 2fa88af2d3c78743c8c0cdfa293983bf102cd8ba | [
"Unlicense"
] | permissive | NOAA-PMEL/envDataSystem | 4d264ae5209015e4faee648f37608d68a4461d0a | 4db4a3569d2329658799a3eef06ce36dd5c0597d | refs/heads/master | 2023-02-23T22:33:14.334737 | 2021-07-22T01:09:16 | 2021-07-22T01:09:16 | 191,809,007 | 1 | 0 | Unlicense | 2023-02-08T00:45:54 | 2019-06-13T17:50:03 | Python | UTF-8 | Python | false | false | 940 | py | # Generated by Django 2.2.1 on 2020-03-26 17:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envproject', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='project',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to='', verbose_name='Logo Image'),
),
migrations.AddField(
model_name='project',
name='website',
field=models.URLField(blank=True, null=True, verbose_name='Project Website'),
),
migrations.AlterField(
model_name='project',
name='long_name',
field=models.CharField(blank=True, max_length=200),
),
migrations.AlterField(
model_name='project',
name='name',
field=models.CharField(max_length=100),
),
]
| [
"derek.coffman@noaa.gov"
] | derek.coffman@noaa.gov |
ddb5c9e743c62271e928590de05e16f13c7cf342 | f8db8b11ad41d246cd4a0d71b75e3a324a481b14 | /SecretPlots/assemblers/__init__.py | 039b40cfcf37663da14edd45bbf7f7bca50520be | [
"MIT"
] | permissive | secretBiology/SecretPlots | b978703497d255eb099b471fdafcea7fdf793bd3 | eca1d0e0932e605df49d1f958f98a1f41200d589 | refs/heads/master | 2022-07-07T07:00:57.625486 | 2022-01-14T05:44:54 | 2022-01-14T05:44:54 | 206,092,023 | 0 | 1 | MIT | 2022-06-21T22:45:18 | 2019-09-03T14:03:56 | Python | UTF-8 | Python | false | false | 433 | py | # SecretPlots
# Copyright (c) 2019. SecretBiology
#
# Author: Rohit Suratekar
# Organisation: SecretBiology
# Website: https://github.com/secretBiology/SecretPlots
# Licence: MIT License
# Creation: 05/10/19, 7:44 PM
#
from SecretPlots.assemblers._base import Assembler
from SecretPlots.assemblers._bars import BarAssembler, BarGroupedAssembler
from SecretPlots.assemblers._matrix import ColorMapAssembler, BooleanAssembler
| [
"rohitsuratekar@gmail.com"
] | rohitsuratekar@gmail.com |
02fc867d8ca2fded13b2e27d71b97cb8471db1c6 | 55815c281f6746bb64fc2ba46d074ca5af966441 | /medium/1261.py | c20fe47a1a06a1dbaa21b9bc1188fc7d9b490f82 | [] | no_license | brandoneng000/LeetCode | def5107b03187ad7b7b1c207d39c442b70f80fc2 | c7a42753b2b16c7b9c66b8d7c2e67b683a15e27d | refs/heads/master | 2023-08-30T23:38:04.845267 | 2023-08-30T08:42:57 | 2023-08-30T08:42:57 | 199,584,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | from typing import Optional
# Definition for a binary tree node.
class TreeNode:
    """Node of a binary tree (standard LeetCode definition)."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val      # payload stored at this node
        self.left = left    # left child (TreeNode or None)
        self.right = right  # right child (TreeNode or None)

    def __repr__(self):
        # Debug aid only; children are omitted to avoid deep recursion.
        return f"TreeNode(val={self.val!r})"
class FindElements:
    """Recover the values of a 'contaminated' binary tree (LeetCode 1261).

    In the original tree the root has value 0 and a node with value x has
    children 2*x + 1 (left) and 2*x + 2 (right).  All recovered values are
    stored in a set so ``find`` runs in O(1).
    """

    def __init__(self, root: Optional[TreeNode]):
        # Iterative pre-order walk; visits exactly the same nodes (and adds
        # exactly the same values) as the original recursive DFS.
        self.tree_vals = set()
        pending = [(root, 0)]
        while pending:
            node, value = pending.pop()
            if node is None:
                continue
            self.tree_vals.add(value)
            pending.append((node.left, value * 2 + 1))
            pending.append((node.right, value * 2 + 2))

    def find(self, target: int) -> bool:
        """Return True if ``target`` occurred as a value in the recovered tree."""
        return target in self.tree_vals
# Your FindElements object will be instantiated and called as such:
# obj = FindElements(root)
# param_1 = obj.find(target) | [
"brandoneng000@gmail.com"
] | brandoneng000@gmail.com |
1dbfce8ff2e4ea921f1c9038ecf9b33e745429a5 | 0213dfa0195d88fd8bfa3027a5888c2b98c79562 | /firebase_login.py | e20b5747826492447e98c86b398125b5e2bcf6ef | [] | no_license | DavidArmendariz/dash-python | 9cd02a06eab46a49ec33e430d9c470263b8d06c6 | f0574c73b635929d1842dd27834e339b048516b5 | refs/heads/master | 2023-01-02T18:43:50.976001 | 2020-11-02T16:36:13 | 2020-11-02T16:40:39 | 222,872,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | import requests
# Firebase Identity Toolkit endpoint used to verify email/password credentials.
_verify_password_url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword'
# NOTE(review): placeholder value — supply the real Web API key via
# configuration or an environment variable; never commit secrets to source.
api_key = "your_api_key"
def sign_in_with_password(email, password):
    """Verify an email/password pair against the Firebase verifyPassword endpoint.

    :param email: account email address
    :param password: plaintext password to verify
    :return: True when Firebase accepts the credentials (HTTP 2xx),
        False on any request failure or non-success status code.
    """
    body = {'email': email, 'password': password}
    params = {'key': api_key}
    try:
        resp = requests.request('post', _verify_password_url, params=params, json=body)
        resp.raise_for_status()
    except requests.exceptions.RequestException:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; callers still simply get False on failure.
        return False
    return True
"darmendariz1998@outlook.com"
] | darmendariz1998@outlook.com |
ff4498ec0589e7899a19c0ba51e30c0908698ad8 | 33b92f44ab665c6418742d3142104864312b6597 | /ValveBatchExport/ValveBatchExportRules/QuantificationResults.py | eb6e90791a3eb16bd6f5b6a3f43b9b2d28c20b84 | [
"BSD-3-Clause",
"CC-BY-4.0"
] | permissive | SlicerHeart/SlicerHeart | ebe46aec9fd00f9cee7907fa5ad2774ff3e25c93 | 09e440d1cc821e5b5f01146eddfcbd65edec7f32 | refs/heads/master | 2023-08-17T08:42:01.948700 | 2023-08-16T20:19:14 | 2023-08-16T20:19:14 | 38,175,829 | 78 | 33 | BSD-3-Clause | 2023-09-07T14:07:01 | 2015-06-27T21:23:46 | Python | UTF-8 | Python | false | false | 6,557 | py | import os
import logging
import slicer
from pathlib import Path
from collections import OrderedDict
from .base import ValveBatchExportRule
from HeartValveLib.helpers import getSpecificHeartValveMeasurementNodes, getAllFilesWithExtension
class QuantificationResultsExportRule(ValveBatchExportRule):
    """Batch-export rule that recomputes valve quantification metrics for every
    scene and writes them out as wide / long / hybrid CSV tables plus a units table."""

    BRIEF_USE = "Valve quantification results (.csv)"
    DETAILED_DESCRIPTION = """Export results computed in Valve quantification module. All metrics will be
    recomputed using current software version
    """

    # Column layouts of the exported tables.
    WIDE_COLUMNS = ['Filename', 'Phase', 'Measurement']
    LONG_COLUMNS = WIDE_COLUMNS + ['Value']
    UNIT_COLUMNS = ['Measurement', 'Unit']

    # Output file names for the four CSV products.
    WIDE_CSV_OUTPUT_FILENAME = 'QuantificationResults_wide.csv'
    LONG_CSV_OUTPUT_FILENAME = 'QuantificationResults_long.csv'
    HYBRID_CSV_OUTPUT_FILENAME = 'QuantificationResults_hybrid.csv'
    UNITS_CSV_OUTPUT_FILENAME = 'QuantificationUnits.csv'
    OUTPUT_CSV_FILES = [
        WIDE_CSV_OUTPUT_FILENAME,
        LONG_CSV_OUTPUT_FILENAME,
        HYBRID_CSV_OUTPUT_FILENAME,
        UNITS_CSV_OUTPUT_FILENAME
    ]

    CMD_FLAG = "-qr"
    # Identifier of the measurement nodes / result tables this rule consumes.
    QUANTIFICATION_RESULTS_IDENTIFIER = 'Quantification results'

    def processStart(self):
        """Initialize accumulators and table nodes before any scene is processed."""
        self.unitsDictionary = OrderedDict()
        self.wideResultsTableNode = self.createTableNode(*self.WIDE_COLUMNS)
        self.longResultsTableNode = self.createTableNode(*self.LONG_COLUMNS)
        # metricName -> filename -> phaseName -> value; flushed in processEnd().
        self.hybridTempValues = dict()
        # Logic object of the Valve Quantification Slicer module.
        self.valveQuantificationLogic = slicer.modules.valvequantification.widgetRepresentation().self().logic

    def processScene(self, sceneFileName):
        """Recompute and collect quantification metrics for one loaded scene."""
        for measurementNode in getSpecificHeartValveMeasurementNodes(self.QUANTIFICATION_RESULTS_IDENTIFIER):
            cardiacCyclePhaseNames = self.valveQuantificationLogic.getMeasurementCardiacCyclePhaseShortNames(measurementNode)
            cardiacCyclePhaseName = ''
            if len(cardiacCyclePhaseNames) == 1:
                cardiacCyclePhaseName = cardiacCyclePhaseNames[0]
                # Skip phases the user did not select for export.
                if not cardiacCyclePhaseName in self.EXPORT_PHASES:
                    continue
            elif len(cardiacCyclePhaseNames) > 1:
                # Multi-phase comparison node: export only when all its phases were selected.
                cardiacCyclePhaseName = "multiple"
                if not all(phaseName in self.EXPORT_PHASES for phaseName in cardiacCyclePhaseNames):
                    logging.debug("Multiple phases compare measurement node found but selected phases don't match those. Skipping")
                    continue
            # Recompute all measurements
            try:
                self.addLog(f"Computing metrics for '{cardiacCyclePhaseName}'")
                self.valveQuantificationLogic.computeMetrics(measurementNode)
            except Exception as exc:
                # A failing scene is logged and skipped; the batch continues.
                logging.warning(f"{sceneFileName} failed with error message: \n{exc}")
                import traceback
                traceback.print_exc()
                continue
            quantificationResultsTableNode = \
                self.getTableNode(measurementNode, self.QUANTIFICATION_RESULTS_IDENTIFIER)
            measurementPresetId = self.valveQuantificationLogic.getMeasurementPresetId(measurementNode)
            if quantificationResultsTableNode:
                filename, file_extension = os.path.splitext(os.path.basename(sceneFileName))
                # long data table
                self.addRowData(self.longResultsTableNode, filename, cardiacCyclePhaseName, "ValveType", measurementPresetId)
                # wide table
                resultsTableRowIndex = \
                    self.addRowData(self.wideResultsTableNode, filename, cardiacCyclePhaseName, measurementPresetId)
                numberOfMetrics = quantificationResultsTableNode.GetNumberOfRows()
                for metricIndex in range(numberOfMetrics):
                    # Columns 0..2 of the results table are name, value, unit.
                    metricName, metricValue, metricUnit = self.getColData(quantificationResultsTableNode, metricIndex, range(3))
                    # wide data table
                    self.setValueInTable(self.wideResultsTableNode, resultsTableRowIndex, metricName, metricValue)
                    # long data table
                    self.addRowData(self.longResultsTableNode, filename, cardiacCyclePhaseName, metricName, metricValue)
                    # hybrid data table
                    if not metricName in list(self.hybridTempValues.keys()):
                        self.hybridTempValues[metricName] = dict()
                    if not filename in list(self.hybridTempValues[metricName].keys()):
                        self.hybridTempValues[metricName][filename] = dict()
                    self.hybridTempValues[metricName][filename][cardiacCyclePhaseName] = metricValue
                    # Last unit seen for a metric wins (units assumed consistent).
                    self.unitsDictionary[metricName] = metricUnit

    def processEnd(self):
        """Write the accumulated tables to CSV after all scenes were processed."""
        self._writeUnitsTable()
        self.writeTableNodeToCsv(self.wideResultsTableNode, self.WIDE_CSV_OUTPUT_FILENAME, useStringDelimiter=True)
        self.writeTableNodeToCsv(self.longResultsTableNode, self.LONG_CSV_OUTPUT_FILENAME, useStringDelimiter=True)

        def getPhases():
            # Collect every phase name that occurs anywhere in the hybrid data.
            _phases = list()
            for _filenames in self.hybridTempValues.values():
                for __phases in _filenames.values():
                    _phases.extend(list(__phases.keys()))
            return set(_phases)

        # hybrid data table: one row per (metric, filename), one column per phase.
        phases = sorted(getPhases())
        resultsHybridTableNode = self.createTableNode('Measurement', 'Filename', *phases)
        for metricName, filenames in self.hybridTempValues.items():
            for filename, values in filenames.items():
                # Empty string for phases that have no value for this metric/file.
                phaseValues = [values[phase] if phase in values.keys() else "" for phase in phases]
                self.addRowData(resultsHybridTableNode, metricName, filename, *phaseValues)
        self.writeTableNodeToCsv(resultsHybridTableNode, self.HYBRID_CSV_OUTPUT_FILENAME, useStringDelimiter=True)

    def _writeUnitsTable(self):
        """Write the metric-name -> unit mapping collected during processing."""
        unitsTableNode = self.createTableNode(*self.UNIT_COLUMNS)
        # iterate over units dict
        for metricName, metricUnit in self.unitsDictionary.items():
            self.addRowData(unitsTableNode, metricName, metricUnit)
        self.writeTableNodeToCsv(unitsTableNode, self.UNITS_CSV_OUTPUT_FILENAME, useStringDelimiter=True)

    def mergeTables(self, inputDirectories, outputDirectory):
        """Concatenate the per-directory CSV outputs into combined tables."""
        unitCSVs = self.findCorrespondingFilesInDirectories(inputDirectories, self.UNITS_CSV_OUTPUT_FILENAME)
        # Units repeat across directories, so duplicates are dropped.
        self.concatCSVsAndSave(unitCSVs, Path(outputDirectory) / self.UNITS_CSV_OUTPUT_FILENAME, removeDuplicateRows=True)
        longCSVs = self.findCorrespondingFilesInDirectories(inputDirectories, self.LONG_CSV_OUTPUT_FILENAME)
        self.concatCSVsAndSave(longCSVs, Path(outputDirectory) / self.LONG_CSV_OUTPUT_FILENAME)
        wideCSVs = self.findCorrespondingFilesInDirectories(inputDirectories, self.WIDE_CSV_OUTPUT_FILENAME)
        self.concatCSVsAndSave(wideCSVs, Path(outputDirectory) / self.WIDE_CSV_OUTPUT_FILENAME)
        hybridCSVs = self.findCorrespondingFilesInDirectories(inputDirectories, self.HYBRID_CSV_OUTPUT_FILENAME)
        self.concatCSVsAndSave(hybridCSVs, Path(outputDirectory) / self.HYBRID_CSV_OUTPUT_FILENAME)
"lasso@queensu.ca"
] | lasso@queensu.ca |
362055f348f5bdd9e9815a9fd4a90b337fb7e476 | 3ced55b04ec82df5257f0e3b500fba89ddf73a8a | /tests/molecular/writers/xyz/conftest.py | 4762327c235ce62b0574093cd599d68259bead29 | [
"MIT"
] | permissive | rdguerrerom/stk | 317282d22f5c4c99a1a8452023c490fd2f711357 | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | refs/heads/master | 2023-08-23T21:04:46.854062 | 2021-10-16T14:01:38 | 2021-10-16T14:01:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | import pytest
import stk
from .case_data import CaseData
@pytest.fixture(
    params=(
        lambda: CaseData(
            molecule=stk.BuildingBlock('BrCCBr', [stk.BromoFactory()]),
            writer=stk.XyzWriter(),
            # Expected .xyz output: atom count, blank comment line, then one
            # "Element x y z" line per atom.  Coordinates are presumably the
            # embedded geometry of this building block — confirm if they drift.
            string=(
                '8\n\nBr -1.423838 1.561473 0.322335\nC -0.740543 -0.2'
                '57311 0.127980\nC 0.714791 -0.115704 -0.338259\nBr 1.'
                '626726 0.889555 1.068701\nH -1.351758 -0.807456 -0.59'
                '3854\nH -0.776931 -0.696380 1.144036\nH 0.769475 0.52'
                '7986 -1.238698\nH 1.182078 -1.102163 -0.492240\n'
            ),
        ),
    ),
)
def case_data(request) -> CaseData:
    """Yield one XyzWriter test case (molecule, writer, expected file content)."""
    return request.param()
| [
"noreply@github.com"
] | rdguerrerom.noreply@github.com |
d85bd4322220b2482ece10b207a43c99487b7f9c | bdb3716c644b8d031af9a5285626d7ccf0ecb903 | /code/UI/ClientExamples/Python/ExampleQuery_ARAXi__Workflow_Combo.py | 2746ad3c61839a9ddfbb3d73c805ae15c72504ac | [
"MIT",
"Apache-2.0"
] | permissive | RTXteam/RTX | 97d2a8946d233d48cc1b165f5e575af21bda4b26 | ed0693dd03149e56f7dfaf431fb8a82ace0c4ef3 | refs/heads/master | 2023-09-01T21:48:49.008407 | 2023-09-01T20:55:06 | 2023-09-01T20:55:06 | 111,240,202 | 43 | 31 | MIT | 2023-09-14T16:20:01 | 2017-11-18T21:19:13 | Python | UTF-8 | Python | false | false | 2,385 | py | """ This example sends a simple set of DSL commands to the ARAX API.
"""
# Import minimal requirements
import requests
import json
import re
# Set the base URL for the ARAX reasoner and its endpoint
endpoint_url = 'https://arax.ncats.io/api/arax/v1.1/query'

# Create a dict of the request, specifying the list of DSL commands
# plus a TRAPI workflow step that trims the result list.
request = {
    "message": {},
    "operations": { "actions": [
        "add_qnode(name=acetaminophen, key=n00)",
        "add_qnode(categories=biolink:Protein, key=n01)",
        "add_qedge(subject=n01, object=n00, key=e00)",
        "expand()",
        "overlay(action=compute_ngd, virtual_relation_label=N1, subject_qnode_key=n00, object_qnode_key=n01)",
        "resultify()",
    ] },
    "workflow": [
        { "id": "filter_results_top_n", "parameters": { "max_results": 17 } }
    ]
}

# Send the request to RTX and check the status
print(f"INFO: Sending ARAXi + workflow combo program to {endpoint_url}")
response_content = requests.post(endpoint_url, json=request, headers={'accept': 'application/json'})
status_code = response_content.status_code
if status_code != 200:
    # On failure, dump the full response body for debugging and stop.
    print("ERROR returned with status "+str(status_code))
    response_dict = response_content.json()
    print(json.dumps(response_dict, indent=2, sort_keys=True))
    exit()

# Unpack the response content into a dict
response_dict = response_content.json()
#print(json.dumps(response_dict, indent=2, sort_keys=True))

# Display the information log
for message in response_dict['logs']:
    # NOTE(review): 'True or' disables the DEBUG filter, so every log entry
    # is printed; drop the 'True or' to hide DEBUG messages again.
    if True or message['level'] != 'DEBUG':
        print(f"{message['timestamp']}: {message['level']}: {message['message']}")

# Display the results
print(f"Results ({len(response_dict['message']['results'])}):")
for result in response_dict['message']['results']:
    # Missing or null confidence is shown as 0.0.
    confidence = 0.0
    if 'confidence' in result:
        confidence = result['confidence']
    if confidence is None:
        confidence = 0.0
    essence = '?'
    if 'essence' in result:
        essence = result['essence']
    print("  -" + '{:6.3f}'.format(confidence) + f"\t{essence}")

# These URLs provide direct access to resulting data and GUI
print(f"Data: {response_dict['id']}")
if response_dict['id'] is not None:
    # The trailing digits of the response id select the GUI view.
    match = re.search(r'(\d+)$', response_dict['id'])
    if match:
        print(f"GUI: https://arax.ncats.io/NewFmt/?r={match.group(1)}")
| [
"edeutsch@systemsbiology.org"
] | edeutsch@systemsbiology.org |
1ce57ab64e06a5bbeeefde752bdbc5f55551c62b | 020eb7ca1826df843cc294590ce55ef8b2076263 | /coursework/control-flow/using-conditional-statements/exercise3/func.py | c96fa26d8270a91a99b6487c75080eb6c2a3bddc | [] | no_license | murffious/pythonclass-cornell | d95e3a4a50653f3caee462fcd6bd6a5f476c7248 | da4abfac3af0f4706c1c4afcf1ff978aa41e765c | refs/heads/master | 2023-02-06T05:27:41.199544 | 2020-03-23T17:27:36 | 2020-03-23T17:27:36 | 227,713,413 | 0 | 0 | null | 2023-02-02T05:14:41 | 2019-12-12T23:11:32 | Python | UTF-8 | Python | false | false | 1,636 | py | """
A function to extract names from e-mail addresses.
Author: Paul Murff
Date: Jan 9 2020
"""
import introcs
def extract_name(s):
    """
    Returns the first name of the person in e-mail address s.

    We assume (see the precondition below) that the e-mail address is in one of
    three forms:

        last.first@megacorp.com
        last.first.middle@consultant.biz
        first.last@mompop.net

    where first, last, and middle correspond to the person's first, middle, and
    last name. Names are not empty, and contain only letters. Everything after the
    @ is guaranteed to be exactly as shown.

    The function preserves the capitalization of the e-mail address.

    Examples:
        extract_name('smith.john@megacorp.com') returns 'john'
        extract_name('McDougal.Raymond.Clay@consultant.biz') returns 'Raymond'
        extract_name('maggie.white@mompop.net') returns 'maggie'
        extract_name('Bob.Bird@mompop.net') returns 'Bob'

    Parameter s: The e-mail address to extract from
    Precondition: s is in one of the two address formats described above
    """
    # You must use an if-elif-else statement in this function.
    local_part, _, domain = s.partition('@')
    if domain == 'megacorp.com':
        # last.first -> everything after the first dot
        first = local_part.partition('.')[2]
    elif domain == 'mompop.net':
        # first.last -> everything before the first dot
        first = local_part.partition('.')[0]
    elif domain == 'consultant.biz':
        # last.first.middle -> the middle component
        first = local_part.split('.')[1]
    else:
        # Unreachable under the precondition; mirrors the original's '' fallback.
        first = ''
    return first
| [
"titan.murff@gmail.com"
] | titan.murff@gmail.com |
4b001d53c03bc9f56445647b9cb02b088ddb632e | 264ce32d9eebb594cc424ecb3b8caee6cb75c2f3 | /content/hw/04_parameter_null/ok/tests/q2_07.py | 84c54e5929d731f39069c4cd1da758bbe23ce0c3 | [] | no_license | anhnguyendepocen/psych101d | a1060210eba2849f371d754e8f79e416754890f9 | 41057ed5ef1fd91e243ab41040f71b51c6443924 | refs/heads/master | 2022-03-24T02:20:32.268048 | 2019-12-21T02:51:02 | 2019-12-21T02:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | test = {
"name": "2: Estimating p and the null distribution of t, easy task",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
>>> # Is the center of the distribution plausible?
>>> np.abs(null_samples.mean() - true_mu_t) < sem_bound
True
>>> # Is the spread of the distribution reasonable?
>>> np.abs(null_samples.var() - true_var_t) < sev_bound
True
""",
"hidden": False,
"locked": False
},
{
"code": r"""
>>> # Is the value of p reasonable?
>>> 0.5 < easy_p_pymc < 0.6
True
""",
"hidden": False,
"locked": False
}
],
"setup": """
>>> null_samples = pd.Series(pymc_null_ts_easy)
>>> n_samples = len(null_samples)
>>> df = 18
>>> true_mu_t = 0
>>> true_var_t = bound.compute_var_t(df)
>>> sem_bound = bound.get_bound_t_mean(n_samples, df=df)
>>> sev_bound = bound.get_bound_t_variance(n_samples, df=df)
""",
"teardown": "",
"type": "doctest"}]
}
| [
"charlesfrye@berkeley.edu"
] | charlesfrye@berkeley.edu |
7c18941abf61e4145754fe7e1e3b4d587799f0d0 | f70c83e63a1a6ae55083d879197ffeeecfb6036c | /catkin_ws/buildold/ros_arduino_bridge/ros_arduino_firmware/catkin_generated/pkg.installspace.context.pc.py | b73a6eac062336446d3f1ec5d298dac4998391e4 | [] | no_license | jdumont0201/ros-cpp-robot | ed46b004bfac890a32b76d346a3ad506624c3cda | b48e0f0a84c8720e59aebdfc3a28f014a64675c6 | refs/heads/master | 2020-03-16T16:22:15.134808 | 2018-05-23T08:24:40 | 2018-05-23T08:24:40 | 132,784,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated by catkin's template substitution; the empty-string
# expressions below are the template's way of producing empty lists when the
# package exports nothing.  Do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []  # no exported include dirs
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')  # no catkin dependencies declared
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []  # no exported libraries
PROJECT_NAME = "ros_arduino_firmware"
PROJECT_SPACE_DIR = "/home/jbmdumont/catkin_ws/install"
PROJECT_VERSION = "0.2.0"
| [
"jbmdumont@localhost.localdomain"
] | jbmdumont@localhost.localdomain |
a74f3d34d7c2cdc48582c70f7525ba1946d0cca9 | dc089930c6dd627b1ca37188c9515b6b7bfc3ba3 | /conscious_consumer/store/urls.py | b3978acfece9cb1ceabcc26029a7ad4ce0f8a4a8 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | UPstartDeveloper/conscious-consumer | 923e57b6d1478b654786bfdaa905e7fde17305d7 | 98c5b8c9612b536aa8873353dd48d916287dff5d | refs/heads/master | 2022-11-29T16:16:57.726101 | 2021-04-26T19:57:59 | 2021-04-26T19:57:59 | 247,784,145 | 0 | 0 | MIT | 2022-11-22T07:39:21 | 2020-03-16T18:03:00 | JavaScript | UTF-8 | Python | false | false | 632 | py | from django.urls import path
from .views import (
ProductList,
ProductCreate,
ProductDetail,
ProductUpdate,
ProductDelete,
)
app_name = "store"
urlpatterns = [
# Product CRUD-related URLs
path("products/", ProductList.as_view(), name="product_list"),
path("products/new/", ProductCreate.as_view(), name="product_create"),
path("products/<slug:slug>/edit/", ProductUpdate.as_view(), name="product_update"),
path(
"products/<slug:slug>/delete/", ProductDelete.as_view(), name="product_delete"
),
path("products/<slug:slug>/", ProductDetail.as_view(), name="product_detail"),
]
| [
"zainr7989@gmail.com"
] | zainr7989@gmail.com |
97823ad8e8fe53a9e20e34f9ce7ce89c75df003e | 010c5fbc97731286be00028ff33fc981d943bca3 | /primal/src/code/impute/tests/cgi/count_cgi_genotypes.old.py | f36441491c351e27f58bc314722118098484e0fa | [] | no_license | orenlivne/ober | 6ce41e0f75d3a8baebc53e28d7f6ae4aeb645f30 | 810b16b2611f32c191182042240851152784edea | refs/heads/master | 2021-01-23T13:48:49.172653 | 2014-04-03T13:57:44 | 2014-04-03T13:57:44 | 6,902,212 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,419 | py | #!/usr/bin/env python
'''
============================================================
Calculate call rates in imputed CGI files.
Created on February 18, 2012
@author: Oren Livne <livne@uchicago.edu>
============================================================
'''
import sys, os, csv, itertools, numpy as np, traceback, util
from impute.phasing.examples import wgs_sample_index
from optparse import OptionParser
#---------------------------------------------
# Constants
#---------------------------------------------
# All possible genotypes
GENOTYPES = [x[0] + x[1] for x in list(itertools.product('N01', 'N01'))]
# Converts CGI allele code to our numeric code
CGI_LETTER_TO_ALLELE = {'N': 0, '0': 1, '1': 2}
def genotype_start_index(line):
'''Return the start index of g entries in the list line. If not found, returns -1.'''
index = 6
for x in line[6:]:
if x in GENOTYPES:
return index
else:
index += 1
return -1
def print_count_by_snp(lines, out, id_list):
'''Count total genotypes for each SNPs.'''
# Initialize all genotype counts at 0
# Stream lines and increment counts
for line in lines:
# Lines may start with a variable no. of items from the csv reader's perspective (e.g.,
# indel with empty substitution fields will result in consecutive spaces. Calculate the
# start of the genotype sublist
index = genotype_start_index(line)
genotype = line[index:]
# Pick out the relevant IDs
count = dict(zip(GENOTYPES, [0] * len(GENOTYPES)))
for x in (np.array(genotype)[id_list] if id_list is not None else genotype):
count[x] += 1
print_count_total(count, out)
def count_total(lines, id_list, variant_type=None, phasing_rate= 0.0):
'''Count total genotypes over the entire file.'''
# Initialize all genotype counts at 0
count = dict(zip(GENOTYPES, [0] * len(GENOTYPES)))
wgs = wgs_sample_index()
total_wgs = len(wgs)
filter_on_phasing = phasing_rate > 0.0001
filter_on_variant_type = variant_type != 'all'
fully_called = lambda x: x == '00' or x == '01' or x == '10' or x == '11'
# Stream lines and increment counts
for line in lines:
# Filter variant type
if filter_on_variant_type and line[4] != variant_type:
continue
# Lines may start with a variable no. of items from the csv reader's perspective (e.g.,
# indel with empty substitution fields will result in consecutive spaces. Calculate the
# start of the genotype sublist
genotype = line[genotype_start_index(line):]
# Filter to phasing rate >= phasing_rate
if filter_on_phasing:
rate = float(len(np.where(map(fully_called, np.array(genotype)[wgs]))[0])) / total_wgs
if rate < phasing_rate:
continue
# Pick out the relevant IDs
for x in (np.array(genotype)[id_list] if id_list is not None else genotype):
count[x] += 1
return count
def print_count_total(count, out):
'''Print total count results: (genotype count frequency) columns for all genotypes.'''
total = sum(count.itervalues())
for k in GENOTYPES:
out.write('%s %8d %.3f ' % (''.join(map(str, map(CGI_LETTER_TO_ALLELE.get, k))), count[k], (1.0 * count[k]) / total))
out.write('\n')
####################################################################################
def __parse_command_line_args():
'''Parse and validate command-line arguments.'''
PROGRAM = os.path.basename(sys.argv[0])
usage = 'Usage: %s\n' \
'Calculate call rates in a CGI imputed tab-delimited standard input.\n' \
'\nType ''%s -h'' to display full help.' % (PROGRAM, PROGRAM)
parser = OptionParser(usage=usage)
parser.add_option('-d', '--data-file', type='str' , dest='data_file',
default=None, help='If specified, reads from data file, otherwise reads from stdin')
parser.add_option('-i', '--id-index-file', type='str' , dest='id_file',
default=None, help='If specified, outputs only the IDs listed in this file (these are indices between 0 and #ids-1, if the input file has #ids genotype columns)')
parser.add_option('-s', '--snp', action='store_true' , dest='group_by_snp', default=False,
help='Group by snp')
parser.add_option('-t', '--variant-type', type='str', dest='variant_type', default='all',
help='Variant type to select (e.g. snp). ''all'' counts all variants.')
parser.add_option('-p', '--min-phasing-rate', type='float', dest='phasing_rate', default= 0.0,
help='Minimum WGS phasing rate to consider (non-negative value will disable this option)')
options, args = parser.parse_args(sys.argv[1:])
if len(args) != 0:
print usage
sys.exit(1)
return options
def __main(options):
'''Main program - accepts an options struct.'''
# If id file is specified, read into the 'id_list' array
id_list = np.loadtxt(options.id_file, dtype=np.int) if options.id_file else None
# Init dictionary of all-possible-genotypes-to-counts
try:
f = open(options.data_file, 'rb') if options.data_file else sys.stdin
lines = (line for line in csv.reader(f, delimiter='\t', skipinitialspace=True) if line)
if options.group_by_snp:
print_count_by_snp(lines, sys.stdout, id_list)
else:
count = count_total(lines, id_list, variant_type=options.variant_type,
phasing_rate=options.phasing_rate)
print_count_total(count, sys.stdout)
except (IOError, OSError):
traceback.print_exc(file=sys.stdout)
sys.exit(141)
def main(**kwargs):
'''Main program - accepts argument dictionary.'''
# Default options
options = util.Struct(data_file=None, id_file=None, group_by_snp=False, variant_type='all',
phasing_rate= 0.0)
# Override with passed arguments
options.update(**kwargs)
# (valid, options, error_msg) = __validate_options(options)
# if not valid:
# raise ValueError('Bad options: %s' % (error_msg,))
return __main(options)
if __name__ == '__main__':
'''Main program - accepts CLI arguments.'''
__main(__parse_command_line_args())
| [
"oren.livne@gmail.com"
] | oren.livne@gmail.com |
8522128d89b824867b58a7b3c2b8e336b6cb1814 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow/source/tensorflow/contrib/learn/python/learn/tests/__init__.py | f6a58889f9c89c6369708df68a903013932811c5 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 891 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn tests."""
# TODO(ptucker): Move these to the packages of the units under test.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
9acfaeaff0e83be2eca65c5a519804e3e11d64ff | 6fdbd55d5232d66a740e2893cc9939cb6e89455d | /geupshik_translator/config/storages.py | 181ae2bf99c7e69e523a9a27a78d7df2d6bd122c | [] | no_license | suhjohn/Slang-Translator-KR | f24ff76b034cc78926a30a7ee9b50b15855f5b5a | 69f524ae4c1ce8bbac481ce73221f18747144001 | refs/heads/master | 2021-08-08T18:55:02.819204 | 2017-11-10T12:23:53 | 2017-11-10T12:41:40 | 110,214,064 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from storages.backends.s3boto3 import S3Boto3Storage
from django.conf import settings
class StaticStorage(S3Boto3Storage):
location = settings.STATICFILES_LOCATION
class MediaStorage(S3Boto3Storage):
location = settings.MEDIAFILES_LOCATION
| [
"johnsuh94@gmail.com"
] | johnsuh94@gmail.com |
76b9d760319a2058057ef8578b535f1dca0e79af | 49f61714a6f78d984fd2194d6064d84e891bc5b7 | /2019-1/220/users/2023/codes/1647_2443.py | 6cbe7ef05d32e52d810ed45d557bc58163e7e18e | [] | no_license | psbarros/Variaveis3 | b5c4e1517e7d94a846ee03791d25d5821a1c651c | 3dcf6f810709ce03c78335acf9533e008a2ae125 | refs/heads/master | 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # Tanque de combustivel.
from math import*
raio = float(input("digite o raio:"))
altura = float(input("digite a altura:"))
opcao = float(input("digite 1 para volume do ar ou 2 para do combustivel:"))
calota_esferica = ((pi*(altura**2)*(3*raio-altura)))/3
volume_esfera = 4*pi*(raio**3)/3
if (opcao==1):
v = calota_esferica
else:
v = volume_esfera - calota_esferica
print(round(v,4))
| [
"psb@icomp.ufam.edu.br"
] | psb@icomp.ufam.edu.br |
42b8ceb2a472566a9eb54fe3e020f1d07b99f9a9 | 7b4f9a5937c6d390289d7252266cfdd3c62be728 | /how_to_think/chapter_3/second_set/exercise_3_4_4_7_b.py | 6029766e3231f4baef19c32fac035a8f6bb033d6 | [] | no_license | LCfP-basictrack/basictrack-2020-2021-2b | d7ea1dc651c202d9e433588c9df8cf3554fd80e8 | 268f066a9baade3c4300a72ef7a866e535a714e0 | refs/heads/master | 2023-05-05T22:32:41.738918 | 2021-05-27T15:12:40 | 2021-05-27T15:12:40 | 358,275,084 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | import turtle
screen = turtle.Screen()
pirate = turtle.Turtle()
steps = [160, -43, 270, -97, -43, 200, -940, 17, -86]
heading = 0
for step in steps:
pirate.left(step)
pirate.forward(100)
heading += step
print("The final heading is", heading % 360)
screen.exitonclick()
| [
"mail@vincentvelthuizen.com"
] | mail@vincentvelthuizen.com |
cbdbd4b45853d6a0849a51a1b17e1bae1b10a9ce | c97830c72b99f005a2024ce57f8af9a912b18c59 | /acc/migrations/0006_profile_id_image.py | ca338e9102c9f4fe6b21eb4d004a7ff4e328efc8 | [] | no_license | toluwanicareer/xenos | 77b4d9c6516c45fffef6affbcaeccdc02ec5ff3a | ba1c25c68c87aaddcf1237e2c055b055e24fa4bc | refs/heads/master | 2021-05-04T15:15:00.373122 | 2018-04-08T20:10:00 | 2018-04-08T20:10:00 | 120,223,340 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-17 17:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('acc', '0005_auto_20180217_0620'),
]
operations = [
migrations.AddField(
model_name='profile',
name='id_image',
field=models.ImageField(null=True, upload_to=b''),
),
]
| [
"abiodun.toluwanii@gmail.com"
] | abiodun.toluwanii@gmail.com |
76e9b88a8acfc25d43ab10dc0fc98798ffa21f65 | cb6461bfae8b0935b7885697dad0df60670da457 | /pychron/dashboard/tasks/server/panes.py | 1ec709a17a70d5aaec69977e3dd98bc2924d76de | [
"Apache-2.0"
] | permissive | USGSMenloPychron/pychron | 00e11910511ca053e8b18a13314da334c362695a | 172993793f25a82ad986e20e53e979324936876d | refs/heads/develop | 2021-01-12T14:09:18.983658 | 2018-02-06T14:25:05 | 2018-02-06T14:25:05 | 69,751,244 | 0 | 0 | null | 2016-10-01T16:59:46 | 2016-10-01T16:59:46 | null | UTF-8 | Python | false | false | 3,111 | py | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.tasks.traits_dock_pane import TraitsDockPane
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traitsui.api import View, UItem, VGroup, HGroup, Group, VSplit
# ============= standard library imports ========================
# ============= local library imports ==========================
from traitsui.editors import TableEditor, InstanceEditor, ListEditor
from traitsui.extras.checkbox_column import CheckboxColumn
from traitsui.table_column import ObjectColumn
from pychron.core.ui.custom_label_editor import CustomLabel
class DashboardCentralPane(TraitsTaskPane):
def traits_view(self):
url = CustomLabel('object.notifier.url', label='URL')
agrp = VGroup(UItem('devices', editor=ListEditor(mutable=False,
style='custom',
editor=InstanceEditor(
view=View(UItem('graph', style='custom'))))), label='All')
igrp = VGroup(UItem('selected_device', style='custom'), label='Individual')
tgrp = HGroup(url, UItem('clear_button', tooltip='Clear current errors'))
# v = View(
# VGroup(HGroup(url, UItem('clear_button', tooltip='Clear current errors')),
# UItem('selected_device',
# style='custom'),
#
# )))
v = View(VGroup(tgrp, Group(agrp, igrp, layout='tabbed')))
return v
class DashboardDevicePane(TraitsDockPane):
id = 'pychron.dashboard.devices'
def traits_view(self):
cols = [CheckboxColumn(name='use'),
ObjectColumn(name='name', editable=False)]
editor = TableEditor(columns=cols,
selected='selected_device')
cols = [ObjectColumn(name='name', label='Name'),
ObjectColumn(name='last_value', label='Value'),
ObjectColumn(name='last_time_str', label='Timestamp')]
veditor = TableEditor(columns=cols,
editable=False)
v = View(VSplit(UItem('devices', editor=editor),
UItem('values', editor=veditor)))
return v
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
5b86ad9f6206d82584c6a1da085a0e3e72e2b250 | 11aac6edab131293027add959b697127bf3042a4 | /busiestServers.py | 6fee9c8789a62b906f53a1556f08cd3f3b0a2b41 | [] | no_license | jdanray/leetcode | a76b3436002b31865967b757b73c85992636383b | fd736af3e79899b86dac89d4d925d5bd985944ad | refs/heads/master | 2023-08-15T01:20:05.110565 | 2023-08-14T00:25:58 | 2023-08-14T00:25:58 | 148,686,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # https://leetcode.com/problems/find-servers-that-handled-most-number-of-requests/
from sortedcontainers import SortedList
class Solution(object):
def busiestServers(self, k, arrival, load):
count = collections.Counter()
avail = SortedList(list(range(k)))
busy = []
for (i, a) in enumerate(arrival):
while busy and busy[0][0] <= a:
_, s = heapq.heappop(busy)
avail.add(s)
if avail:
j = avail.bisect_left(i % k)
if j >= len(avail):
s = avail[0]
else:
s = avail[j]
avail.remove(s)
heapq.heappush(busy, (a + load[i], s))
count[s] += 1
maxc = max(count.values())
return [s for s in range(k) if count[s] == maxc]
| [
"jdanray@users.noreply.github.com"
] | jdanray@users.noreply.github.com |
2f947ab99d89b2f0ef78907083e8ebf86d2ad25a | 9452f681ea486fc53ad88d05392aed5fc450805c | /code25_all/python/2350110.txt | cb11aa556ef4974922179fb7719a30672dd9d2db | [] | no_license | CoryCollins/src-class | 11a6df24f4bd150f6db96ad848d7bfcac152a695 | f08a2dd917f740e05864f51ff4b994c368377f97 | refs/heads/master | 2023-08-17T11:53:28.754781 | 2021-09-27T21:13:23 | 2021-09-27T21:13:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,273 | txt | #!/usr/bin/python
import timeit
import numpy
import ctypes
import random
USE_RANDOM=True
USE_STATIC_BUFFER=True
STATIC_BUFFER = numpy.empty(4096*20, dtype=numpy.float32)
def render(i):
# pretend these are different each time
if USE_RANDOM:
tex_left, tex_right, tex_top, tex_bottom = random.random(), random.random(), random.random(), random.random()
left, right, top, bottom = random.random(), random.random(), random.random(), random.random()
else:
tex_left, tex_right, tex_top, tex_bottom = 0.0, 1.0, 1.0, 0.0
left, right, top, bottom = -1.0, 1.0, 1.0, -1.0
ibuffer = (
tex_left, tex_bottom, left, bottom, 0.0, # Lower left corner
tex_right, tex_bottom, right, bottom, 0.0, # Lower right corner
tex_right, tex_top, right, top, 0.0, # Upper right corner
tex_left, tex_top, left, top, 0.0, # upper left
)
return ibuffer
# create python list.. convert to numpy array at end
def create_array_1():
ibuffer = []
for x in xrange(4096):
data = render(x)
ibuffer += data
ibuffer = numpy.array(ibuffer, dtype=numpy.float32)
return ibuffer
# numpy.array, placing individually by index
def create_array_2():
if USE_STATIC_BUFFER:
ibuffer = STATIC_BUFFER
else:
ibuffer = numpy.empty(4096*20, dtype=numpy.float32)
index = 0
for x in xrange(4096):
data = render(x)
for v in data:
ibuffer[index] = v
index += 1
return ibuffer
# using slicing
def create_array_3():
if USE_STATIC_BUFFER:
ibuffer = STATIC_BUFFER
else:
ibuffer = numpy.empty(4096*20, dtype=numpy.float32)
index = 0
for x in xrange(4096):
data = render(x)
ibuffer[index:index+20] = data
index += 20
return ibuffer
# using numpy.concat on a list of ibuffers
def create_array_4():
ibuffer_concat = []
for x in xrange(4096):
data = render(x)
# converting makes a diff!
data = numpy.array(data, dtype=numpy.float32)
ibuffer_concat.append(data)
return numpy.concatenate(ibuffer_concat)
# using numpy array.put
def create_array_5():
if USE_STATIC_BUFFER:
ibuffer = STATIC_BUFFER
else:
ibuffer = numpy.empty(4096*20, dtype=numpy.float32)
index = 0
for x in xrange(4096):
data = render(x)
ibuffer.put( xrange(index, index+20), data)
index += 20
return ibuffer
# using ctype array
CTYPES_ARRAY = ctypes.c_float*(4096*20)
def create_array_6():
ibuffer = []
for x in xrange(4096):
data = render(x)
ibuffer += data
ibuffer = CTYPES_ARRAY(*ibuffer)
return ibuffer
def equals(a, b):
for i,v in enumerate(a):
if b[i] != v:
return False
return True
if __name__ == "__main__":
number = 100
# if random, don't try and compare arrays
if not USE_RANDOM and not USE_STATIC_BUFFER:
a = create_array_1()
assert equals( a, create_array_2() )
assert equals( a, create_array_3() )
assert equals( a, create_array_4() )
assert equals( a, create_array_5() )
assert equals( a, create_array_6() )
t = timeit.Timer( "testing2.create_array_1()", "import testing2" )
print 'from list:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_2()", "import testing2" )
print 'array: indexed:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_3()", "import testing2" )
print 'array: slicing:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_4()", "import testing2" )
print 'array: concat:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_5()", "import testing2" )
print 'array: put:', t.timeit(number)/number*1000.0, 'ms'
t = timeit.Timer( "testing2.create_array_6()", "import testing2" )
print 'ctypes float array:', t.timeit(number)/number*1000.0, 'ms'
$ python testing2.py
from list: 15.0486779213 ms
array: indexed: 24.8184704781 ms
array: slicing: 50.2214789391 ms
array: concat: 44.1691994667 ms
array: put: 73.5879898071 ms
ctypes float array: 20.6674289703 ms
| [
"znsoft@163.com"
] | znsoft@163.com |
2cbc0e35a8851a5ac3b4a3a4cae1d7c9d6e26397 | 4033fed719d91ccea96bb854f499a4ff253c54d3 | /custom_features/DBTT.py | 79b3b098c08eec105c67913a76224dce5ea0fd59 | [] | no_license | H0lland/MAST-ML | 612cc195c08f612b4846b0da40f113c3b40dfc47 | 40ef2c17bb8bb26b928f6cc4623a3a46e02c671a | refs/heads/master | 2020-03-15T13:33:25.533453 | 2018-08-10T15:10:15 | 2018-08-10T15:10:15 | 132,169,522 | 0 | 0 | null | 2018-06-12T15:40:50 | 2018-05-04T17:25:49 | Jupyter Notebook | UTF-8 | Python | false | false | 1,577 | py | import numpy as np
import copy
from FeatureOperations import FeatureNormalization, FeatureIO
__author__ = "Tam Mayeshiba"
class DBTT():
"""Class for creating custom feature columns specifically for the
DBTT project.
New methods may be added.
Args:
dataframe <data object>
Returns:
Raises:
ValueError if dataframe is None
"""
def __init__(self, dataframe=None):
"""Custom data handler
Attributes:
self.original_dataframe <data object>: Dataframe
self.df <data object>: Dataframe
Each custom feature should take keyword arguments.
"""
if dataframe is None:
raise ValueError("No dataframe.")
self.original_dataframe = copy.deepcopy(dataframe)
self.df = copy.deepcopy(dataframe)
return
def calculate_EffectiveFluence(self, pvalue=0, ref_flux = 3e10, flux_feature="",fluence_feature="", scale_min = 1e17, scale_max = 1e25, **params):
"""Calculate effective fluence
"""
fluence = self.df[fluence_feature]
flux = self.df[flux_feature]
EFl = fluence * (ref_flux / flux) ** pvalue
EFl = np.log10(EFl)
fio = FeatureIO(self.df)
new_df = fio.add_custom_features(["EFl"],EFl)
fnorm = FeatureNormalization(new_df)
N_EFl = fnorm.minmax_scale_single_feature("EFl",
smin = np.log10(scale_min),
smax = np.log10(scale_max))
return N_EFl
| [
"mayeshiba@wisc.edu"
] | mayeshiba@wisc.edu |
a8880daa6f3d42715335e1ab99fa44f4a9d4f40b | 8b09c10f7ebabccf01dcf21775c7fe0550ee0d9e | /virtual/bin/confusable_homoglyphs | d477d110b1c479190e2e1f2ca41abed42a2bb72f | [
"MIT"
] | permissive | MaryMbugua/Safe | f25da88ccefcc5d54ea142b634be3493be1e24e1 | 2aaa4760cfa96aafc4d37233fe7b4df584e2ed79 | refs/heads/master | 2020-03-18T11:26:22.678249 | 2018-06-05T06:21:58 | 2018-06-05T06:21:58 | 134,671,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | #!/home/nish/Desktop/projects/neighbourhoodwatch/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from confusable_homoglyphs.cli import cli
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli())
| [
"marymbugua.nm@gmail.com"
] | marymbugua.nm@gmail.com | |
6eb1f05ced6608ae3c37f76584a08945f16a2723 | f9cc7246e323a9ce0d93a4e9722e1408e494fb4f | /paymentserver/app/urls.py | d9b4627f88e861d9f3fc7e2a54918547846fd3a8 | [] | no_license | ketsalot1/apps-payment-server | a21e3043b71f85db103e6f094e1ebb243119f491 | e571776af364ebf32dec9fbe4fb1a6554d279328 | refs/heads/master | 2021-01-13T07:09:34.186299 | 2012-03-23T04:06:53 | 2012-03-23T04:06:53 | 69,055,263 | 1 | 0 | null | 2016-09-23T19:37:14 | 2016-09-23T19:37:14 | null | UTF-8 | Python | false | false | 225 | py | from django.conf.urls.defaults import *
from django.conf import settings
urlpatterns = patterns('app.views',
(r'^$', 'home'),
url(r'^payment_succeeded$', 'payment_succeeded',
name='app.payment_succeeded'),
)
| [
"kumar.mcmillan@gmail.com"
] | kumar.mcmillan@gmail.com |
2e699869612e99e69257d64e9b23679d35d7c5b7 | f0e10b8dfabfe931e7fa9d9adda6ca4fcde6940d | /tutu/utils.py | cbfe3a94cdf25490e2330e9b42b4dfd63d60f418 | [
"MIT"
] | permissive | priestc/django-tutu | e0d4cbd977ca1d7ed97f1fb510aa655ada6d74d3 | 267a44a0dd75a1de42d05801149f36ef1ac84b57 | refs/heads/master | 2020-09-27T07:13:01.442268 | 2020-02-08T05:50:53 | 2020-02-08T05:50:53 | 226,460,887 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,217 | py | from tutu.metrics import Metric
import datetime
from django.conf import settings
def validate_metric(metric):
if isinstance(metric, Metric):
return metric
elif isinstance(metric, type) and issubclass(metric, Metric):
return metric()
else:
raise ValueError("Must be a Metric class or instance")
def get_installed_metrics():
metrics = []
for item in settings.INSTALLED_TUTU_METRICS:
metrics.append(validate_metric(item))
return metrics
def get_metric_from_name(name):
for metric in get_installed_metrics():
if name == metric.internal_name:
return metric
def get_metrics_from_names(metric_names):
metric_list = []
for metric in get_installed_metrics():
if metric.internal_name in metric_names:
metric_list.append(metric)
return metric_list
def get_column_number_and_instance():
column_numbers = {}
for i, metric in enumerate(get_installed_metrics()):
column_numbers[metric.internal_name] = [i+1, metric]
return column_numbers
######################################################
######################################################
def make_test_ticks(start, end):
from tutu.models import Tick
target = start
while(target < end):
Tick.objects.create(date=target, machine="TestMachine")
target += datetime.timedelta(minutes=5)
def make_poll_results(metrics):
import random
from tutu.models import Tick, PollResult
for tick in Tick.objects.all():
for item in metrics:
metric = validate_metric(item)
result = metric.poll()
PollResult.objects.create(
tick=tick,
metric_name=metric.internal_name,
result=result,
success=True,
seconds_to_poll=1
)
def make_nginx_ticks():
from tutu.metrics import Nginx, NginxByStatusCode, NginxPercentUniqueIP, NginxBandwidth
n = Nginx()
start = n.parse_dt("27/Jan/2020:07:35:07 -0800")
end = n.parse_dt("31/Jan/2020:13:28:15 -0800")
make_test_ticks(start, end)
make_poll_results([n, NginxByStatusCode(), NginxPercentUniqueIP(), NginxBandwidth()])
| [
"cp368202@ohiou.edu"
] | cp368202@ohiou.edu |
666fecfb2a75f1bc7777fe2d07e22f40e6d985eb | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D04B/CUSEXPD04BUN.py | e420b19fdad2bfde8e8a20be17750d9949db5fad | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,519 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD04BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'LOC', MIN: 0, MAX: 5},
{ID: 'CNT', MIN: 0, MAX: 9},
{ID: 'NAD', MIN: 1, MAX: 1, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 5, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
{ID: 'TDT', MIN: 1, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
]},
{ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'SEL', MIN: 0, MAX: 9},
]},
{ID: 'RFF', MIN: 0, MAX: 999, LEVEL: [
{ID: 'NAD', MIN: 0, MAX: 2},
{ID: 'CNT', MIN: 0, MAX: 1},
{ID: 'CNI', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'SGP', MIN: 0, MAX: 9},
{ID: 'CNT', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 2},
{ID: 'NAD', MIN: 0, MAX: 5},
{ID: 'GDS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'PAC', MIN: 0, MAX: 999, LEVEL: [
{ID: 'PCI', MIN: 0, MAX: 1},
]},
{ID: 'TOD', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'MOA', MIN: 0, MAX: 10, LEVEL: [
{ID: 'CUX', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
]},
{ID: 'TAX', MIN: 0, MAX: 9, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'GEI', MIN: 0, MAX: 1},
]},
{ID: 'DOC', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 1},
]},
{ID: 'CST', MIN: 0, MAX: 99, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 1, MAX: 1},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'TAX', MIN: 0, MAX: 9, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'GEI', MIN: 0, MAX: 1},
]},
]},
]},
]},
{ID: 'AUT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
50dd5ace63ee64ad2858aaefffaeb374c9271f86 | 49b1b98e9bbba2e377d8b5318fcc13776bbcb10b | /museum/spiders/exhibition17.py | e1d1505e4d08052e575979f21026047abbede4ea | [] | no_license | BUCT-CS1808-SoftwareEngineering/MusemData_Collection_System | fa4e02ec8e8aaa9a240ba92cf7be33dbc0e8e31f | 023e829c77037ba6d2183d8d64dcb20696b66931 | refs/heads/master | 2023-04-29T05:43:01.627439 | 2021-05-23T03:02:09 | 2021-05-23T03:02:09 | 360,040,880 | 0 | 0 | null | 2021-05-23T03:02:09 | 2021-04-21T05:18:41 | Python | UTF-8 | Python | false | false | 2,232 | py | import scrapy
from museum.items import exhibitionItem
import re
import json
# scrapy crawl exhibition17
class Exhibition17Spider(scrapy.Spider):
name = 'exhibition17'
# allowed_domains = ['www.xxx.com']
start_urls = ['http://www.3gmuseum.cn/web/exhibitionHallOften/conventionalExhibitionPage.do?pageNumber=1&pageSize=12&itemno=25434353']
# headers={
# 'Host': 'www.3gmuseum.cn',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0',
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
# 'Accept-Encoding': 'gzip, deflate',
# 'Referer': 'http://www.3gmuseum.cn/web/exhibitionHallOften/longExhibition.do?itemno=23&itemsonno=25434353',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'X-Requested-With': 'XMLHttpRequest',
# 'Content-Length': '40',
# 'Origin': 'http://www.3gmuseum.cn',
# 'Connection': 'keep-alive',
# 'Cookie': 'JSESSIONID=16433817DCFCAFEF924469AD000E1054; UM_distinctid=1794b9a2a6354e-090ba774c3f989-4c3f2c72-1fa400-1794b9a2a6491a; CNZZDATA1254436347=90473301-1620471035-%7C1620471035',
# 'Pragma': 'no-cache',
# 'Cache-Control': 'no-cache'
# } headers=self.headers,
def start_requests(self):
yield scrapy.Request(url=self.start_urls[0], callback=self.parse, method="POST")
def parse(self, response):
item = exhibitionItem()
coll_list = json.loads(response.text)["list"]
for i in coll_list:
collectionName = i["formattitle"]
collectionName = ''.join(collectionName)
collectionImageUrl = i["themeimg"]
collectionImageUrl = ''.join(collectionImageUrl)
collectionDescription = str(i["contents"])
collectionDescription = re.sub(r'<\/?.+?\/?>','',collectionDescription)
# collectionDescription = ''.join(collectionDescription)
# collectionImageUrl = 'http://www.njmuseum.com' + ''.join(collectionImageUrl)
print((collectionName, collectionDescription, collectionImageUrl))
| [
"szqszq00766@163.com"
] | szqszq00766@163.com |
f67376bd7e13509194aea02f91ac23207e7fca15 | 4bd207d288c95b9f20785bb841224b914f05c280 | /code-master/lib/bitbots/modules/behaviour/head/decisions/head_duty_decider.py | 693c083124300ef7e2a84e8530d287cd73e15850 | [] | no_license | hendrikvgl/RoboCup-Spielererkennung | 435e17ee540c4b4c839e26d54db2528a60e6a110 | c41269a960f4b5ea0814a49f5a20ae17eb0a9d71 | refs/heads/master | 2021-01-10T10:39:00.586760 | 2015-10-21T12:42:27 | 2015-10-21T12:42:27 | 44,675,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,726 | py | # -*- coding:utf-8 -*-
"""
HeadDutyDecider
^^^^^^^^^^^^^^^
Entscheidet was der Kopf tun soll
History:
* 19.08.14: Created (Nils Rokita)
"""
import time
from bitbots.modules.abstract.abstract_decision_module import AbstractDecisionModule
from bitbots.modules.abstract.abstract_module import debug_m
from bitbots.modules.behaviour.head.decisions.search_and_confirm import SearchAndConfirmBall, SearchAndConfirmEnemyGoal
from bitbots.modules.behaviour.head.decisions.continious_search import ContiniousSearch
from bitbots.util import get_config
class HeadDutyDecider(AbstractDecisionModule):
    """Root decision of the head behaviour stack.

    Chooses what the robot's head should do next: track the ball, track the
    enemy goal, alternate between both, or fall back to a continuous search.
    """

    def __init__(self, _):
        super(HeadDutyDecider, self).__init__()
        toggles = get_config()["Behaviour"]["Toggles"]["Head"]
        self.toggle_goal_vison_tracking = toggles["goalVisionTracking"]
        self.toggle_switch_ball_goal = toggles["switchBallGoalSearch"]
        config = get_config()
        self.confirm_time = config["Behaviour"]["Common"]["Search"]["confirmTime"]
        self.last_confirmd_goal = 0
        self.fail_goal_counter = 0
        # Priority accumulators: grow while the object is NOT seen,
        # shrink while it is (see perform()).
        self.ball_prio = 0
        self.goal_prio = 0
        self.trackjustball_aftergoal = False

    def perform(self, connector, reevaluate=False):
        # todo refactor in more decisions
        """ This is the root for the head stack machine """
        # Ball priority: -3 when seen (floor 0), +5 when not seen (cap 120).
        if connector.raw_vision_capsule().ball_seen():
            self.ball_prio = max(0, self.ball_prio - 3)
        else:
            self.ball_prio = min(120, self.ball_prio + 5)
        # Goal priority: -2 when any goal is seen (floor 0), +3 otherwise (cap 100).
        if connector.raw_vision_capsule().any_goal_seen():
            self.goal_prio = max(0, self.goal_prio - 2)
        else:
            self.goal_prio = min(100, self.goal_prio + 3)
        debug_m(4, "GoalPrio", self.goal_prio)
        debug_m(4, "BallPrio", self.ball_prio)
        debug_m(4, "BallLastCOnfirmed", time.time() - connector.blackboard_capsule().get_confirmed_ball())
        debug_m(4, "BallLastStratedconfirm", time.time() - connector.blackboard_capsule().get_started_confirm_ball())
        # Hard stop: head should not move at all.
        if connector.blackboard_capsule().is_no_head_movement_at_all():
            debug_m(4, "Headdoes", "Nothing")
            return self.interrupt()
        # Ongoing single-target tracking keeps precedence.
        if connector.blackboard_capsule().is_ball_tracking_still_active():
            debug_m(4, "Headdoes", "BallTracking")
            return self.push(SearchAndConfirmBall)
        if connector.blackboard_capsule().is_enemy_goal_tracking_still_active():
            debug_m(4, "Headdoes", "GoalTracking")
            return self.push(SearchAndConfirmEnemyGoal)
        # Alternate between ball and goal based on how stale each confirmation is.
        if connector.blackboard_capsule().is_tracking_both_still_active():  # todo to be tested
            debug_m(4, "TrackbothTime", time.time())
            if time.time() - connector.blackboard_capsule().get_confirmed_ball() > 5:
                debug_m(4, "Headdoes", "TrackBothBall")
                return self.push(SearchAndConfirmBall)
            # ball long enough seen
            elif time.time() - connector.blackboard_capsule().get_confirmed_goal() > 6:
                debug_m(4, "Headdoes", "TrackBothGoal")
                return self.push(SearchAndConfirmEnemyGoal)
            elif self.trackjustball_aftergoal:
                debug_m(4, "Headdoes", "TrackBothElse")
                return self.push(SearchAndConfirmBall)
        # Priority-driven choice between ball and goal search, if enabled.
        if self.toggle_switch_ball_goal:
            debug_m(4, "Headdoes", "Priorities")
            if self.ball_prio >= self.goal_prio:
                return self.push(SearchAndConfirmBall)
            else:
                return self.push(SearchAndConfirmEnemyGoal)
        # Default Head Behaviour
        debug_m(4, "Headdoes", "Standardsearch")
        return self.push(ContiniousSearch)

    def get_reevaluate(self):
        # Re-run this decision on every behaviour tick.
        return True
| [
"hendrik.vgl@gmail.com"
] | hendrik.vgl@gmail.com |
498cfa85a40dbce059482bbd34786e6433736369 | ff844756ad80c33c2cfcdd7f41a010e8e5674bbf | /test/app_test.py | ed87550c798181e8fababda910e263f710ed8f83 | [] | no_license | riquellopes/congratulations | f133be337413b500052639f30d6ea6b7182681da | 074df3cb2b8dfab007e7fd93f5f1786b0fd7ad95 | refs/heads/master | 2016-09-05T11:52:32.941162 | 2012-03-05T17:18:19 | 2012-03-05T17:18:19 | 3,546,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,440 | py | # coding: utf-8
import datetime
import unittest
from mock import Mock, patch
from nose.tools import assert_equals, assert_true, assert_raises, assert_false
from app import Congratulations, CongratulationsExEnd, app
class MockUrllib(Mock):
    """Stand-in for the object returned by urllib2.urlopen.

    Serves the contents of a local HTML fixture file instead of making a
    network request.
    """

    def __init__(self, file_test):
        # Path of the HTML fixture that read() will return.
        self.file_test = file_test

    def read(self):
        """Return the whole fixture file as one string.

        Uses a context manager so the file handle is closed deterministically
        (the original implementation leaked the open handle).
        """
        with open(self.file_test) as handle:
            return "".join(handle)
class MockCongratulations(Congratulations):
    # Placeholder subclass of Congratulations; currently adds no behaviour.
    pass
class CongratulationsTest(unittest.TestCase):
    """Unit tests for the Congratulations result-checking scraper.

    NOTE: this file uses Python 2 syntax (``except Exception, e``) and
    mocks ``urllib2.urlopen`` with local HTML fixtures.
    """

    def test_class_Congratulations_existe(self):
        # Congratulations must be a (new-style) class.
        assert_true(isinstance(Congratulations, object))

    def test_objeto_recebe_nome_do_consinscrito(self):
        """
        When an instance is created, the candidate's name must be stored for the search::
        """
        c = Congratulations(name='leandro')
        assert_equals((c.name.upper()), 'LEANDRO')

    def test_caso_nome_nao_seja_passado_deve_haver_um_exception(self):
        """
        If the candidate's name is not given, the system must raise an exception::
        """
        assert_raises(Exception, Congratulations)

    @patch('app.urllib2.urlopen')
    def test_jonas_brother_no_accepted(self, sr):
        """
        Candidate Jonas Brother did not have his profile accepted::
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        c = Congratulations(name='Jonas Brother', url=app.config['URL_D'])
        c.search()
        assert_equals(c.status.lower(), "no accepted")

    @patch('app.urllib2.urlopen')
    def test_leandro_accepted(self, sr):
        """
        Candidate Leandro had his profile accepted::
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        c = Congratulations(name='Leandro', url=app.config['URL_D'])
        c.search()
        assert_equals(c.status.lower(), "accepted")

    @patch('app.urllib2.urlopen')
    def test_jarbas_no_processed(self, sr):
        """
        Candidate Jarbas has not had his profile processed yet::
        """
        sr.return_value = MockUrllib('teste.html')
        c = Congratulations(name='Jarbas', url=app.config['URL_S'])
        c.search()
        assert_equals(c.status.lower(), "no processed")

    @patch('app.urllib2.urlopen')
    def test_menssage_tela_jarbas(self, sr):
        """
        If Jarbas' situation has not been processed yet, the system generates a message::
        """
        sr.return_value = MockUrllib('teste.html')
        c = Congratulations(name='Jarbas', url=app.config['URL_S'], name_display='@riquellopes')
        c.search()
        assert_equals(c.display_menssage.lower(), "your curriculum wasn't <span class='wait'>processed</span>")

    @patch('app.urllib2.urlopen')
    def test_menssagem_tela_jonas(self, sr):
        """
        If Jonas' situation has already been processed, the system generates a message::
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        c = Congratulations(name='Jonas Brother', url=app.config['URL_D'], name_display='@brother')
        c.search()
        assert_equals(c.display_menssage.lower(), "sorry your curriculum wasn't <span class='failure'>accepted</span>")

    @patch('app.urllib2.urlopen')
    def test_messagem_tela_leandro(self, sr):
        """
        If Leandro's situation has already been processed, the system generates a message::
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro')
        c.search()
        assert_equals(c.display_menssage.lower(), "congratulations your curriculum was <span class='sucess'>accepted</span>")

    def test_caso_search_nao_seja_chamado(self):
        """
        If the search method is not called before display_menssage, an exception must be raised::
        """
        c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro')
        # Python 2 exception syntax; the assert only runs when the exception fires.
        try:
            c.display_menssage
        except Exception, e:
            assert_true(True)

    @patch('app.urllib2.urlopen')
    def test_periodo(self, sr):
        """
        If the result-release period has already ended, search must raise an exception::
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro', date_end='2012-02-26')
        assert_raises(CongratulationsExEnd, c.search)

    @patch('app.urllib2.urlopen')
    def test_save(self, sr):
        """
        The save method must write the information to congratulations.json::
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        date_end = datetime.datetime.now().strftime("%Y-%m-%d")
        c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro', date_end=date_end)
        assert_true(c.save())

    @patch('app.urllib2.urlopen')
    def test_save_none(self, sr):
        """
        If the verification period has ended, save must return None::
        """
        sr.return_value = MockUrllib('teste_dentista.html')
        c = Congratulations(name='Leandro', url=app.config['URL_D'], name_display='@leandro', date_end='2012-02-26')
        assert_true(c.save() is None)
class ViewTest(unittest.TestCase):
    """Integration tests for the Flask views, using the test client."""

    def setUp(self):
        # Fresh Flask test client per test.
        self.app = app.test_client()

    @patch('app.Congratulations.save')
    def test_home(self, cg):
        """
        The title of the home page must be 'Congratulations APP'::
        """
        rs = self.app.get("/")
        assert_true('<title>Congratulations APP</title>' in str(rs.data) )

    @patch('app.urllib2.urlopen')
    def test_process(self, sr):
        """
        Every time the index is accessed, the system must refresh the information in index.html::
        """
        sr.return_value = MockUrllib('teste_sistema.html')
        rs = self.app.get('/')
        assert_true('Last update: <i>%s</i>' % (datetime.datetime.now().strftime("%Y %B, %d %H:%M")) in str(rs.data))
"riquellopes@gmail.com"
] | riquellopes@gmail.com |
6b83013931c9e1b0bc505ae8a5906aa6c3985271 | afcb260d6f0c1d88232d2e300d26d8fb71b5ef43 | /django-app/member/views.py | 0a3ad5f76c034d58d7139ec6bc3f14240c145f63 | [] | no_license | JeongEuiJin/deploy-eb-docker | e5d10f65166ca8a1a4a5fdd32c9647c0d8f5feed | 1f5b57aa5e119f68c169f059e9bf88d5fbf76850 | refs/heads/master | 2020-12-02T17:46:19.905183 | 2017-07-13T07:32:36 | 2017-07-13T07:32:36 | 96,424,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from .forms import CustomUserCreationForm
# Create your views here.
# @login_required
def home(request):
    """Render the member home page."""
    return render(request, 'member/home.html')
def signup(request):
    """Register a new user.

    On a valid POST: create the account, authenticate with the submitted
    credentials, log the user in and redirect to the member home page.
    Otherwise render the signup form (bound, with errors, after an invalid
    submission; empty on a plain GET).
    """
    if request.method != 'POST':
        # Plain GET: show an empty registration form.
        return render(request, 'member/signup.html', {'form': CustomUserCreationForm()})

    form = CustomUserCreationForm(request.POST)
    if form.is_valid():
        form.save()
        credentials = {
            'username': form.cleaned_data.get('username'),
            'password': form.cleaned_data.get('password1'),
        }
        # Authenticate with the raw password so the session login succeeds.
        login(request, authenticate(**credentials))
        return redirect('member:home')

    # Invalid submission: re-render with the bound form so errors are shown.
    return render(request, 'member/signup.html', {'form': form})
| [
"hehar1020@gmail.com"
] | hehar1020@gmail.com |
7f028271161ec849660d7f6454b99fd19c01e985 | 2475acb15e1dcd4a42ed8849a6ae7c6f383d5b59 | /calendar_api/migrations/0008_auto_20190529_1753.py | 78acb710d43a80372e2816d2f5f91e5967eda897 | [] | no_license | amigo2/Goker | 2e66cabb32253bfe43a188241b58df962a9a1583 | 0509446382eacabdb7c006a60362891f217bcc3a | refs/heads/master | 2022-12-13T09:50:47.720250 | 2019-08-13T20:54:43 | 2019-08-13T20:54:43 | 169,242,538 | 0 | 0 | null | 2022-12-08T12:23:28 | 2019-02-05T13:06:35 | Python | UTF-8 | Python | false | false | 2,230 | py | # Generated by Django 2.1.4 on 2019-05-29 15:53
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration for the calendar_api app.

    For each of the four event models (adquisition, news, recontact,
    salesevent): renames end_date -> end and end_time -> start
    (NOTE(review): 'end_time' becoming 'start' looks intentional here but is
    worth confirming against the model definitions), then drops the old
    start_date/start_time columns.
    """

    dependencies = [
        ('calendar_api', '0007_auto_20190529_1749'),
    ]

    operations = [
        # --- renames: collapse the date/time pairs into single fields ---
        migrations.RenameField(
            model_name='adquisition',
            old_name='end_date',
            new_name='end',
        ),
        migrations.RenameField(
            model_name='adquisition',
            old_name='end_time',
            new_name='start',
        ),
        migrations.RenameField(
            model_name='news',
            old_name='end_date',
            new_name='end',
        ),
        migrations.RenameField(
            model_name='news',
            old_name='end_time',
            new_name='start',
        ),
        migrations.RenameField(
            model_name='recontact',
            old_name='end_date',
            new_name='end',
        ),
        migrations.RenameField(
            model_name='recontact',
            old_name='end_time',
            new_name='start',
        ),
        migrations.RenameField(
            model_name='salesevent',
            old_name='end_date',
            new_name='end',
        ),
        migrations.RenameField(
            model_name='salesevent',
            old_name='end_time',
            new_name='start',
        ),
        # --- removals: the start_* columns are superseded ---
        migrations.RemoveField(
            model_name='adquisition',
            name='start_date',
        ),
        migrations.RemoveField(
            model_name='adquisition',
            name='start_time',
        ),
        migrations.RemoveField(
            model_name='news',
            name='start_date',
        ),
        migrations.RemoveField(
            model_name='news',
            name='start_time',
        ),
        migrations.RemoveField(
            model_name='recontact',
            name='start_date',
        ),
        migrations.RemoveField(
            model_name='recontact',
            name='start_time',
        ),
        migrations.RemoveField(
            model_name='salesevent',
            name='start_date',
        ),
        migrations.RemoveField(
            model_name='salesevent',
            name='start_time',
        ),
    ]
| [
"amigo2@hotmail.com"
] | amigo2@hotmail.com |
623bec47ef4142d1023c81692b0918123a72b98f | 2b0f7d5e7b43bb9d32ee4044a79b0fc67294c986 | /test_skewed.py | 8dcd19468398179bf0edd731c04d67b71f0f73e4 | [] | no_license | rajikalk/Scripts | 766fcc16b97dfd4f918d8efbd2ec529a2dd71639 | 8caec46a6272ff1c7aeb5e359610d7e695dd7d34 | refs/heads/master | 2023-08-30T12:53:09.220312 | 2023-08-28T15:08:23 | 2023-08-28T15:08:23 | 87,136,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,653 | py | import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import math as math
import scipy.special as sp
import pickle
import matplotlib.gridspec as gridspec
def func(x, sigma, mu, alpha, c, amp):
    """Skew-normal profile with a constant baseline.

    Evaluates 2*amp * phi(z) * Phi(alpha*z) + c with z = (x - mu)/sigma,
    where phi is the normal pdf and Phi the erf-based skewing cdf term.
    """
    z = (x - mu) / sigma
    gauss_pdf = np.exp(-0.5 * z ** 2) / (sigma * np.sqrt(2 * math.pi))
    skew_cdf = 0.5 * (1 + sp.erf(alpha * z / np.sqrt(2)))
    return 2 * amp * gauss_pdf * skew_cdf + c
# Fit the skew-normal profile `func` to phase-folded accretion curves from two
# simulations ("Mach_0.1" = T1, "Mach_0.2" = T2), save per-orbit fit plots,
# then plot the normalised fits and the max/base ratio (beta) vs eccentricity.
files = ["Mach_0.1/multiple_folds_over_5_orbits.pkl","Mach_0.2/multiple_folds_over_5_orbits.pkl"]
max_accretion = []
base_accretion = []
strength = []
beta = []
y_fits = []
plot_e = []
for file in files:
    file_open = open(file, 'rb')
    multiple_folds, phase_centers, mean_eccentricity, std_eccentricity, accretion_err, n_lines, multiple_folds_normalised = pickle.load(file_open)
    file_open.close()
    plot_e.append(mean_eccentricity)
    # Trim the edges of the folded phase range before fitting.
    x_data = phase_centers[23:-15]
    x = np.linspace(np.min(x_data),np.max(x_data),100)
    max_accretion.append([])
    base_accretion.append([])
    beta.append([])
    strength.append([])
    y_fits.append([])
    file_name = file.split('/')[0] +'/'
    for orbit in range(len(multiple_folds_normalised)):
        '''
        if mean_eccentricity[orbit] == 0.27:
            import pdb
            pdb.set_trace()
        '''
        y_data = multiple_folds_normalised[orbit][23:-15]
        plt.clf()
        plt.plot(x_data,y_data,ls='steps-mid')
        # Retry the fit with random initial guesses, keeping the best result.
        results = []
        for tries in range(50):
            sigma = np.random.random()*2*0.15
            amp = np.random.random()*2*np.max(y_data)
            p = np.array([sigma, x_data[np.argmax(y_data)], -5,np.min(y_data),amp])
            # NOTE(review): the bare except means `popt` may be unbound if the
            # very first curve_fit call raises — confirm this cannot happen.
            try:
                popt, pcov = curve_fit(func, x_data, y_data, p)
            except:
                pass
            err = np.sum(np.abs(func(x_data, *popt) - y_data))
            results.append((err, popt))
            if err < 0.1:
                break
        err, popt = min(results, key=lambda x:x[0])
        # Hand-tuned parameters for the e=0.27 orbit where the fit misbehaves.
        if mean_eccentricity[orbit] == 0.27:
            popt = np.array([0.35, x_data[np.argmax(y_data)]+0.15, -5,np.median(y_data)-0.5,np.max(y_data)*0.2])
        y_fit= func(x, *popt)
        sigmag, mu, alpha, base, amp = popt
        # NOTE: this shadows the builtin max() for the rest of the script.
        max = np.max(y_fit)
        max_accretion[-1].append(max)
        base_accretion[-1].append(np.min(y_fit))
        beta[-1].append(max/np.min(y_fit))
        strength[-1].append(sigmag)
        plt.plot(x,y_fit)
        plt.ylim([0,6])
        y_fits[-1].append(y_fit)
        print('---------------------------------------------')
        print('eccentricity = '+str(mean_eccentricity[orbit]))
        print('amplitude = '+str(amp))
        print('maximum_value = '+str(np.max(y_fit)))
        print('base_accretion = '+str(np.min(y_fit)))
        print('strength = '+str(sigmag))
        plt.savefig(file_name+'fittted_eccentricity_'+str(mean_eccentricity[orbit])+'.pdf')
#Make normalised fits plot
plt.clf()
fig = plt.figure()
fig.set_size_inches(4.0, 6.0)
gs = gridspec.GridSpec(2, 1)
gs.update(hspace=0.0)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0], sharex=ax1, sharey=ax1)
n_lines = len(y_fits[0])
c_index = np.linspace(0.0, 0.95, n_lines)
e_int = 0
for fit in y_fits[0]:
    ax1.plot(x, fit, color=plt.cm.magma(c_index[e_int]), label='e='+str(plot_e[0][e_int]))
    e_int = e_int + 1
ax1.legend(loc='center left', bbox_to_anchor=(0.985, 0.5))
ax1.set_ylabel("Normalised Accretion")
xticklabels = ax1.get_xticklabels()
plt.setp(xticklabels, visible=False)
ax1.tick_params(axis='x', which='major', direction="in")
e_int = 0
for fit in y_fits[1]:
    ax2.plot(x, fit, color=plt.cm.magma(c_index[e_int]), label='e='+str(plot_e[0][e_int]))
    e_int = e_int + 1
ax2.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax2.set_xlabel("Orbital Phase ($\phi$)")
ax2.set_ylabel("Normalised Accretion")
# FIXME(review): `args` is never defined anywhere in this file, so these two
# calls raise NameError at runtime — `args.text_font` looks like a leftover
# from an argparse-based script; replace with a constant or add arg parsing.
ax2.text(0.1, ax1.get_ylim()[1]*0.9, 'T2', va="center", ha="left", color='k', fontsize=args.text_font)
ax1.text(0.1, ax1.get_ylim()[1]*0.9, 'T1', va="center", ha="left", color='k', fontsize=args.text_font)
ax2.tick_params(axis='x', which='major', direction="in")
yticklabels = ax2.get_yticklabels()
plt.setp(yticklabels[-1], visible=False)
plt.savefig('normalised_fits.eps', bbox_inches='tight', pad_inches = 0.02)
plt.savefig('normalised_fits.pdf', bbox_inches='tight', pad_inches = 0.02)
#make beta plot
plt.clf()
plt.scatter(mean_eccentricity, beta[0], label='T1', marker='o')
plt.scatter(mean_eccentricity, beta[1], label='T2', marker='^')
plt.xlabel('eccentricity')
plt.ylabel('$\\beta$')
plt.legend(loc='best')
plt.savefig('beta.pdf')
"reggie@Reggies-MBP.unicph.domain"
] | reggie@Reggies-MBP.unicph.domain |
53a6dc003f4989217077b1e3d96e789daac4ada0 | cb620e43469856c5a9b578ada5e37a3e610adbbb | /tests/orm/relations/test_morph_to_many.py | 82803862b0910b9c05d28a5de19c63b630db2bd5 | [
"MIT"
] | permissive | sxslex/orator | 5c889bab5a5f43be672275f1623135c7e1cbc98c | 0835a7c0341a5ab7e051318a52ab27f58b695916 | refs/heads/develop | 2021-01-09T07:04:54.556519 | 2015-11-25T11:42:53 | 2015-11-25T11:42:53 | 46,634,951 | 1 | 0 | null | 2015-11-21T21:37:25 | 2015-11-21T21:37:24 | null | UTF-8 | Python | false | false | 5,908 | py | # -*- coding: utf-8 -*-
import arrow
from flexmock import flexmock, flexmock_teardown
from ... import OratorTestCase
from ...utils import MockConnection
from orator.query.builder import QueryBuilder
from orator.query.grammars import QueryGrammar
from orator.query.processors import QueryProcessor
from orator.query.expression import QueryExpression
from orator.orm.builder import Builder
from orator.orm.model import Model
from orator.orm.relations import MorphToMany
from orator.orm.relations.pivot import Pivot
from orator.orm.collection import Collection
class OrmMorphToManyTestCase(OratorTestCase):
    """Tests for the MorphToMany relation using flexmock expectations.

    Each test wires a fully mocked query builder/parent pair via
    _get_relation(), then asserts the SQL-building calls the relation makes.
    """

    def tearDown(self):
        # Verify and clear all flexmock expectations after every test.
        flexmock_teardown()

    def test_eager_constraints_are_properly_added(self):
        relation = self._get_relation()
        relation.get_query().get_query().should_receive('where_in').once().with_args('taggables.taggable_id', [1, 2])
        relation.get_query().should_receive('where').once()\
            .with_args('taggables.taggable_type', relation.get_parent().__class__.__name__)
        model1 = OrmMorphToManyModelStub()
        model1.id = 1
        model2 = OrmMorphToManyModelStub()
        model2.id = 2
        relation.add_eager_constraints([model1, model2])

    def test_attach_inserts_pivot_table_record(self):
        flexmock(MorphToMany, touch_if_touching=lambda: True)
        relation = self._get_relation()
        query = flexmock()
        query.should_receive('from_').once().with_args('taggables').and_return(query)
        # attach() must insert one pivot row carrying the morph type and extras.
        query.should_receive('insert').once()\
            .with_args(
                [{
                    'taggable_id': 1,
                    'taggable_type': relation.get_parent().__class__.__name__,
                    'tag_id': 2,
                    'foo': 'bar',
                }])\
            .and_return(True)
        mock_query_builder = flexmock()
        relation.get_query().should_receive('get_query').and_return(mock_query_builder)
        mock_query_builder.should_receive('new_query').once().and_return(query)
        relation.should_receive('touch_if_touching').once()
        relation.attach(2, {'foo': 'bar'})

    def test_detach_remove_pivot_table_record(self):
        flexmock(MorphToMany, touch_if_touching=lambda: True)
        relation = self._get_relation()
        query = flexmock()
        query.should_receive('from_').once().with_args('taggables').and_return(query)
        query.should_receive('where').once().with_args('taggable_id', 1).and_return(query)
        query.should_receive('where').once()\
            .with_args('taggable_type', relation.get_parent().__class__.__name__).and_return(query)
        query.should_receive('where_in').once().with_args('tag_id', [1, 2, 3])
        query.should_receive('delete').once().and_return(True)
        mock_query_builder = flexmock()
        relation.get_query().should_receive('get_query').and_return(mock_query_builder)
        mock_query_builder.should_receive('new_query').once().and_return(query)
        relation.should_receive('touch_if_touching').once()
        self.assertTrue(relation.detach([1, 2, 3]))

    def test_detach_clears_all_records_when_no_ids(self):
        flexmock(MorphToMany, touch_if_touching=lambda: True)
        relation = self._get_relation()
        query = flexmock()
        query.should_receive('from_').once().with_args('taggables').and_return(query)
        query.should_receive('where').once().with_args('taggable_id', 1).and_return(query)
        query.should_receive('where').once()\
            .with_args('taggable_type', relation.get_parent().__class__.__name__).and_return(query)
        # With no ids given, no where_in filter must be added: delete all.
        query.should_receive('where_in').never()
        query.should_receive('delete').once().and_return(True)
        mock_query_builder = flexmock()
        relation.get_query().should_receive('get_query').and_return(mock_query_builder)
        mock_query_builder.should_receive('new_query').once().and_return(query)
        relation.should_receive('touch_if_touching').once()
        self.assertTrue(relation.detach())

    def _get_relation(self):
        # Build a MorphToMany over the mocked builder/parent pair.
        builder, parent = self._get_relation_arguments()[:2]
        return MorphToMany(builder, parent, 'taggable', 'taggables', 'taggable_id', 'tag_id')

    def _get_relation_arguments(self):
        # Mocked parent model with a fixed key (1) and morph name.
        parent = flexmock(Model())
        parent.should_receive('get_morph_name').and_return(parent.__class__.__name__)
        parent.should_receive('get_key').and_return(1)
        parent.should_receive('get_created_at_column').and_return('created_at')
        parent.should_receive('get_updated_at_column').and_return('updated_at')
        query = flexmock(QueryBuilder(MockConnection().prepare_mock(), QueryGrammar(), QueryProcessor()))
        flexmock(Builder)
        builder = Builder(query)
        builder.should_receive('get_query').and_return(query)
        related = flexmock(Model())
        builder.set_model(related)
        builder.should_receive('get_model').and_return(related)
        related.should_receive('get_key_name').and_return('id')
        related.should_receive('get_table').and_return('tags')
        related.should_receive('get_morph_name').and_return(parent.__class__.__name__)
        # Constructing the relation is expected to join the pivot table and
        # constrain on the parent's id and morph type.
        builder.get_query().should_receive('join').once().with_args('taggables', 'tags.id', '=', 'taggables.tag_id')
        builder.should_receive('where').once().with_args('taggables.taggable_id', '=', 1)
        builder.should_receive('where').once().with_args('taggables.taggable_type', parent.__class__.__name__)
        return builder, parent, 'taggable', 'taggables', 'taggable_id', 'tag_id', 'relation_name', False
class OrmMorphToManyModelStub(Model):
    # Bare model stub with mass-assignment protection disabled for the tests.
    __guarded__ = []
class OrmMorphToManyModelPivotStub(Model):
    # Model stub that carries a pivot attribute, mimicking a hydrated
    # many-to-many result row.
    __guarded__ = []

    def __init__(self):
        super(OrmMorphToManyModelPivotStub, self).__init__()
        self.pivot = OrmMorphToManyPivotStub()
class OrmMorphToManyPivotStub(object):
    # Empty placeholder standing in for a pivot record.
    pass
| [
"sebastien.eustace@gmail.com"
] | sebastien.eustace@gmail.com |
db54103a8e02fc43379d24cc3ceb775e95ccf87f | 42e5ed5df5d8c8141691426a8f1996d955584a5e | /sprzet/urls.py | a364724a3a9177f575b8e3bc7bdc514455718412 | [] | no_license | pomidorki185ic/wypozyczalnia | 8ac71d1b5f438b8569ce756279d8a43805c1797a | c57ce4f5e48421b7562183f0b76da6920b2c0bca | refs/heads/master | 2023-05-01T16:15:19.980479 | 2021-05-19T11:00:54 | 2021-05-19T11:00:54 | 317,298,366 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from django.urls import path
from django.views.generic.base import TemplateView
from . import views
urlpatterns = [
path('spis_sprzetu', views.spis_sprzetu, name = 'spis_sprzetu'),
# path('profilKlienta/rejestracja', views.rejestracja, name = 'profilKlienta/rejestracja'),
#path('profilKlienta/profilKlienta/rejestracja', views.AboutView, name = 'home'),
]
| [
"dawidpawlowski98@wp.pl"
] | dawidpawlowski98@wp.pl |
9781044e5880f01dc60c019e6cb074879a5f0e35 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/registry_usage_py3.py | 4580c01f22c3d11f336e70cb9766cd0fb3f80ed2 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,529 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RegistryUsage(Model):
    """The quota usage for a container registry.

    :param name: The name of the usage.
    :type name: str
    :param limit: The limit of the usage.
    :type limit: long
    :param current_value: The current value of the usage.
    :type current_value: long
    :param unit: The unit of measurement. Possible values include: 'Count',
     'Bytes'
    :type unit: str or
     ~azure.mgmt.containerregistry.v2018_09_01.models.RegistryUsageUnit
    """

    # AutoRest-generated serialization map (attribute name -> wire key/type).
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'limit': {'key': 'limit', 'type': 'long'},
        'current_value': {'key': 'currentValue', 'type': 'long'},
        'unit': {'key': 'unit', 'type': 'str'},
    }

    def __init__(self, *, name: str=None, limit: int=None, current_value: int=None, unit=None, **kwargs) -> None:
        super(RegistryUsage, self).__init__(**kwargs)
        self.name = name
        self.limit = limit
        self.current_value = current_value
        self.unit = unit
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
879917b46e8c4961318c964911dd89f79325b030 | a974de6285db76b669937e9619a805c226bb11a6 | /04_The_Path_of_Python/T-resource_Python_201904/ex/ex4_3.py | 3cdbf41669af8c237a20b793e2c8e0d841b68d20 | [] | no_license | Vincent105/python | 1b3f753e6b9db711e320b53c3e0a04c9fc818c37 | 65351234310a81a85331b0f11aef42507868774d | refs/heads/master | 2021-08-03T21:06:57.977065 | 2021-07-30T09:02:56 | 2021-07-30T09:02:56 | 185,359,652 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | # ex4_3.py
# Write a small grade table (name, Chinese, English, total, average) to out.txt.
# Using a "with" block guarantees the file is closed even if a print() fails
# (the original open()/close() pair would leak the handle on an exception).
with open("out.txt", mode="w") as f:
    print(" 姓名 國文 英文 總分 平均", file=f)
    print("%3s %4d %4d %4d %3.1f" % ("洪冰儒", 98, 90, 188, 188/2), file=f)
    print("%3s %4d %4d %4d %3.1f" % ("洪雨星", 96, 95, 191, 191/2), file=f)
    print("%3s %4d %4d %4d %3.1f" % ("洪冰雨", 92, 88, 180, 180/2), file=f)
    print("%3s %4d %4d %4d %3.1f" % ("洪星宇", 93, 97, 190, 190/2), file=f)
| [
"vincent1050917@gmail.com"
] | vincent1050917@gmail.com |
c2365e9ea85baa1801be84fa21fd6de815a01787 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/147_v2/hundred_days.py | 93e78caccfce0c3ed9b677ebafe9b895680e3cc5 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 452 | py | from datetime import date
from dateutil.rrule import *
import dateutil
TODAY = date(year=2018, month=11, day=29)
def get_hundred_weekdays(start_date=TODAY):
"""Return a list of hundred date objects starting from
start_date up till 100 weekdays later, so +100 days
skipping Saturdays and Sundays"""
data = rrule(DAILY,count=100,byweekday=range(0,5),dtstart=start_date)
return [value.date() for value in data]
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
9059dc9806200db1b678dcd1aab3278e172c9e60 | 8e1be167066e30eff91c26c0757211cf3cf8b016 | /django/full_stack/login_and_registration/apps/users/migrations/0001_initial.py | 859f377ff7d427d869acba8cbbf4213ceebab8a9 | [] | no_license | dojo-solutions/online-ft-python | 074d0ba968f5a77eaec1bca0904232f2aa29051a | b4f6941d0bba376d121a40a6429b815d5b03c32f | refs/heads/master | 2020-04-21T11:52:31.390772 | 2019-03-02T01:27:54 | 2019-03-02T01:27:54 | 169,542,448 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-02-25 17:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the users app: creates the User table with name,
    email, password-hash columns and auto-managed created_at/updated_at
    timestamps. Auto-generated by Django 1.10."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('pw_hash', models.CharField(max_length=500)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
"wes@tao.team"
] | wes@tao.team |
846bfc48306db4e64463137f1678d3024cae52f4 | d6952f048727add5b54a521d04f6c9b5889bcd35 | /pollination_sdk/models/new_recipe_package.py | 8be4cb90b14461dfbef3d33cd2532f98983cdceb | [] | no_license | TfedUD/python-sdk | bf719644041c2ab7b741af9c7fb8e5acfe085922 | 7ddc34611de44d2f9c5b217cf9b9e7cec27b2a27 | refs/heads/master | 2023-08-10T21:13:45.270193 | 2021-06-21T14:48:36 | 2021-06-21T14:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,401 | py | # coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.13.0
Contact: info@pollination.cloud
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pollination_sdk.configuration import Configuration
class NewRecipePackage(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'manifest': 'Recipe',
'readme': 'str'
}
attribute_map = {
'manifest': 'manifest',
'readme': 'readme'
}
def __init__(self, manifest=None, readme='', local_vars_configuration=None): # noqa: E501
"""NewRecipePackage - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._manifest = None
self._readme = None
self.discriminator = None
self.manifest = manifest
if readme is not None:
self.readme = readme
@property
def manifest(self):
"""Gets the manifest of this NewRecipePackage. # noqa: E501
The Recipe manifest to be created # noqa: E501
:return: The manifest of this NewRecipePackage. # noqa: E501
:rtype: Recipe
"""
return self._manifest
@manifest.setter
def manifest(self, manifest):
"""Sets the manifest of this NewRecipePackage.
The Recipe manifest to be created # noqa: E501
:param manifest: The manifest of this NewRecipePackage. # noqa: E501
:type manifest: Recipe
"""
if self.local_vars_configuration.client_side_validation and manifest is None: # noqa: E501
raise ValueError("Invalid value for `manifest`, must not be `None`") # noqa: E501
self._manifest = manifest
@property
def readme(self):
"""Gets the readme of this NewRecipePackage. # noqa: E501
The README file to attach to this package # noqa: E501
:return: The readme of this NewRecipePackage. # noqa: E501
:rtype: str
"""
return self._readme
@readme.setter
def readme(self, readme):
"""Sets the readme of this NewRecipePackage.
The README file to attach to this package # noqa: E501
:param readme: The readme of this NewRecipePackage. # noqa: E501
:type readme: str
"""
self._readme = readme
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NewRecipePackage):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
    """Return True when the two objects are not equal."""
    # Exact negation of __eq__: foreign types always compare unequal.
    return (not isinstance(other, NewRecipePackage)
            or self.to_dict() != other.to_dict())
| [
"antoinedao1@gmail.com"
] | antoinedao1@gmail.com |
28ed22db5c9fb33f2e867f2a54595fb76d76c037 | 4820b6d9665a487b7bd3f91a64354110dc61d31f | /palindrome permutations 2.py | c23d54d40333fba9768cfa1a99410ea2efd0d478 | [] | no_license | iamshivamgoswami/backtracking | 4f5c84342e7bf1865556ef87ee089c249fc73aef | 7e86f7f2852c14335d1f2b4a2a6bc8e577f33c3d | refs/heads/main | 2023-07-03T07:03:27.100901 | 2021-07-27T07:27:25 | 2021-07-27T07:27:25 | 389,594,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | import collections
class Solution:
    def generatePalindromes(self, s: str):
        """Return every distinct palindromic permutation of *s*.

        A palindrome exists only if at most one character has an odd
        count.  We backtrack over permutations of half the character
        multiset and mirror each half around the optional middle char.
        """
        freq = collections.Counter(s)
        odd = [ch for ch, cnt in freq.items() if cnt % 2]
        if len(odd) > 1:
            # Two or more odd-count characters: no palindrome possible.
            return []
        middle = odd[0] if odd else ""

        # Multiset of characters available for the left half.
        half_freq = collections.Counter(
            {ch: cnt // 2 for ch, cnt in freq.items() if cnt >= 2})
        half_len = sum(half_freq.values())
        results = []

        def backtrack(prefix):
            if len(prefix) == half_len:
                left = "".join(prefix)
                results.append(left + middle + left[::-1])
                return
            for ch in half_freq:
                if half_freq[ch] > 0:
                    half_freq[ch] -= 1
                    prefix.append(ch)
                    backtrack(prefix)
                    prefix.pop()
                    half_freq[ch] += 1

        backtrack([])
        return results
# Ad-hoc smoke test: "aaa" has exactly one palindromic permutation.
a=Solution()
print(a.generatePalindromes("aaa"))
| [
"shivamgoswami12@gmail.com"
] | shivamgoswami12@gmail.com |
483fa6de553dfe149fd491fe21befb574ca1242e | 06aa3ec3262f6dd6866ea194ed6385f8e53509bf | /manuscript_codes/AML211DiffTrack/generateNetwork.py | 1fbc726986d5530780c38878ff1d0b29d5980caf | [] | no_license | KuehLabUW/UPSIDE | 95ce078382792d1beb0574c3b19c04e467befa58 | 3c90de9677f24e258800cb95bce6cb528f4ad4ac | refs/heads/master | 2023-07-13T15:58:07.963672 | 2021-08-30T21:14:48 | 2021-08-30T21:14:48 | 329,134,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,187 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 13:04:17 2020
@author: phnguyen
"""
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from graphviz import Digraph
import pandas as pd
import os
def plotNetwork(Matrix):
    """Render a cell-state transition network with graphviz.

    Matrix[i, j] is the transition magnitude from state i to state j;
    the diagonal Matrix[i, i] encodes the residence of each state.
    Node size scales with residence, edge thickness with transition
    magnitude.  Side effect: opens the rendered graph via f.view().
    """
    # the matrix is set up as position i j is direction from element i to j.
    # weight of the line is the magnitude of the transition
    # size of the node is how much residence the node has
    # all is done in graphviz
    f = Digraph('cell_state_transition3')
    for nidx in range(len(Matrix)):
        circ_size = int(Matrix[nidx,nidx]*10)
        # NOTE(review): text_label is hard-coded False, so the named-label
        # branch below is currently dead code — confirm it is kept on
        # purpose for manual toggling.
        text_label = False
        f.attr('node',shape = 'circle',fixedsize = 'false',width = '{}'.format(circ_size),height = '{}'.format(circ_size))
        #f.attr('node',shape = 'circle')
        if text_label == True:
            # Human-readable state names (A* = ?, S* = ?, DB = ?) —
            # semantics not evident from this file.
            if nidx + 1 == 1:
                f.node('A1',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 2:
                f.node('A2',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 3:
                f.node('A3',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 4:
                f.node('S1',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 5:
                f.node('S2',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 6:
                f.node('S3',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 7:
                f.node('S4',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
            elif nidx + 1 == 8:
                f.node('DB',fontsize = '{}'.format(int(Matrix[nidx,nidx]/10)))
        else:
            # Anonymous numeric node labels.
            f.node('{}'.format(nidx),fontsize = '{}'.format(int(Matrix[nidx,nidx]*100)))
    # Only draw edges for transitions above this magnitude.
    threshold = 0.09
    for i in range(len(Matrix)):
        for j in range(len(Matrix)):
            if i != j and Matrix[i,j] > threshold:
                thickness = int(Matrix[i,j]*40)
                f.edge('{}'.format(i),'{}'.format(j),penwidth = '{}'.format(2+thickness))
    f.view()
#%% now load the the transition matrix
csvs_dirname = '/media/phnguyen/Data2/Imaging/CellMorph/data/AML211DiffTrack/csvs/'
os.chdir(csvs_dirname)
df = pd.read_csv('CombinedUMAPDirFluoClusterTC.csv')
df = df.reset_index()
# Relabel cluster 6 as cluster 4 (merging the two clusters).
df = df.replace({'cluster':6}, {'cluster': 4}, regex=True)
#%%
# Restrict to positions 1-5 and t in (0, 1801).
# NOTE(review): the *20 factors suggest 20 time sub-steps per frame — confirm.
subdf_crit = (df['pos']>0) & (df['pos']<6) & (df['t']>0*20) & (df['t']<90*20+1)
subdf = df[subdf_crit]
subdf = subdf.reset_index()
#get group information
subdf['cluster'] = subdf['cluster'] - 1
cluster = subdf['cluster'].values
#make an empty matrix
AM = np.zeros((len(np.unique(cluster)),len(np.unique(cluster))))
#fill out the adjacent matrix
# For every cell, find its tracked partner at t+1 (same pos, cell == pcell)
# and count the cluster-to-cluster transition.
for c in range(0,len(cluster)):
    g_now = subdf.cluster[c]
    pos_now = subdf.pos[c]
    t_now =subdf.t[c]
    pcell = subdf.pcell[c]
    if pcell != 0 :
        df_partner = subdf[(subdf['pos'] == pos_now) & (subdf['cell'] == pcell) & (subdf['t'] == t_now+1)]
        if len(df_partner['cluster']) == 1:
            g_partner = df_partner.cluster
            AM[g_now,g_partner] = AM[g_now,g_partner] + 1
    #print(c)
# Normalize by total transitions in each state
NormF = np.sum(AM,axis = 1)
AMN = AM/NormF[:,None]
# plot the the figure
plotNetwork(AMN)
#%%
#calculate distance traveled
# For each row, the Euclidean displacement to the same tracked cell in the
# next frame; 0 when no unique partner exists.
DIST =[];
for c in range(0,len(df)):
    x_now = df.Xcenter[c]
    y_now = df.Ycenter[c]
    t_now =df.t[c]
    pos_now = df.pos[c]
    pcell = df.pcell[c]
    if pcell != 0 :
        df_partner = df[(df['pos'] == pos_now) & (df['cell'] == pcell) & (df['t'] == t_now+1)]
        if len(df_partner['cluster']) == 1:
            x_partner = float(df_partner.Xcenter.values)
            y_partner = float(df_partner.Ycenter.values)
            # straight-line displacement between consecutive frames
            dist = np.linalg.norm(np.array((x_now,y_now))-np.array((x_partner,y_partner)))
            DIST.append(dist)
        else:
            DIST.append(0)
    else:
        DIST.append(0)
df['distance'] = DIST
#df.to_csv('CombinedUMAPDirFluoClusterTCdist.csv')
| [
"kuehlab@uw.edu"
] | kuehlab@uw.edu |
55a82d57386c5a5cf6b2d392e5d774c58f62e1d3 | 2008ff2a5e0a7790c4578d2c8ce402e0bb24bfda | /orm_demo/store/migrations/0002_product_description.py | d7f8266267093ff7c3ade5c4483586a0a25ca433 | [] | no_license | ganqzz/django_demo | 788a44a212cbf9e82b2ca84ba86147a90943756a | c880fc784e7bca4a78709ad76772c924b97dc393 | refs/heads/master | 2023-07-09T07:53:56.549362 | 2021-08-13T11:46:39 | 2021-08-13T11:46:39 | 356,131,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # Generated by Django 3.1.3 on 2020-12-20 22:31
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the optional free-form 'description' text column to Product.

    dependencies = [
        ('store', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='description',
            field=models.TextField(default='', blank=True),
        ),
    ]
| [
"ganqzz@users.noreply.github.com"
] | ganqzz@users.noreply.github.com |
83342e6852c6e51579957f5c07ce95932a2d5367 | 87b8fc1ba1d5a5a9014cf2e69dcabd7df19e1fc2 | /src/pustakalaya_apps/document/migrations/0025_auto_20180510_1559.py | a4ca646f78a9ca9b2cfbb2a94e9d888169e4310b | [] | no_license | Becram/elibrary-olen | 5a885871b05dc9076a04575748a5bbce6b40c69c | f73772569d1a67fda4e5d44af9d1ed2ddd41166e | refs/heads/master | 2020-04-06T11:13:29.676136 | 2018-10-25T06:36:42 | 2018-10-25T06:36:42 | 157,408,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-10 10:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Widens/relabels the two free-text year fields on Document; both
    # become optional CharFields (max 35 chars) with English verbose names.

    dependencies = [
        ('document', '0024_auto_20180509_1547'),
    ]

    operations = [
        migrations.AlterField(
            model_name='document',
            name='publication_year_on_text',
            field=models.CharField(blank=True, max_length=35, null=True, verbose_name='Publication year'),
        ),
        migrations.AlterField(
            model_name='document',
            name='year_of_available_on_text',
            field=models.CharField(blank=True, max_length=35, null=True, verbose_name='Year of available'),
        ),
    ]
| [
"nishantkarki305@gmail.com"
] | nishantkarki305@gmail.com |
517cf48de68a5d5a2b690755e4ee2036cd8f8b42 | 65b9a63e8c132f32aeb56961968f5e363bd9a087 | /20191107_Keras乘用车销量预测神经网络训练对比实验/code/run.py | f46def6bde3e7c0d5fea16a16fd4dd136888bbbf | [] | no_license | 346644054/examples2019 | e70f13cfb56c3478fc6e335c730e0e70e70a6226 | 5f9777e7a887e635971156354f56ce065fa3f41e | refs/heads/master | 2022-04-09T03:52:52.973414 | 2020-02-28T03:05:02 | 2020-02-28T03:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,656 | py | # -*- coding: utf-8 -*-
"""
network
fun:
env:
Linux ubuntu 4.4.0-31-generic x86_64 GNU;python 2.7;tensorflow1.10.1;Keras2.2.4
pip2,matplotlib2.2.3
"""
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import keras
import time
import datetime
import proprocess
import network
from keras import models, optimizers
from keras.layers import Dense, Dropout
from keras.models import load_model,model_from_json
from keras import backend as K
from sklearn import preprocessing
import datetime
from sklearn.utils import shuffle
from keras.callbacks import EarlyStopping
from keras.layers.normalization import BatchNormalization
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin TensorFlow/Keras to GPU 0
if __name__ == "__main__":
    time1 = time.time()
    data_path = "dataset/train_sales_data.csv"
    #################### ######################
    mode = "train" # train test docker
    print("#################work mode", mode, "#######################")
    if mode == "train":
        # data preprocessing
        #(trainX, trainY) = proprocess.generate_train_seq(train_images_folder_path, train_track_folder_path)
        load_data = proprocess.DataSets
        trainX, trainY, validX, validY = load_data.load_passenger_car(data_path)
        model = network.build_network()
        # Train and save the learning-curve plot to output/history.png.
        history = network.train_network(trainX, trainY, validX, validY, model, epochs=1000)
        network.plt_result(history, "output", "history.png")
    elif mode == "test":
        network.helloworld()
    else:
        print("mode error!")
    time2 = time.time()
    print('time use:' + str(time2 - time1) + 's')
| [
"elesun2018@gmail.com"
] | elesun2018@gmail.com |
ec037038c62bc775b8da6c8634bf9e729c1f0797 | b2319c5e14c94edfb5a39e4c490c1ae6183651ed | /deepgoweb/apps/accounts/models.py | bc9bce7da4d76623d803e2b74b30143d7724999e | [] | no_license | coolmaksat/deepgoweb | 6d67f45059d7bdb4548d50c182a038c6f9c70a31 | fd4904b6b18dd2af06e000679f406b7353a3534f | refs/heads/master | 2021-06-12T14:42:14.513686 | 2021-04-17T10:23:39 | 2021-04-17T10:23:39 | 161,017,035 | 0 | 0 | null | 2018-12-09T07:49:26 | 2018-12-09T07:49:26 | null | UTF-8 | Python | false | false | 1,119 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save, pre_save
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
# (db value, human label) pairs for UserProfile.gender.
GENDER_CHOICES = (
    (0, 'Male'),
    (1, 'Female'))
def check_unique_email(sender, instance, **kwargs):
    # pre_save guard: Django's default User model does not enforce a
    # unique e-mail, so reject a save when another user (different
    # username) already owns this address.
    if instance.email and sender.objects.filter(
            email=instance.email).exclude(username=instance.username).count():
        raise ValidationError(_("The email %(email)s already exists!") % {
            'email': instance.email
        })


pre_save.connect(check_unique_email, sender=User)
class UserProfile(models.Model):
    # One-to-one extension of the built-in User (the user FK doubles as
    # the primary key, so there is at most one profile per user).
    user = models.OneToOneField(
        User, primary_key=True, on_delete=models.CASCADE)
    birth_date = models.DateField(null=True)
    gender = models.IntegerField(choices=GENDER_CHOICES, default=0)


def create_user_profile(sender, instance, created, **kwargs):
    # post_save hook: auto-create the profile the first time a User is saved.
    if created:
        UserProfile.objects.create(
            user=instance)


post_save.connect(create_user_profile, sender=User)
| [
"coolmaksat@gmail.com"
] | coolmaksat@gmail.com |
1e82bb07d3892933aa9f580b0e62ec13e781beb6 | 22d7d575737eb7d864926993163f73d0bfebd6bc | /programmers/level2/순위검색.py | dad1c07219f4a914dd6624fef568047f9320b7e9 | [] | no_license | taehyungz/Algorithm | 6c624460716424115d3c38587f176eeb0a4e00d9 | 87c02dd047152a5bb4fafcf51be53c329ad563dc | refs/heads/master | 2022-06-26T13:23:50.969544 | 2022-06-23T12:29:04 | 2022-06-23T12:29:04 | 202,279,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | from itertools import combinations
import collections
from bisect import *
def solution(info, query):
    """Rank-search: answer wildcard queries over applicant records.

    Each *info* entry is "lang position career food score".  Each *query*
    is "lang and position and career and food score" where any attribute
    may be the wildcard '-'.  Returns, per query, the number of applicants
    matching all four attributes with score >= the queried score.

    Every applicant is indexed under all 16 wildcard masks of its four
    attributes, and each bucket's scores are kept sorted so a query is a
    single binary search.

    Fix: the original looked queries up directly on a defaultdict, which
    silently inserts an empty bucket for every unseen key; ``.get`` with
    an empty-list default leaves the index untouched.
    """
    buckets = collections.defaultdict(list)
    for person in info:
        fields = person.split()
        attrs, score = fields[:-1], int(fields[-1])
        buckets[''.join(attrs)].append(score)
        # Index every wildcard mask of the 4 attributes.
        for num in range(1, 5):
            for idxs in combinations(range(4), num):
                masked = attrs[:]
                for idx in idxs:
                    masked[idx] = '-'
                buckets[''.join(masked)].append(score)
    for scores in buckets.values():
        scores.sort()
    answer = []
    for q in query:
        parts = q.split(" and ")
        # The last element is "food score": split it apart.
        parts.extend(parts.pop().split())
        target = int(parts.pop())
        scores = buckets.get(''.join(parts), [])
        # Everything at or beyond bisect_left(target) scores >= target.
        answer.append(len(scores) - bisect_left(scores, target))
    return answer
"thkthk97@naver.com"
] | thkthk97@naver.com |
d6891e0277a475c88b44289a0aa0412e9a07c473 | ce661026009d622db924080d85ab529f1cae6b60 | /projecteuler.net/wip,74.py | 224eff7255f3ca4eefe694ca10860e3677307103 | [] | no_license | predavlad/projecteuler | d54f5d85ab0133b19b54b4168990b90f09a0184c | 58e1637733bb7e01e44bfac707353ecfe84d9b19 | refs/heads/master | 2021-01-23T15:29:26.257019 | 2019-02-09T10:11:23 | 2019-02-09T10:11:23 | 12,952,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,554 | py | import time
# 170 seconds, although it can be improved if we count all permutations of a number at the same time, instead of
# going through everything
start_time = time.time()  # wall-clock start for the timing printout below
fact_cache = {}      # n -> n!, filled lazily by fact()
dig_fact_cache = {}  # n -> digit-factorial sum, used by digit_fact_sum()
loop_cache = {}      # n -> chain length, back-filled by get_loop_count()
def fact(n):
    """Return n!, memoizing results in the module-level fact_cache."""
    global fact_cache
    # Base cases short-circuit before touching the cache.
    if n == 0 or n == 1:
        return 1
    if n not in fact_cache:
        fact_cache[n] = fact(n - 1) * n
    return fact_cache[n]
def digit_fact_sum(n):
    """Return the sum of the factorials of the decimal digits of n.

    Results are memoized in the module-level ``dig_fact_cache``.
    Fix: the original checked the cache but never stored into it, so
    the memoization never took effect.
    """
    global dig_fact_cache
    if n not in dig_fact_cache:
        dig_fact_cache[n] = sum(map(fact, map(int, str(n))))
    return dig_fact_cache[n]
def get_loop_count(n):
    """Length of the digit-factorial-sum chain starting at n (Euler 74).

    Iterates n -> digit_fact_sum(n) until a value repeats, then returns
    the number of distinct terms produced.  As a side effect, back-fills
    the module-level loop_cache with the chain length for every node
    walked, so later calls on those numbers can be skipped.
    """
    global loop_cache
    orig_n = n
    is_loop = False
    chain = {}
    while not is_loop:
        new_n = digit_fact_sum(n)
        chain[n], n = new_n, new_n
        if new_n in chain:
            # new_n was already a key: the chain has closed into a cycle.
            is_loop = True
            chain_len = len(chain)
            current = orig_n
            # Tail nodes (before the cycle): each successive node sees a
            # chain one element shorter.
            while current != new_n:
                loop_cache[current] = chain_len
                current = chain[current]
                chain_len -= 1
            # Cycle nodes: all assigned the remaining chain_len.
            # NOTE(review): every cycle member gets the same count here —
            # looks intentional for the puzzle's counting, confirm.
            for i in range(chain_len):
                loop_cache[current] = chain_len
                current = chain[current]
    return len(chain)
# Sanity checks against known chain lengths from the problem statement.
assert get_loop_count(69) == 5
assert get_loop_count(145) == 1
assert get_loop_count(540) == 2
assert get_loop_count(78) == 4
assert get_loop_count(1479) == 60
# Count starting values below LIMIT whose chain has exactly COUNT terms.
# NOTE: Python 2 syntax below (xrange, print statements).
LIMIT = 10 ** 3
COUNT = 60
counter = 0
for i in xrange(LIMIT):
    if i not in loop_cache:
        nr = get_loop_count(i)
    if loop_cache[i] == COUNT:
        counter += 1
        if counter % 10 == 0:
            # periodic progress output
            print counter, i
print counter
print time.time() - start_time, "seconds"
| [
"preda.vlad@yahoo.com"
] | preda.vlad@yahoo.com |
812662b6d90892d254a75762deed638a32163b4f | ffadf9541d01cf9af20c419759d48b1eb01bfd35 | /pachong/PCdemo1/day16/数据格式转存.py | 346d8cb0bafaacbd1e96a7fe95ccb5ff16e63996 | [] | no_license | 1987617587/lsh_py | b1bb1016eaafcba03bbc4a5310c1db04ae227af4 | 80eb5175cd0e5b3c6c5e2ebb906bb78d9a8f9e0d | refs/heads/master | 2021-01-02T05:14:31.330287 | 2020-06-20T05:18:23 | 2020-06-20T05:18:23 | 239,498,994 | 2 | 1 | null | 2020-06-07T23:09:56 | 2020-02-10T11:46:47 | Python | UTF-8 | Python | false | false | 3,073 | py | # author:lsh
# datetime:2020/4/14 14:39
'''
.::::. _oo0oo_
.::::::::. o8888888o
::::::::::: 88" . "88
..:::::::::::' (| -_- |)
'::::::::::::' 0\ = /0
.:::::::::: ___/`---'\___
'::::::::::::::.. .' \\| |# '.
..::::::::::::. / \\||| : |||# \
``:::::::::::::::: / _||||| -:- |||||- \
::::``:::::::::' .:::. | | \\\ - #/ | |
::::' ':::::' .::::::::. | \_| ''\---/'' |_/ |
.::::' :::: .:::::::'::::. \ .-\__ '-' ___/-. /
.:::' ::::: .:::::::::' ':::::. ___'. .' /--.--\ `. .'___
.::' :::::.:::::::::' ':::::. ."" '< `.___\_<|>_/___.' >' "".
.::' ::::::::::::::' ``::::. | | : `- \`.;`\ _ /`;.`/ - ` : | |
...::: ::::::::::::' ``::. \ \ `_. \_ __\ /__ _/ .-` / /
```` ':. ':::::::::' ::::.. `-.____`.___ \_____/___.-`___.-'
'.:::::' ':'````.. `=---='
女神保佑 永无BUG 佛祖保佑 永无BUG
'''
import pandas as pd
import csv
import codecs
import json
# csv===> excel
# df = pd.read_csv('./data/a.csv',encoding='utf-8')
# df.to_excel('./data/a.xlsx',sheet_name='csv转excel')

# excel ===> csv
# df_xls = pd.read_excel('./data/a.xlsx',index_col=0)
# df_xls.to_csv('./data/b.csv',encoding='utf-8')

# csv ===> json: one JSON object per CSV row, written line by line.
df_csv = pd.read_csv('./data/a.csv', encoding='utf-8')
count = df_csv.shape[0]  # number of rows
with open('./data/b.json', 'w', encoding='utf-8') as file:
    for i in range(count):
        d = {
            '0': df_csv.iloc[i, 0],
            '1': df_csv.iloc[i, 1],
            '2': df_csv.iloc[i, 2],
        }
        file.write(json.dumps(d) + '\n')

# json ===> csv: read one JSON object per line, emit one CSV row each.
# Fixes vs. the original:
#  - the source file must be opened for *reading* ('w' truncated it and
#    made readline() fail),
#  - the loop must advance to the next line (it never re-read the file,
#    so a non-empty first line looped forever),
#  - keys are strings, so the third column is d['3'] (d[3] with an int
#    key would raise KeyError).
with open('./data/a.json', 'r', encoding='utf-8') as file1:
    with open('./data/c.csv', 'w', encoding='utf-8') as file2:
        wr = csv.writer(file2)
        wr.writerow(['1', '2', '3'])
        line = file1.readline()
        while line:
            d = json.loads(line)
            wr.writerow([d['1'], d['2'], d['3']])
            line = file1.readline()
"1987617587@qq.com"
] | 1987617587@qq.com |
d8e589c781a6a06fefb67ae1e521c503eb350e17 | 9de64f94ffe3b57de373bebdd5344d0d4e725a9c | /lib/models/__init__.py | 2c735037871bae9eb7ff1fda8bc6d6c6e4333962 | [
"MIT"
] | permissive | CoinCheung/BiSeNet | 1196ed0463d067e8b145b716aae39cf1963f4ffb | f2b901599752ce50656d2e50908acecd06f7eb47 | refs/heads/master | 2023-02-17T08:45:51.374875 | 2023-02-05T03:08:32 | 2023-02-05T03:08:32 | 159,607,436 | 1,312 | 322 | MIT | 2023-02-05T03:08:33 | 2018-11-29T04:27:51 | Python | UTF-8 | Python | false | false | 146 | py |
from .bisenetv1 import BiSeNetV1
from .bisenetv2 import BiSeNetV2
# Map config model-type strings to their network classes.
model_factory = {
    'bisenetv1': BiSeNetV1,
    'bisenetv2': BiSeNetV2,
}
| [
"867153576@qq.com"
] | 867153576@qq.com |
91d121b6b72945f63cdeb6e0e53dff0100fbec44 | 4cc285b0c585241ff4404087e6fbb901195639be | /NeuralNetworkNumbers/venv/Lib/site-packages/sklearn/datasets/tests/test_covtype.py | bd775d75563d31b490373811c09ca4abba681004 | [] | no_license | strazhg/NeuralNetworksPython | 815542f4ddbb86e918e657f783158f8c078de514 | 15038e44a5a6c342336c119cdd2abdeffd84b5b1 | refs/heads/main | 2023-04-16T18:51:29.602644 | 2021-04-27T14:46:55 | 2021-04-27T14:46:55 | 361,944,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f48c1479cc19ae778036630c6d97033d44099090499f8db51ddd327f13e7f9fd
size 1677
| [
"golubstrazh@gmail.com"
] | golubstrazh@gmail.com |
ece5d3da37a3165a695a7b77b2fc0daaf58c442c | de9b8b7192a0a81e9249823bb2b86f0b7e452863 | /.history/main_20171106230505.py | 0f73c604abb8fcbe3b81c435a1da27dfbf8b824e | [
"MIT"
] | permissive | reecebenson/uwe-dadsa-tennis-a | f5eaeb1b96d4e61f29279514e68eeea8ad6533db | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | refs/heads/master | 2023-07-08T16:13:23.963348 | 2017-11-30T12:07:01 | 2017-11-30T12:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | # DADSA - Assignment 1
# Reece Benson
import random
from classes import Menu as Menu
from classes import Handler as Handler
class App():
    """Entry point: loads season data via Handler, builds random match
    rounds per gender, then waits for the user before exiting."""
    # Define the variables we will be using
    debug = True
    handler = None

    # Define all of the properties we will need to use
    def __init__(self):
        # Load our handler
        self.handler = Handler.Handler(self)
        self.handler.load()

        # Generate rounds
        self.generate_rounds()

        # Hold the program
        self.exit()

    # Generate our rounds from our player list
    def generate_rounds(self):
        # Let's generate our random rounds from scratch
        round_data = { }

        # Write our new data to memory
        for seasonId in self.handler.get_seasons():
            season = self.handler.get_season(seasonId)
            players = season.players()

            # Generate our rounds
            for gender in players:
                # Create our gendered rounds
                if(not gender in round_data):
                    # Default Round Cap
                    roundCap = 3

                    # Do we have a Round Cap overrider for this gender?
                    if(gender + "_cap" in season.settings()):
                        roundCap = season.settings()[gender + "_cap"]

                    # Update our round data
                    round_data.update({ gender: [ { "_roundCap": roundCap } ] })

                # Create our round data from players
                # random.sample of the full list = an unbiased shuffle copy
                rnd_players = random.sample(players[gender], len(players[gender]))
                for i in range(int(len(rnd_players) / 2 )):
                    # Grab our versus players (consecutive pairs; an odd
                    # player out is silently dropped)
                    playerOne = rnd_players[i * 2]
                    playerTwo = rnd_players[(i * 2) + 1]
                    round_data[gender].append({ playerOne.name(): 0, playerTwo.name(): 0 })

        # NOTE(review): indentation reconstructed — print assumed to run
        # once after all seasons are processed; confirm.
        print(round_data)

    # A method which exits the program after the user has pressed the Return key
    def exit(self):
        input(">>> Press <Return> to terminate the program")
        # NOTE(review): this calls the *builtin* exit(), not self.exit();
        # the method name shadows it confusingly — confirm intent.
        exit()
App() | [
"me@reecebenson.me"
] | me@reecebenson.me |
1327bb0c4a5e509d0c7c62b0352b208928bf34fe | 4ff0ff57e0fee60caf90cf1a2319b7615858b5ff | /cw_hr_holidays_extended/reports/__init__.py | f81bbb1467b1c4b3122cfce1a6ad1b2c93ff037b | [] | no_license | akradore/ACC_12 | 257a590acfb1afc92122e46b6db0ccbfdb3969be | 5ed668bda8177586695f5dc2e68a48806eccf976 | refs/heads/master | 2023-03-17T08:53:58.822549 | 2020-02-24T12:32:05 | 2020-02-24T12:32:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | # -*- coding: utf-8 -*-
from . import holidays_summary_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"arun01@mmproject.net"
] | arun01@mmproject.net |
b808d02359c77895488e059565589a1e8d9c1703 | 9734c93c86c982b1ce046340bac9e53645b261b8 | /tests/cli/helpers/codepage.py | 136905dc4ab533842e791e8dbbc543f7b954b2ed | [
"Apache-2.0"
] | permissive | log2timeline/plaso | cd72dd407d6c5627506c14f58cb8f6a6926aa808 | d6022f8cfebfddf2d08ab2d300a41b61f3349933 | refs/heads/main | 2023-09-02T08:43:48.241198 | 2023-08-19T07:28:12 | 2023-08-19T07:28:12 | 23,812,315 | 1,506 | 421 | Apache-2.0 | 2023-09-04T08:24:53 | 2014-09-08T23:29:28 | Python | UTF-8 | Python | false | false | 1,704 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the codepage CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import codepage
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class CodepagergumentsHelperTest(cli_test_lib.CLIToolTestCase):
  """Tests for the codepage CLI arguments helper."""
  # NOTE(review): class name looks like a typo for
  # CodepageArgumentsHelperTest; renaming is cosmetic only.

  # pylint: disable=no-member,protected-access

  _EXPECTED_OUTPUT = """\
usage: cli_helper.py [--codepage CODEPAGE]
Test argument parser.
{0:s}:
--codepage CODEPAGE The preferred codepage, which is used for decoding
single-byte or multi-byte character extracted strings.
""".format(cli_test_lib.ARGPARSE_OPTIONS)

  def testAddArguments(self):
    """Tests the AddArguments function."""
    argument_parser = argparse.ArgumentParser(
        prog='cli_helper.py', description='Test argument parser.',
        add_help=False,
        formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)

    codepage.CodepageArgumentsHelper.AddArguments(argument_parser)

    # The rendered --help text must match the canonical output exactly.
    output = self._RunArgparseFormatHelp(argument_parser)
    self.assertEqual(output, self._EXPECTED_OUTPUT)

  def testParseOptions(self):
    """Tests the ParseOptions function."""
    options = cli_test_lib.TestOptions()
    options.preferred_codepage = 'cp1252'

    test_tool = tools.CLITool()
    codepage.CodepageArgumentsHelper.ParseOptions(options, test_tool)

    self.assertEqual(test_tool._preferred_codepage, options.preferred_codepage)

    # A non-CLITool object must be rejected.
    with self.assertRaises(errors.BadConfigObject):
      codepage.CodepageArgumentsHelper.ParseOptions(options, None)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| [
"noreply@github.com"
] | log2timeline.noreply@github.com |
def one_edit_distance(s1, s2):
    """Return True iff s1 and s2 are exactly one edit apart.

    One edit = a single insertion, deletion, or substitution.  Equal
    strings are zero edits apart and therefore return False.
    """
    # Strip the longest common prefix.
    i = 0
    while i < len(s1) and i < len(s2) and s1[i] == s2[i]:
        i += 1
    rest1, rest2 = s1[i:], s2[i:]

    # Strip the longest common suffix of what remains.
    j = 0
    while j < len(rest1) and j < len(rest2) and rest1[-1 - j] == rest2[-1 - j]:
        j += 1

    # Exactly one edit iff the longer unmatched middle has length 1.
    return max(len(rest1), len(rest2)) - j == 1
class Solution:
    """LeetCode 161 adapter delegating to the module-level helper."""

    def isOneEditDistance(self, s: str, t: str) -> bool:
        return one_edit_distance(s, t)
| [
"davll.xc@gmail.com"
] | davll.xc@gmail.com |
1ca107d39fcf682c12bd191ef447080ea774f49a | 6b05bddf2e294c8e1b39846aecadfa06b4ff805d | /test/test_v1_generation_status.py | 121f9db48f40ba73016ca068c091830bc77627e5 | [
"Apache-2.0"
] | permissive | kubevirt/client-python | 5ca82fe55d48c07f62796d2bed3605a7c189922c | 235fe17f58d41165010be7e4122cb67bdc866fe7 | refs/heads/master | 2023-09-03T12:25:27.272479 | 2023-08-17T00:33:31 | 2023-08-17T00:33:31 | 105,017,761 | 29 | 25 | Apache-2.0 | 2022-10-20T13:52:10 | 2017-09-27T12:51:32 | Python | UTF-8 | Python | false | false | 925 | py | # coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_generation_status import V1GenerationStatus
class TestV1GenerationStatus(unittest.TestCase):
    """ V1GenerationStatus unit test stubs """
    # Swagger-codegen generated placeholder; no fixtures are required.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testV1GenerationStatus(self):
        """
        Test V1GenerationStatus
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubevirt.models.v1_generation_status.V1GenerationStatus()
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"kubevirt-bot"
] | kubevirt-bot |
ca0d61bc85b983481ef9efb4c5f39e48b6622d3b | 2709e527c217a8264b48e2f549b3284e5ccb9551 | /0x09-python-everything_is_object/100-magic_string.py | f70d7350af05a64e469e472edab507ed4a90ae61 | [] | no_license | kwhit2/holbertonschool-higher_level_programming | 489d6b88ed14b9f2efd4637d8a71ae569b5027f6 | 2660516b12fee0f03c4025ba1d8d2762a8880a06 | refs/heads/main | 2023-05-22T17:57:02.035803 | 2021-06-12T18:43:54 | 2021-06-12T18:43:54 | 319,346,696 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | #!/usr/bin/python3
def magic_string(str=[]):
    """Append "Holberton" to the shared default list and return it joined.

    NOTE: the mutable default argument is *deliberate* — this exercise
    relies on the default list persisting between calls, so each call
    returns one more "Holberton" than the last.
    """
    str += ["Holberton"]  # in-place extension, same effect as .append
    return ", ".join(str)
| [
"kfw2@outlook.com"
] | kfw2@outlook.com |
b0ed5be72b248dee34ba8ffdeb7e2c8fc09ba3c3 | e14372adf86d3c4f9e73c9f7111db3215c696c3d | /1.入门/二级/LE8.py | c0e3f65646dff938600e801494d64b31300cde86 | [] | no_license | hewei-bit/PYTHON_learning | 71ddd7560a52575528547187f4fb40f39a3cbbdb | 18de8e5bdca165df5a5a4b5e0887846593656f4e | refs/heads/master | 2022-12-02T13:38:05.907135 | 2020-08-13T04:57:41 | 2020-08-13T04:57:41 | 261,647,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | import jieba
# Word-frequency ranking of character names in "Three Kingdoms".
txt = open("threekingdoms.txt","r",encoding='utf-8').read()
# Frequent words that are not character names, dropped from the ranking.
excludes = {"商议","如何","主公","军士","将军","却说","不可","不能","如此","二人","荆州"}
words = jieba.lcut(txt)
print(words)
counts = {}
for word in words:
    if len(word) == 1:
        # skip single-character tokens (mostly particles)
        continue
    elif word == "诸葛亮" or word == "孔明曰":
        # merge aliases of Zhuge Liang
        rword = "孔明"
    elif word=="关公" or word == "云长":
        # merge aliases of Guan Yu
        rword = "关羽"
    elif word=="玄德" or word == "玄德曰":
        # merge aliases of Liu Bei
        rword = "刘备"
    elif word=="孟德" or word == "丞相":
        # merge aliases of Cao Cao
        rword = "曹操"
    else:
        rword = word
    counts[rword] = counts.get(rword,0) + 1
# NOTE(review): del raises KeyError if an excluded word never occurs in
# the text — confirm every entry of excludes always appears.
for word in excludes:
    del counts[word]
items = list(counts.items())
items.sort(key = lambda x:x[1],reverse = True)
# print the top-10 words, left-aligned name and right-aligned count
for i in range(10):
    word,count = items[i]
    print("{0:<15}{1:>5}".format(word,count))
'''
def getText():
txt = open("hamlet.txt","r").read()
txt = txt.lower()
for ch in "!@#$%^&*+_-,./{|}~`‘’“”;:[\\]?=":
txt = txt.replace(ch,"")
return txt
hamletTxt = getText()
words = hamletTxt.split()
counts = {}
for word in words:
counts[word] = counts.get(word,0) + 1
items = list(counts.items())
items.sort(key=lambda x:x[1],reverse = True)
for i in range(10):
word,count = items[i]
print("{0:<10}{1:>5}".format(word,count))
'''
'''
def getNum():
nums = []
iNumStr = input("请输入数字(回车退出)")
while iNumStr != '':
nums.append(eval(iNumStr))
iNumStr = input("请输入数字(回车退出)")
return nums
def mean(numbers):
isum = 0.0
for i in numbers:
isum += i
return isum/len(numbers)
def dev(numbers,mean):
sdev = 0.0
for num in numbers:
sdev = sdev + (num - mean)**2
return pow(sdev / (len(numbers)-1),0.5)
def median(numbers):
sorted(numbers)
size = len(numbers)
if size % 2 == 0:
med = (numbers[size//2-1] + numbers[size//2])/2
else:
med = numbers[size//2]
return med
n = getNum()
m = mean(n)
print("平均值:{},方差:{},中位数:{} ".format(m,dev(n,m),median(n)))
'''
| [
"1003826976@qq.com"
] | 1003826976@qq.com |
1e8b36d3decd0e0499113472c2e39046014353fa | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/16/51/8.py | 0a0a76035762a98ccb3b14bab09892bf3db8e467 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import sys
import os
class Stack(object):
    """Fixed-capacity LIFO stack over a preallocated list.

    Attributes a (backing storage) and n (current size) are read
    directly by callers, so their names are part of the interface.
    """
    def __init__(self, N):
        self.a = ['X'] * N  # 'X' is placeholder filler
        self.n = 0
    def push(self, x):
        # store at the current top slot, then grow
        self.a[self.n], self.n = x, self.n + 1
    def top(self):
        # peek without removing
        return self.a[self.n - 1]
    def pop(self):
        last = self.n - 1
        self.n = last
        return self.a[last]
def main():
    # Read T test cases from stdin; each case is one string of letters.
    # NOTE: Python 2 code (xrange, print statements, integer '/').
    T = int(sys.stdin.readline())
    for t in xrange(1, T+1):
        s = sys.stdin.readline().strip()
        ret = 0
        st = Stack(len(s))
        for x in s:
            if st.n and st.top() == x:
                # adjacent equal pair: cancel both for 10 points
                ret += 10
                st.pop()
            else:
                st.push(x)
        # NOTE(review): presumably 5 points per leftover pair (py2 floor
        # division via '/') — confirm against the problem statement.
        ret += 5 * st.n/2
        print "Case #%d: %s" % (t, ret)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
380a93700651bc938a605253ad4f55098290336a | 2d358ffb51f03cc64cc2da0f684b0928aebe139c | /test2/booktest/migrations/0002_areainfo.py | 5907812c65cd3ab0178138341bec716118c0025e | [] | no_license | 853695319/learningdjango | 195ffabdbd3a5b6bc4386cbb678504c0d2cd0095 | d2aac1117bb2ca31e4f247a9d206adcf3a9f39a2 | refs/heads/master | 2020-05-03T04:59:16.094900 | 2019-04-23T06:25:02 | 2019-04-23T06:25:02 | 178,437,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Creates the self-referential AreaInfo table (aparent points at the
    # parent area, nullable for top-level areas).

    dependencies = [
        ('booktest', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='AreaInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('atitle', models.CharField(max_length=20)),
                ('aparent', models.ForeignKey(blank=True, null=True, to='booktest.AreaInfo')),
            ],
        ),
    ]
| [
"853695319@qq.com"
] | 853695319@qq.com |
5beb6ad74c365944a9dacad8a0775794bd47beee | 8dc7bafb4b20f7b556ca733c8a1e46ae8dd8d4cb | /accounting/accounting/doctype/payment_entry/payment_entry.py | 19e889378bebce608bf0293a5597c21df65c02c0 | [
"MIT"
] | permissive | MohamedAbdulsalam96/accounting | 06d50f236ae94cf7741bf2750a098ae18fa02fe0 | e4665df4dce863e0703b8b733255d200aa953c1c | refs/heads/master | 2023-05-30T19:19:14.365102 | 2021-06-08T10:08:36 | 2021-06-08T10:08:36 | 630,869,866 | 1 | 0 | null | 2023-04-21T10:42:57 | 2023-04-21T10:42:57 | null | UTF-8 | Python | false | false | 389 | py | # Copyright (c) 2021, ac and contributors
# For license information, please see license.txt
from accounting.accounting.doctype.gl_entry.utils import create_gl_entry
from frappe.model.document import Document
class PaymentEntry(Document):
    """Accounting doctype: posts balancing GL entries on submission."""

    def on_submit(self):
        # Double-entry posting for the same amount on both accounts.
        # NOTE(review): assumes create_gl_entry(doc, account, debit,
        # credit) argument order — confirm against gl_entry.utils.
        create_gl_entry(self, self.account_paid_to, self.amount, 0)
        create_gl_entry(self, self.account_paid_from, 0, self.amount)
| [
"anand21nanda@gmail.com"
] | anand21nanda@gmail.com |
567fc4698f2d54fee0f1380d1be5135399468276 | eef614ad302e1662f51fe6d8a880cfeef10bd3b1 | /tests/functional_tests/accounts/test_signup.py | 75dec47aad56a16052232ca9d3e7a76b518e1b3a | [
"MIT"
] | permissive | gitter-badger/apostello | a334a5a63a833a9e4a84f5872d44da7bdccc5477 | d6eeacf7a726aa33a79676fdd3d05b25a05b0f70 | refs/heads/master | 2021-01-18T18:59:36.111493 | 2016-03-17T20:01:08 | 2016-03-17T20:01:08 | 54,472,652 | 0 | 0 | null | 2016-03-22T12:16:51 | 2016-03-22T12:16:50 | null | UTF-8 | Python | false | false | 3,403 | py | import pytest
from django.contrib.auth.models import User
from django.core import mail
from site_config.models import SiteConfiguration
@pytest.mark.django_db
@pytest.mark.slow
class TestSignup:
def test_sign_up(self, live_server, browser, users):
"""
Tests the sign up form and checks that the appropriate emails
have been sent afterwards.
"""
# signup
uri = '/accounts/signup'
browser.get(live_server + uri)
email_box = browser.find_elements_by_name('email')[0]
email_box.send_keys('testsignupemail@example.com')
password_box1 = browser.find_elements_by_name('password1')[0]
password_box1.send_keys('top_secret')
password_box2 = browser.find_elements_by_name('password2')[0]
password_box2.send_keys('top_secret')
login_button = browser.find_elements_by_xpath(
'html/body/div/div/form/button'
)[0]
login_button.click()
# check we have been redirected
assert '/accounts/confirm-email/' in browser.current_url
assert len(mail.outbox) == 1
# assert '[apostello] New User' in mail.outbox[0].subject # not sent
# when we have no office email set
assert 'Please Confirm Your E-mail Address' in mail.outbox[0].subject
for x in mail.outbox[0].body.split():
if x.startswith('http'):
confirm_url = x
browser.get(confirm_url)
confirm_button = browser.find_element_by_class_name('button')
confirm_button.click()
user = User.objects.get(email='testsignupemail@example.com')
assert not user.is_staff
assert not user.is_superuser
def test_first_user_sign_up(self, live_server, browser):
"""
Tests the sign up form and checks that the appropriate emails
have been sent afterwards.
Then we confirm the email and verify the user has been made an admin.
"""
# add an office email to test correct email is sent on sign up
config = SiteConfiguration.get_solo()
config.office_email = 'test@apostello.ninja'
config.save()
# signup
uri = '/accounts/signup'
browser.get(live_server + uri)
email_box = browser.find_elements_by_name('email')[0]
email_box.send_keys('testsignupemail@example.com')
password_box1 = browser.find_elements_by_name('password1')[0]
password_box1.send_keys('top_secret')
password_box2 = browser.find_elements_by_name('password2')[0]
password_box2.send_keys('top_secret')
login_button = browser.find_elements_by_xpath(
'html/body/div/div/form/button'
)[0]
login_button.click()
# check we have been redirected
assert '/accounts/confirm-email/' in browser.current_url
assert len(mail.outbox) == 2
assert '[apostello] New User' in mail.outbox[0].subject
assert 'Please Confirm Your E-mail Address' in mail.outbox[1].subject
for x in mail.outbox[1].body.split():
if x.startswith('http'):
confirm_url = x
browser.get(confirm_url)
confirm_button = browser.find_element_by_class_name('button')
confirm_button.click()
user = User.objects.get(email='testsignupemail@example.com')
assert user.is_staff
assert user.is_superuser
| [
"montgomery.dean97@gmail.com"
] | montgomery.dean97@gmail.com |
52bdf9b51ef3cb5ff7e208165146ae83d5eef6a2 | 71bc873c20fbc45bb5e13095d2474496818a23f9 | /code word2vec_experiment_/district_stop_words.py | 397ba3344690383eea7c5304e4c07ce74bdefc1f | [] | no_license | 2877992943/lianyun | f31c44ea2e266bae51cae4fa464d1bae368c8d3f | a872d6cd1b2eff402bcccb326d33d086816d87af | refs/heads/master | 2021-01-20T16:17:20.226401 | 2017-05-10T06:49:31 | 2017-05-10T06:49:31 | 90,830,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | #! -*- coding:utf-8 -*-
import pandas as pd
import sys,os,re
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
if __name__=='__main__':
path='../backup/2014_district.csv'
df=pd.read_csv(path,encoding='utf-8')
ll=df['dis'].values.tolist()
ll_clean=[]
ll_clean1=[]# more than 3 words ,strip 省
for item in ll:
if item==np.nan:continue
item=re.sub('[\s+]','',item)
ll_clean.append(item)
###
if item.decode('utf-8').__len__()>2:ll_clean1.append(item[:-1])
print len(ll_clean),' '.join(ll_clean)
print len(ll_clean1),' '.join(ll_clean1)
####
pd.to_pickle(ll_clean1+ll_clean,'../data/district_noise')
| [
"2877992943@qq.com"
] | 2877992943@qq.com |
58ac3cb19d3a3277dc1dda822c0f890412c72177 | 2218e1da5cb944e4509f8641ca051de137645c5e | /剑指 Offer/First/14-1.cuttingRope.py | f9cc32be30464432e53c133a87cfd362aabb6fd1 | [] | no_license | Hegemony/Python-Practice | 9e76ebb414433e51c2074602fb0a871891647839 | b68ea41688e9e305635c63fdc43402e2b6fe6524 | refs/heads/main | 2023-05-05T14:00:59.921803 | 2021-06-01T15:38:30 | 2021-06-01T15:38:30 | 301,602,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | class Solution:
def cuttingRope(self, n: int) -> int:
res = 0
for i in range(2, n + 1):
cnt = n // i
yu = n % i
res = max(res, (cnt + 1) ** yu * cnt ** (i - yu))
return res | [
"noreply@github.com"
] | Hegemony.noreply@github.com |
13a71adb0e45fcc83aa599afb5f5d43c08fd678f | ed11f664cbc459c7a4456dd58f2b231edcb22f33 | /ctm_api_client/models/agent_in_hostgroup.py | 25a98d8dbf0878138a0c8238cf908665308d20ce | [
"BSD-3-Clause"
] | permissive | jpmc216/ctm_python_client | c8b8ba60580bf869b3d1e6af9b99737e0a7ea527 | de44e5012214ec42bb99b7f9b4ebc5394cd14328 | refs/heads/main | 2023-08-26T22:06:34.022576 | 2021-10-25T13:41:31 | 2021-10-25T13:41:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,627 | py | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_api_client.configuration import Configuration
class AgentInHostgroup(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"host": "str",
"tag": "str",
"hostgroup_agent_participation": "HostgroupAgentParticipation",
}
attribute_map = {
"host": "host",
"tag": "tag",
"hostgroup_agent_participation": "hostgroupAgentParticipation",
}
def __init__(
self,
host=None,
tag=None,
hostgroup_agent_participation=None,
_configuration=None,
): # noqa: E501
"""AgentInHostgroup - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._host = None
self._tag = None
self._hostgroup_agent_participation = None
self.discriminator = None
self.host = host
if tag is not None:
self.tag = tag
if hostgroup_agent_participation is not None:
self.hostgroup_agent_participation = hostgroup_agent_participation
@property
def host(self):
"""Gets the host of this AgentInHostgroup. # noqa: E501
The hostname of the agent. # noqa: E501
:return: The host of this AgentInHostgroup. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this AgentInHostgroup.
The hostname of the agent. # noqa: E501
:param host: The host of this AgentInHostgroup. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and host is None:
raise ValueError(
"Invalid value for `host`, must not be `None`"
) # noqa: E501
self._host = host
@property
def tag(self):
"""Gets the tag of this AgentInHostgroup. # noqa: E501
Host Group tag. HIDDEN. # noqa: E501
:return: The tag of this AgentInHostgroup. # noqa: E501
:rtype: str
"""
return self._tag
@tag.setter
def tag(self, tag):
"""Sets the tag of this AgentInHostgroup.
Host Group tag. HIDDEN. # noqa: E501
:param tag: The tag of this AgentInHostgroup. # noqa: E501
:type: str
"""
self._tag = tag
@property
def hostgroup_agent_participation(self):
"""Gets the hostgroup_agent_participation of this AgentInHostgroup. # noqa: E501
The host condition. HIDDEN. # noqa: E501
:return: The hostgroup_agent_participation of this AgentInHostgroup. # noqa: E501
:rtype: HostgroupAgentParticipation
"""
return self._hostgroup_agent_participation
@hostgroup_agent_participation.setter
def hostgroup_agent_participation(self, hostgroup_agent_participation):
"""Sets the hostgroup_agent_participation of this AgentInHostgroup.
The host condition. HIDDEN. # noqa: E501
:param hostgroup_agent_participation: The hostgroup_agent_participation of this AgentInHostgroup. # noqa: E501
:type: HostgroupAgentParticipation
"""
self._hostgroup_agent_participation = hostgroup_agent_participation
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(AgentInHostgroup, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AgentInHostgroup):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AgentInHostgroup):
return True
return self.to_dict() != other.to_dict()
| [
"vtadinad@bmc.com"
] | vtadinad@bmc.com |
e6f27f97491f2a6ecc94d9372b6fdcd1658f6b7e | 4e22e93ecdb105df4e15c63f4503522b3525f70b | /ansible/ovirt-engine-ansible4/ov4_assigned_permissions | f936ffaff44356c2f6620a8a9c2915b06b06ba14 | [
"Apache-2.0"
] | permissive | machacekondra/ovirt-engine-ansible | df7e1d1bb14f154bc411bc628ee483898ecbc712 | ffc5fcb5d36f039e347208ab5f9fc672cfb1f596 | refs/heads/master | 2021-01-19T05:30:28.561742 | 2016-07-20T15:21:03 | 2016-07-20T15:21:03 | 61,116,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,138 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
DOCUMENTATION = '''
---
module: ov4_assigned_permissions
short_description: ov4_assigned_permissions module to manage assigned-permissions in oVirt
author: "Ondra Machacek (@machacekondra)"
version_added: 2.0
description:
- "This modules is used to manage oVirt assigned-permissions."
options:
method:
required: True
description:
- "Action to be run on assigned-permissions."
choices:
- add
- list
- get
- remove
auth:
required: True
description:
- "Dictionary with values needed to create HTTP connection to oVirt:"
- "** C(username)[I(required)] - The name of the user, something like `I(admin@internal)`."
- "** C(password)[I(required)] - The password of the user."
- "** C(url)[I(required)] - A string containing the base URL of the server, usually
something like `I(https://server.example.com/ovirt-engine/api)`."
- "** C(sso_token) - SSO token to be used instead of login with username/password."
- "** C(insecure) - A boolean flag that indicates if the server TLS
certificate and host name should be checked."
- "** C(ca_file) - A PEM file containing the trusted CA certificates. The
certificate presented by the server will be verified using these CA
certificates. If `C(ca_file)` parameter is not set, system wide
CA certificate store is used."
service:
required: false
description:
- "URL path of the service we want to work with, usually something like I(/vms/123/disks/456)."
parameters:
required: false
description:
- "Dictionary which specify additional parameters to be send with request."
- " C(add) parameters:"
- "** I(permission)[dict] - U(https://jhernand.fedorapeople.org/ovirt-api-explorer/#/types/permission)."
- " C(remove) parameters:"
- "** I(async)[boolean] - Indicates if the remove should be performed asynchronously."
'''
RETURN = '''
'''
import sys
import json
def add(connection, path, **kwargs):
request = Request(method='POST', path='%s/permissions' % path)
request.body = json.dumps(kwargs.pop('permission'))
response = connection.send(request)
if response.code in [201, 202]:
return {'changed': True, 'permission': response.body}
return {'changed': False, 'error': response.body}
def get(connection, path, **kwargs):
request = Request(method='GET', path='%s' % path, query=kwargs)
response = connection.send(request)
return {'changed': False, 'permission': response.body['permission']}
def list(connection, path, **kwargs):
request = Request(method='GET', path='%s/permissions' % path, query=kwargs)
response = connection.send(request)
if 'permission' in response.body:
return {'changed': False, 'permission': response.body['permission']}
return {'changed': False, 'error': response.body}
def remove(connection, path, **kwargs):
request = Request(method='DELETE', path='%s' % path, query=kwargs)
response = connection.send(request)
if response.code in [200]:
return {'changed': True}
return {'changed': False, 'error': response.body}
def main():
module = AnsibleModule(
argument_spec=dict(
method=dict(required=True, choices=['add', 'list', 'get', 'remove']),
auth=dict(required=True, type='dict'),
service=dict(required=False, type='str', default=''),
parameters=dict(required=False, type='dict', default=dict()),
)
)
auth = module.params.pop('auth')
connection = Connection(
url=auth.get('url'),
username=auth.get('username'),
password=auth.get('password'),
ca_file=auth.get('ca_file', None),
insecure=auth.get('insecure', False),
sso_token=auth.get('sso_token', None),
)
try:
method = module.params.pop('method')
ret = getattr(sys.modules[__name__], method)(connection, module.params['service'], **module.params.pop('parameters'))
module.exit_json(**ret)
except Error as e:
module.fail_json(msg="Error: %s" % e)
finally:
if auth.get('sso_token', None) is None:
connection.close()
from ansible.module_utils.basic import *
from ansible.module_utils.ovirt4 import *
if __name__ == "__main__":
main()
| [
"omachace@redhat.com"
] | omachace@redhat.com | |
55fa3f7713e3159c89ae3d4fb4dd6300c16bd5fb | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/222/61395/submittedfiles/testes.py | 4e2bd5734fb77391b86989a58aa509a2b9e0c261 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py |
def angulosexasimal(L):
soma=0
for i in range(0,len(L),1):
soma=lista[i]+(lista[i+1]/60)+(lista[i+2]/3600)
return soma
L=[23,37,28]
print(soma)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
a6b7bb8cd0e86c6c35ca9e4e9bfd04ab18e13630 | aad164e4efe1d55cc189c35956bfd435b14a0f52 | /eve-8.21.494548/lib/carbonlib/trinity/windowsEvents.py | f2776157a9f13ed4b623582a3622bc8e728b2ef0 | [] | no_license | Pluckyduck/eve | 61cc41fe8fd4dca4fbdcc4761a37bcfeb27ed84f | 9a277707ab1f162c6bd9618faf722c0be3ea93ad | refs/heads/master | 2020-12-28T23:35:29.992875 | 2013-05-06T14:24:33 | 2013-05-06T14:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,434 | py | #Embedded file name: c:\depot\games\branches\release\EVE-TRANQUILITY\carbon\common\lib\trinity\windowsEvents.py
WM_NULL = 0
WM_CREATE = 1
WM_DESTROY = 2
WM_MOVE = 3
WM_SIZE = 5
WM_ACTIVATE = 6
WM_SETFOCUS = 7
WM_KILLFOCUS = 8
WM_ENABLE = 10
WM_SETREDRAW = 11
WM_SETTEXT = 12
WM_GETTEXT = 13
WM_GETTEXTLENGTH = 14
WM_PAINT = 15
WM_CLOSE = 16
WM_QUERYENDSESSION = 17
WM_QUIT = 18
WM_QUERYOPEN = 19
WM_ERASEBKGND = 20
WM_SYSCOLORCHANGE = 21
WM_ENDSESSION = 22
WM_SYSTEMERROR = 23
WM_SHOWWINDOW = 24
WM_CTLCOLOR = 25
WM_WININICHANGE = 26
WM_SETTINGCHANGE = 26
WM_DEVMODECHANGE = 27
WM_ACTIVATEAPP = 28
WM_FONTCHANGE = 29
WM_TIMECHANGE = 30
WM_CANCELMODE = 31
WM_SETCURSOR = 32
WM_MOUSEACTIVATE = 33
WM_CHILDACTIVATE = 34
WM_QUEUESYNC = 35
WM_GETMINMAXINFO = 36
WM_PAINTICON = 38
WM_ICONERASEBKGND = 39
WM_NEXTDLGCTL = 40
WM_SPOOLERSTATUS = 42
WM_DRAWITEM = 43
WM_MEASUREITEM = 44
WM_DELETEITEM = 45
WM_VKEYTOITEM = 46
WM_CHARTOITEM = 47
WM_SETFONT = 48
WM_GETFONT = 49
WM_SETHOTKEY = 50
WM_GETHOTKEY = 51
WM_QUERYDRAGICON = 55
WM_COMPAREITEM = 57
WM_COMPACTING = 65
WM_WINDOWPOSCHANGING = 70
WM_WINDOWPOSCHANGED = 71
WM_POWER = 72
WM_COPYDATA = 74
WM_CANCELJOURNAL = 75
WM_NOTIFY = 78
WM_INPUTLANGCHANGEREQUEST = 80
WM_INPUTLANGCHANGE = 81
WM_TCARD = 82
WM_HELP = 83
WM_USERCHANGED = 84
WM_NOTIFYFORMAT = 85
WM_CONTEXTMENU = 123
WM_STYLECHANGING = 124
WM_STYLECHANGED = 125
WM_DISPLAYCHANGE = 126
WM_GETICON = 127
WM_SETICON = 128
WM_NCCREATE = 129
WM_NCDESTROY = 130
WM_NCCALCSIZE = 131
WM_NCHITTEST = 132
WM_NCPAINT = 133
WM_NCACTIVATE = 134
WM_GETDLGCODE = 135
WM_NCMOUSEMOVE = 160
WM_NCLBUTTONDOWN = 161
WM_NCLBUTTONUP = 162
WM_NCLBUTTONDBLCLK = 163
WM_NCRBUTTONDOWN = 164
WM_NCRBUTTONUP = 165
WM_NCRBUTTONDBLCLK = 166
WM_NCMBUTTONDOWN = 167
WM_NCMBUTTONUP = 168
WM_NCMBUTTONDBLCLK = 169
WM_KEYFIRST = 256
WM_KEYDOWN = 256
WM_KEYUP = 257
WM_CHAR = 258
WM_DEADCHAR = 259
WM_SYSKEYDOWN = 260
WM_SYSKEYUP = 261
WM_SYSCHAR = 262
WM_SYSDEADCHAR = 263
WM_KEYLAST = 264
WM_IME_STARTCOMPOSITION = 269
WM_IME_ENDCOMPOSITION = 270
WM_IME_COMPOSITION = 271
WM_IME_KEYLAST = 271
WM_INITDIALOG = 272
WM_COMMAND = 273
WM_SYSCOMMAND = 274
WM_TIMER = 275
WM_HSCROLL = 276
WM_VSCROLL = 277
WM_INITMENU = 278
WM_INITMENUPOPUP = 279
WM_MENUSELECT = 287
WM_MENUCHAR = 288
WM_ENTERIDLE = 289
WM_CTLCOLORMSGBOX = 306
WM_CTLCOLOREDIT = 307
WM_CTLCOLORLISTBOX = 308
WM_CTLCOLORBTN = 309
WM_CTLCOLORDLG = 310
WM_CTLCOLORSCROLLBAR = 311
WM_CTLCOLORSTATIC = 312
WM_MOUSEFIRST = 512
WM_MOUSEMOVE = 512
WM_LBUTTONDOWN = 513
WM_LBUTTONUP = 514
WM_LBUTTONDBLCLK = 515
WM_RBUTTONDOWN = 516
WM_RBUTTONUP = 517
WM_RBUTTONDBLCLK = 518
WM_MBUTTONDOWN = 519
WM_MBUTTONUP = 520
WM_MBUTTONDBLCLK = 521
WM_MOUSEWHEEL = 522
WM_MOUSEHWHEEL = 526
WM_PARENTNOTIFY = 528
WM_ENTERMENULOOP = 529
WM_EXITMENULOOP = 530
WM_NEXTMENU = 531
WM_SIZING = 532
WM_CAPTURECHANGED = 533
WM_MOVING = 534
WM_POWERBROADCAST = 536
WM_DEVICECHANGE = 537
WM_MDICREATE = 544
WM_MDIDESTROY = 545
WM_MDIACTIVATE = 546
WM_MDIRESTORE = 547
WM_MDINEXT = 548
WM_MDIMAXIMIZE = 549
WM_MDITILE = 550
WM_MDICASCADE = 551
WM_MDIICONARRANGE = 552
WM_MDIGETACTIVE = 553
WM_MDISETMENU = 560
WM_ENTERSIZEMOVE = 561
WM_EXITSIZEMOVE = 562
WM_DROPFILES = 563
WM_MDIREFRESHMENU = 564
WM_IME_SETCONTEXT = 641
WM_IME_NOTIFY = 642
WM_IME_CONTROL = 643
WM_IME_COMPOSITIONFULL = 644
WM_IME_SELECT = 645
WM_IME_CHAR = 646
WM_IME_KEYDOWN = 656
WM_IME_KEYUP = 657
WM_MOUSEHOVER = 673
WM_NCMOUSELEAVE = 674
WM_MOUSELEAVE = 675
WM_CUT = 768
WM_COPY = 769
WM_PASTE = 770
WM_CLEAR = 771
WM_UNDO = 772
WM_RENDERFORMAT = 773
WM_RENDERALLFORMATS = 774
WM_DESTROYCLIPBOARD = 775
WM_DRAWCLIPBOARD = 776
WM_PAINTCLIPBOARD = 777
WM_VSCROLLCLIPBOARD = 778
WM_SIZECLIPBOARD = 779
WM_ASKCBFORMATNAME = 780
WM_CHANGECBCHAIN = 781
WM_HSCROLLCLIPBOARD = 782
WM_QUERYNEWPALETTE = 783
WM_PALETTEISCHANGING = 784
WM_PALETTECHANGED = 785
WM_HOTKEY = 786
WM_PRINT = 791
WM_PRINTCLIENT = 792
WM_HANDHELDFIRST = 856
WM_HANDHELDLAST = 863
WM_PENWINFIRST = 896
WM_PENWINLAST = 911
WM_COALESCE_FIRST = 912
WM_COALESCE_LAST = 927
WM_DDE_FIRST = 992
WM_DDE_INITIATE = 992
WM_DDE_TERMINATE = 993
WM_DDE_ADVISE = 994
WM_DDE_UNADVISE = 995
WM_DDE_ACK = 996
WM_DDE_DATA = 997
WM_DDE_REQUEST = 998
WM_DDE_POKE = 999
WM_DDE_EXECUTE = 1000
WM_DDE_LAST = 1000
WM_USER = 1024
WM_APP = 32768
WM_XBUTTONDOWN = 523
WM_XBUTTONUP = 524
MK_CONTROL = 8
MK_LBUTTON = 1
MK_MBUTTON = 16
MK_RBUTTON = 2
MK_SHIFT = 4
MK_XBUTTON1 = 32
MK_XBUTTON2 = 64
XBUTTON1 = 1
XBUTTON2 = 2 | [
"ferox2552@gmail.com"
] | ferox2552@gmail.com |
2da592d046f1fde36fb19d493634f5a7e2f922cd | d58542787230deb9efa0499451a7f228570562c1 | /djangoapps/schools/migrations/0006_auto_20170330_0720.py | 322872df2939273964e354697590eec098517d2a | [
"MIT"
] | permissive | serudda/waysily-server | 5d9edfeb14fef208ea7dd10dc06deb44b784bb4c | b2da3f3a97dbd1ef46d65f13ee9b2098124d4fc4 | refs/heads/master | 2023-03-03T05:36:50.689008 | 2017-12-27T15:25:32 | 2017-12-27T15:25:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-30 07:20
from __future__ import unicode_literals
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('schools', '0005_auto_20170330_0249'),
]
operations = [
migrations.AlterField(
model_name='immersion',
name='option',
field=multiselectfield.db.fields.MultiSelectField(choices=[(1, 'Games and activities with local'), (2, 'Coffee tasting experience'), (3, 'Beer tasting experience'), (4, 'Chocolate tasting experience'), (5, 'Pub crawl'), (6, 'Local food tasting'), (7, 'Local dance class'), (8, 'Cooking Local Food'), (9, 'Local Movies Night'), (10, 'Practice local sport')], max_length=20, verbose_name='Immersion options'),
),
]
| [
"sergioruizdavila@gmail.com"
] | sergioruizdavila@gmail.com |
cbc8f4f44e59bc6c67f6e78b2974635907244521 | 005f02cb534bbf91fe634fcf401441e1179365c8 | /8-Python Level 2/8.1-Scope(hierarkia e variablave)/scope.py | ad63f08884169ea6ecf4d5dead0fa576e946220a | [] | no_license | Ruxhino-B/django-deployment-example | 220a39a456871a1bf42a64fd5b945731056fc7b9 | e19713ac1e11af202152ad20d7c3c94891a77e83 | refs/heads/master | 2020-04-18T02:21:10.505691 | 2020-01-06T14:18:18 | 2020-01-06T14:25:25 | 167,159,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # x = 25
# def my_func():
# x = 50
# return x
# print(x) #Ketu printohet 25 sepse eshte variabel global
# print(my_func()) #Ketu printohet 50 sepse merret x i funksionit
# my_func()
# print(x) #Ketu serisht printohet 25 edhe pse eshte thirrur my_func mesiper
#locals
#lambda x: x**2 #ky eshte nje varibel lokal
#eclosing function locals (funksione te bashkegjitur lokal)
# name = 'this is a global name!'
# def greet():
# name = "Samy"
# def hello():
# print("hello " + name)
# hello() #jo jep hello Samy. sepse jo kerkon nje shkalle me lart per vaiablin name
# #nqs name brenda func greet fshihet at here hello kerkon nje shkalle
# #me lart per variablin name dhe printon hello this is a global name!
# greet() #nuk publikon asgje sepse vetem sa i jep funksionit vleren Samy
# print(name) #kjo printon serisht This is a global name
#Billd in level jane funksione ose varibla qe i ka python vete psh len-->jep gjatsi
#len = 23 #kjo eshte gabim sepse nqs therasim len del 23 e jo me funksini me mat gjatsin
x = 50
def func(x):
print('x is: ',x) #jep x is 50
global x = 100 #ben ndryshimin e variablit global x
x = 100
print('x u be: ',x)#jep x u be 100
func(x)
print(x) #jep vleren 100 sepse x u ndryshu vlera global
| [
"ruxhino@gmail.com"
] | ruxhino@gmail.com |
73e53aef9409cf8adde4832c379ef7acbb4ac3d4 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/exercises/_algorithms_challenges/pybites/beginner/91/test_anyall.py | d7c14f07ad586e2dafa7a87f3d42bb10477a5bb5 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 994 | py | # _______ p__
#
# ____ ? _______ ? ? ?
#
#
# ?p__.m__.p.("arg, expected", [
# ('aioue', T..),
# ('EoUia', T..),
# ('aaAiIee', T..),
# ('AEIOU', T..),
# ('aaeeouu', T..),
# ('abcde', F..),
# ('AE123', F..),
# ('AiOuef', F..),
# ])
# ___ test_contains_only_vowels arg, expected
# ... b.. ? ? __ ?
#
#
# ?p__.m__.p.("arg, expected", [
# ('Python', T..),
# ('pycharm', T..),
# ('PYTHON', T..),
# ('teaser', T..),
# ('bob', T..),
# ('julian', T..),
# ('yes', T..),
# ('no', T..),
# ('america', F..),
# ('B@b', F..),
# ('Jules', F..),
# ('agua', F..),
# ('123', F..),
# ('', F..),
# ])
# ___ test_contains_any_py_chars arg, expected
# ... b.. ? ? __ ?
#
#
# ?p__.m__.p.("arg, expected", [
# ('yes1', T..),
# ('123', T..),
# ('hello2', T..),
# ('up2date', T..),
# ('yes', F..),
# ('hello', F..),
# ('', F..),
# ])
# ___ test_contains_digits arg, expected
# ... b.. ? ? __ ? | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
04a74b3c4532d7de6e7e1b194009e0a4ffb452ef | a1798c553d5b0ddbbb323f3789db991e07105867 | /exec/bin/bash/google-cloud-sdk/.install/.backup/lib/surface/compute/networks/subnets/expand_ip_range.py | 5afff149ebc5aa364a22ec7be2b9cbc336c43bda | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/Wingman | d5d44752f4cd361d51b1e4a62076fe95106ec503 | 957143324fc795f034a57f529cd7b61b970f4a53 | refs/heads/master | 2022-11-20T05:43:31.780030 | 2016-09-30T02:07:00 | 2016-09-30T02:07:00 | 282,287,992 | 0 | 0 | null | 2020-07-24T18:15:54 | 2020-07-24T18:15:54 | null | UTF-8 | Python | false | false | 5,646 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for expanding IP range of a subnetwork."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as exceptions
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.core.console import console_io
import ipaddr
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class ExpandIpRange(base_classes.NoOutputAsyncMutator):
"""Expand IP range of a subnetwork."""
@staticmethod
def Args(parser):
flags.AddRegionFlag(
parser,
resource_type='subnetwork',
operation_type='expand IP range for')
parser.add_argument(
'--prefix-length',
type=int,
help=(
'The new prefix length of the subnet. It must be smaller than the '
'original and in the private address space 10.0.0.0/8, '
'172.16.0.0/12 or 192.168.0.0/16 defined in RFC 1918.'),
required=True)
parser.add_argument(
'name',
completion_resource='compute.subnetworks',
help='The name of the subnetwork for which to expand IP range.')
@property
def service(self):
return self.compute.subnetworks
@property
def method(self):
return 'ExpandIpCidrRange'
@property
def resource_type(self):
return 'subnetworks'
def CreateRequests(self, args):
"""Returns requests for expanding IP CIDR range."""
new_prefix_length = self._ValidatePrefixLength(args.prefix_length)
subnetwork_ref = self.CreateRegionalReference(args.name, args.region)
original_ip_cidr_range = self._GetOriginalIpCidrRange(subnetwork_ref)
new_ip_cidr_range = self._InferNewIpCidrRange(
subnetwork_ref.Name(), original_ip_cidr_range, new_prefix_length)
self._PromptToConfirm(
subnetwork_ref.Name(), original_ip_cidr_range, new_ip_cidr_range)
request = self._CreateExpandIpCidrRangeRequest(
subnetwork_ref, new_ip_cidr_range)
return [request]
def _ValidatePrefixLength(self, new_prefix_length):
if not 0 <= new_prefix_length <= 29:
raise exceptions.InvalidArgumentException(
'--prefix-length',
'Prefix length must be in the range [0, 29].')
return new_prefix_length
def _GetOriginalIpCidrRange(self, subnetwork_ref):
subnetwork = self._GetSubnetwork(subnetwork_ref)
if not subnetwork:
raise exceptions.ToolException(
'Subnet [{subnet}] was not found in region {region}.'.format(
subnet=subnetwork_ref.Name(), region=subnetwork_ref.region))
return subnetwork['ipCidrRange']
def _InferNewIpCidrRange(
self, subnet_name, original_ip_cidr_range, new_prefix_length):
unmasked_new_ip_range = '{0}/{1}'.format(
original_ip_cidr_range.split('/')[0],
new_prefix_length)
network = ipaddr.IPv4Network(unmasked_new_ip_range)
return str(network.masked())
def _PromptToConfirm(
self, subnetwork_name, original_ip_cidr_range, new_ip_cidr_range):
prompt_message_template = (
'The IP range of subnetwork [{0}] will be expanded from {1} to {2}. '
'This operation may take several minutes to complete '
'and cannot be undone.')
prompt_message = prompt_message_template.format(
subnetwork_name, original_ip_cidr_range, new_ip_cidr_range)
if not console_io.PromptContinue(message=prompt_message, default=True):
raise exceptions.ToolException('Operation aborted by user.')
def _CreateExpandIpCidrRangeRequest(self, subnetwork_ref, new_ip_cidr_range):
request_body = self.messages.SubnetworksExpandIpCidrRangeRequest(
ipCidrRange=new_ip_cidr_range)
return self.messages.ComputeSubnetworksExpandIpCidrRangeRequest(
subnetwork=subnetwork_ref.Name(),
subnetworksExpandIpCidrRangeRequest=request_body,
project=self.project,
region=subnetwork_ref.region)
def _GetSubnetwork(self, subnetwork_ref):
get_request = (
self.compute.subnetworks,
'Get',
self.messages.ComputeSubnetworksGetRequest(
project=self.project,
region=subnetwork_ref.region,
subnetwork=subnetwork_ref.Name()))
errors = []
objects = request_helper.MakeRequests(
requests=[get_request],
http=self.http,
batch_url=self.batch_url,
errors=errors,
custom_get_requests=None)
resources = list(lister.ProcessResults(objects, field_selector=None))
return resources[0] if resources else None
ExpandIpRange.detailed_help = {
'brief': 'Expand the IP range of a Google Compute Engine subnetwork',
'DESCRIPTION': """\
*{command}* is used to expand the IP range of a subnetwork in a custom
mode network.
""",
'EXAMPLES': """\
To expand the IP range of ``SUBNET'' to /16, run:
$ {command} SUBNET --region us-central1 --prefix-length 16
""",
}
| [
"tobiah.rex@gmail.com"
] | tobiah.rex@gmail.com |
90df0b83ccc840e73645521d83b6cac57016345f | e6945ece453368c03a77626833e38416ac736a66 | /algorithms/basic_algorithms/sort012.py | 4c05fc48dfafe6a468e49c2c9367b4c5e1b6beb7 | [] | no_license | aa-ag/nano | f8949efa0e6c5df9365629ef7e4a7447ea0d8bb7 | 3133662b7095069485a9860bef58f9bc5760ada3 | refs/heads/main | 2023-04-17T05:28:12.621387 | 2021-04-14T00:52:54 | 2021-04-14T00:52:54 | 338,670,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | def sort_012(input_list):
"""
The idea is to put 0 and 2 in their correct positions, which will make sure
all the 1s are automatically placed in their right positions
"""
# initialize pointers for next positions of 0 and 2
next_pos_0 = 0
next_pos_2 = len(input_list) - 1
front_index = 0
while front_index <= next_pos_2:
if input_list[front_index] == 0:
input_list[front_index] = input_list[next_pos_0]
input_list[next_pos_0] = 0
next_pos_0 += 1
front_index += 1
elif input_list[front_index] == 2:
input_list[front_index] = input_list[next_pos_2]
input_list[next_pos_2] = 2
next_pos_2 -= 1
else:
front_index += 1
# tests
def test_function(test_case):
sort_012(test_case)
if test_case == sorted(test_case):
print("Pass")
else:
print("Fail")
# test 1
test_case = [0, 0, 2, 2, 2, 1, 1, 1, 2, 0, 2]
test_function(test_case)
# test 2
test_case = [2, 1, 2, 0, 0, 2, 1, 0, 1, 0, 0,
2, 2, 2, 1, 2, 0, 0, 0, 2, 1, 0, 2, 0, 0, 1]
test_function(test_case)
# test 3
test_case = [2, 2, 0, 0, 2, 1, 0, 2, 2, 1, 1, 1, 0, 1, 2, 0, 2, 0, 1]
test_function(test_case)
| [
"aaron.aguerrevere@gmail.com"
] | aaron.aguerrevere@gmail.com |
af5a5803b937541ad6881d91e5345a919f0db0ba | 8c2b682e8bb27a32553cd6012d249df68617d262 | /src/rot13.py | 6b5445bca95bd2be62b28e8b32a33e78f9bfe258 | [] | no_license | Vlad-Shcherbina/Morph-Endo-Legacy | 7ca6de7632bb1fee69e1b6ec614a9ac9181fd297 | e620eeb2e144d0f8cf252770d4b5c38ac99449c7 | refs/heads/master | 2021-01-22T04:36:49.008953 | 2011-02-18T16:22:34 | 2011-02-18T16:22:34 | 1,352,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | import sys
A = ord('A')
a = ord('a')
table = ''.join(map(chr,
range(A)+
range(A+13, A+26)+range(A, A+13)+
range(A+26, a)+
range(a+13, a+26)+range(a, a+13)+
range(a+26, 256)))
for line in sys.stdin:
sys.stdout.write(line.translate(table)) | [
"vlad.shcherbina@gmail.com"
] | vlad.shcherbina@gmail.com |
72189e28891678dea78ecc6262d662e8917521d2 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/firestore/backups/delete.py | db44a094048b32399ee0cdc1780a08bdc70508c3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 1,583 | py | # -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud Firestore backups delete command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.firestore import backups
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.firestore import flags
from googlecloudsdk.core import properties
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Delete(base.DeleteCommand):
"""Deletes a Cloud Firestore backup.
## EXAMPLES
To delete `cf9f748a-7980-4703-b1a1-d1ffff591db0` backup in us-east1.
$ {command} --location=us-east1
--backup=cf9f748a-7980-4703-b1a1-d1ffff591db0
"""
@staticmethod
def Args(parser):
flags.AddLocationFlag(parser, required=True, hidden=True)
flags.AddBackupFlag(parser)
def Run(self, args):
project = properties.VALUES.core.project.Get(required=True)
return backups.DeleteBackup(project, args.location, args.backup)
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
3b9de62a4fe10f0e2e6b3a426c400aff2b705434 | 45e66980d15a06b264f31f9a7d6dcd6dc271b815 | /test/functional/rpc_getchaintips.py | f64055f5d7e031f912ab20627262990cd936fe7d | [
"MIT"
] | permissive | Lucky1689/ukcoin | e3ff17c66c85f5531d81580e4bc84ff3994924af | 11bcd6ded7b11a7179e32f1bf0d6f75615c0dde1 | refs/heads/master | 2022-09-20T17:25:14.553647 | 2020-06-03T18:08:17 | 2020-06-03T18:08:17 | 262,382,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,422 | py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Copyright (c) 2020 The Ukcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import UkcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (UkcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.extra_args = [["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"]]
def run_test (self):
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 1)
assert_equal (tips[0]['branchlen'], 0)
assert_equal (tips[0]['height'], 200)
assert_equal (tips[0]['status'], 'active')
# Split the network and build two chains of different lengths.
self.split_network ()
self.nodes[0].generate(10)
self.nodes[2].generate(20)
self.sync_all([self.nodes[:2], self.nodes[2:]])
tips = self.nodes[1].getchaintips ()
assert_equal (len (tips), 1)
shortTip = tips[0]
assert_equal (shortTip['branchlen'], 0)
assert_equal (shortTip['height'], 210)
assert_equal (tips[0]['status'], 'active')
tips = self.nodes[3].getchaintips ()
assert_equal (len (tips), 1)
longTip = tips[0]
assert_equal (longTip['branchlen'], 0)
assert_equal (longTip['height'], 220)
assert_equal (tips[0]['status'], 'active')
# Join the network halves and check that we now have two tips
# (at least at the nodes that previously had the short chain).
self.join_network ()
tips = self.nodes[0].getchaintips ()
assert_equal (len (tips), 2)
assert_equal (tips[0], longTip)
assert_equal (tips[1]['branchlen'], 10)
assert_equal (tips[1]['status'], 'valid-fork')
tips[1]['branchlen'] = 0
tips[1]['status'] = 'active'
assert_equal (tips[1], shortTip)
if __name__ == '__main__':
GetChainTipsTest ().main ()
| [
"Ukcoin168@gmail.com"
] | Ukcoin168@gmail.com |
fb55a6fe4035493d57f4e6701df30f4667531636 | 24cf6d01fc9485c2e5578523bce6313aab47a30e | /DataLoaders/RN_DataLoader.py | 722af7b3989143c336ff0c507fcc1971dabe7058 | [] | no_license | sahahn/GenDiagFramework | 352212b2c540a6db73e810e416a9d3d4fa84f95a | 29498d3667d644d5b3a8fd0f0e277cbdd14027ba | refs/heads/master | 2020-04-14T06:10:10.609366 | 2019-06-18T20:22:31 | 2019-06-18T20:22:31 | 163,678,894 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 11:09:31 2018
@author: sage
"""
from DataLoaders.TwoD_DataLoader import TwoD_DataLoader
from config import config
import nibabel as nib
import numpy as np
import csv
def get_name_slc(chunk):
'''Specific function for loading retina-net style csv'''
relevant_chunk = chunk.split('/')[-1].replace('.jpg','')
name = relevant_chunk[:-3]
slc = int(relevant_chunk[-3:])
return name, slc
class RN_DataLoader(TwoD_DataLoader):
def load_labels(self, include_none=True):
print('include none: ', include_none)
self.file_names = set()
self.data_points = []
with open(self.label_location) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
name, slc = get_name_slc(row[0])
self.file_names.add(name)
try:
label = [float(row[i]) for i in range(1,5)]
except ValueError:
label = [None]
label.append(config['name_convs'][row[-1]])
if label[0] == None and not include_none:
continue
if label[0] == None:
label = np.empty((5))
self.data_points.append(self.create_data_point(name, label, slc=slc))
def load_new(self):
for name in self.label_location:
raw_file_path = self.init_location + name + '.nii'
try:
raw_file = nib.load(raw_file_path)
except:
raw_file = nib.load(raw_file_path + '.gz')
data = raw_file.get_data()
data = data.transpose(2,1,0)
for slc in range(len(data)):
label = np.empty((5))
dp = self.create_data_point(name, label, slc=slc)
image = data[slc]
image = self.initial_preprocess(image, 0)
dp.set_data(image)
self.data_points.append(dp)
def load_annotations(annotations_loc):
'''Create an instance of the Retina Net DataLoader in order to load
the data point w/ just label, name and slice information, and return
the datapoints - notably loading only annotations with info'''
RN_Loader = RN_DataLoader('fake/', annotations_loc)
RN_Loader.load_labels(include_none=False)
return RN_Loader.data_points
| [
"sahahn@uvm.edu"
] | sahahn@uvm.edu |
4e825dc3c1d3e37064aefc4516a83fc19b3800e0 | cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98 | /leet/greedy/Min_Steps_to_Make_Piles_Equal_Height.py | 5a29476f4a588b2983fa92ec5fc6e3ae81d6e5c0 | [] | no_license | arsamigullin/problem_solving_python | 47715858a394ba9298e04c11f2fe7f5ec0ee443a | 59f70dc4466e15df591ba285317e4a1fe808ed60 | refs/heads/master | 2023-03-04T01:13:51.280001 | 2023-02-27T18:20:56 | 2023-02-27T18:20:56 | 212,953,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # microsoft
# Alexa is given n piles of equal or unequal heights. In one step, Alexa can remove any number of
# boxes from the pile which has the maximum height and try to make it equal to the one which is just
# lower than the maximum height of the stack.
# Determine the minimum number of steps required to make all of the piles equal in height.
#
# Example 1:
#
# Input: piles = [5, 2, 1]
# Output: 3
# Explanation:
# Step 1: reducing 5 -> 2 [2, 2, 1]
# Step 2: reducing 2 -> 1 [2, 1, 1]
# Step 3: reducing 2 -> 1 [1, 1, 1]
# So final number of steps required is 3.
def solution(piles):
total = 0
l = sorted(piles,reverse = True)
for i in range(0, len(l) - 1):
if l[i] > l[i + 1]:
total += i + 1
return total | [
"ar.smglln@gmail.com"
] | ar.smglln@gmail.com |
f92a2d65cadcf3edcc995607470cff87ffcb5b0f | b525ad01dac1595f1f8e124067e7cf86fe79f5f1 | /car_yaohao/tui/yaohao_prediction.py | ce300b7db846ddd4010841f554da6c085cc1db86 | [] | no_license | oaifaye/tensorflow_demo | cad381700d16b10052525a4c2210cf2243df746b | 660636d7bef8c7f4853c0b3a99ffdb4b6f569180 | refs/heads/master | 2021-09-05T09:44:22.357399 | 2018-01-26T07:02:24 | 2018-01-26T07:02:24 | 119,017,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,400 | py | '''
Created on 2018年1月13日
@author: Administrator
tf多特征非线性回归
https://www.2cto.com/kf/201704/626628.html
'''
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# 特征数
featurenum = 3
x = tf.placeholder(tf.float32, [None, featurenum])
y = tf.placeholder(tf.float32, [None, 1])
#定义神经网络中间层权值
weights_l1 = tf.placeholder(tf.float32, [featurenum, 10])
biases_l1 = tf.placeholder(tf.float32, [1, 10])
wx_plust_b_l1 = tf.matmul(x, weights_l1) + biases_l1
# l1 = tf.nn.relu(wx_plust_b_l1)#双曲正切函数作为激活函数
l1 = tf.nn.tanh(wx_plust_b_l1)#双曲正切函数作为激活函数
# l1 = tf.sigmoid(wx_plust_b_l1)
#定义输出层
weights_l2 = tf.placeholder(tf.float32, [ 10,1])
biases_l2 = tf.placeholder(tf.float32, [ 1,1])
wx_plust_b_l2 = tf.matmul(l1, weights_l2) + biases_l2
# prediction = tf.nn.relu(wx_plust_b_l2)#预测结果
prediction = tf.nn.tanh(wx_plust_b_l2)#预测结果
# prediction = tf.sigmoid(wx_plust_b_l2)#预测结果
#代价函数
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())#变量初始化,一定要做
seed_plt = 0.042
x_data = [
[-seed_plt*23,0.11789,0.11910],[-seed_plt*22,0.12477,0.12820],
[-seed_plt*21,0.14280,0.14806],[-seed_plt*20,0.15508,0.16252],[-seed_plt*19,0.16890,0.17658],
[-seed_plt*18,0.18292,0.19115],[-seed_plt*17,0.19867,0.20520],[-seed_plt*16,0.19941,0.20397],
[-seed_plt*15,0.16605,0.16883],[-seed_plt*14,0.15972,0.16275],[-seed_plt*13,0.16640,0.16952],
[-seed_plt*12,0.15151,0.15679],[-seed_plt*11,0.15752,0.16554],[-seed_plt*10,0.18508,0.19272],
[-seed_plt*9,0.19853,0.21006],[-seed_plt*8,0.21182,0.22585],[-seed_plt*7,0.32371,0.24690],
[-seed_plt*6,0.25987,0.27367],[-seed_plt*5,0.28182,0.29401],[-seed_plt*4,0.28292,0.29100],
[-seed_plt*3,0.22107,0.22590],[-seed_plt*2,0.22458,0.23236],[-seed_plt*1,0.23198,0.24061],
[seed_plt*1,0.21902,0.23076],[seed_plt*2,0.24511,0.25761],[seed_plt*3,0.26545,0.28025],
[seed_plt*4,0.27891,0.29496],[seed_plt*5,0.30306,0.32079],[seed_plt*6,0.31664,0.32712],
[seed_plt*7,0.15473,0.15936],[seed_plt*8,0.16705,0.17397],[seed_plt*9,0.18764,0.19823],
[seed_plt*10,0.19872,0.21006],[seed_plt*11,0.20558,0.21532],[seed_plt*12,0.20169,0.21244],
[seed_plt*13,0.19800,0.20500]
]
#学习率0.1
weights_ave_l1_p = [[-1.6345720291137695, 0.16002704203128815, -3.720055341720581, -0.09972929954528809, -2.0044445991516113, -0.25151756405830383, 1.092233657836914, -1.3219331502914429, 2.7261900901794434, -0.09247738122940063], [0.7514986395835876, -1.0940899848937988, 2.1134166717529297, -0.08639880269765854, 0.4848458170890808, -1.8154855966567993, 2.683783531188965, -0.4798979163169861, -0.3681236803531647, 0.3427703082561493], [0.5650380849838257, -0.6899119019508362, -0.3770529627799988, -1.0565630197525024, -2.6447322368621826, -2.462433099746704, 1.0621711015701294, 0.7418441772460938, -0.027339881286025047, -1.2374218702316284]]
biases_ave_l1_p = [[0.002408053958788514, -0.043371863663196564, -0.10514367371797562, -0.022852644324302673, -0.2746281027793884, 0.09120028465986252, 0.18297520279884338, -0.15229398012161255, 0.163157120347023, -0.03869754448533058]]
weights_ave_l2_p = [[1.0024975538253784], [0.984232485294342], [-0.8198782801628113], [0.8732115030288696], [-0.6737377047538757], [-2.5154635906219482], [-1.3600997924804688], [-1.046596646308899], [-1.076171875], [0.9199466109275818]]
biases_ave_l2_p = [[-0.08473621308803558]]
prediction_ave = sess.run(prediction, feed_dict={x:x_data,weights_l1:weights_ave_l1_p,biases_l1:biases_ave_l1_p,weights_l2:weights_ave_l2_p,biases_l2:biases_ave_l2_p})
print('预测ave:',prediction_ave)
weights_min_l1_p = [[1.5045439004898071, -2.2153048515319824, -0.05088438838720322, -5.076720237731934, 0.08630691468715668, -1.2653288841247559, -0.8979445695877075, 0.05308128520846367, 3.4334943294525146, 0.022553058341145515], [0.02079005539417267, -0.7477558851242065, 0.9663386344909668, -2.0129551887512207, -0.8947086930274963, -0.8803507685661316, 3.7918221950531006, 1.847798228263855, 4.007891654968262, 1.2713923454284668], [-2.1645727157592773, -1.8836021423339844, -0.6455804705619812, -2.0849404335021973, -0.35961779952049255, 0.14747656881809235, -2.182036876678467, -1.195816993713379, -0.455473929643631, 2.7534902095794678]]
biases_min_l1_p = [[-0.05450700595974922, -0.057999689131975174, 0.009392624720931053, -0.04014386981725693, -0.0126974917948246, 0.001202933257445693, 0.007342544849961996, 0.01960461027920246, -0.04443024843931198, -0.02939458005130291]]
weights_min_l2_p = [[-1.569625735282898], [1.710821509361267], [-0.38559436798095703], [1.3638279438018799], [0.5812253952026367], [0.3018932342529297], [-2.6823058128356934], [-0.8767712712287903], [2.8246896266937256], [1.399678349494934]]
biases_min_l2_p = [[-0.024785390123724937]]
prediction_min = sess.run(prediction, feed_dict={x:x_data,weights_l1:weights_min_l1_p,biases_l1:biases_min_l1_p,weights_l2:weights_min_l2_p,biases_l2:biases_min_l2_p})
print('预测min:',prediction_min)
#画图
# seed_plt = 0.040
x_plt = [[-seed_plt*23],[-seed_plt*22],[-seed_plt*21],[-seed_plt*20],[-seed_plt*19],[-seed_plt*18],[-seed_plt*17],[-seed_plt*16],[-seed_plt*15],[-seed_plt*14],[-seed_plt*13],[-seed_plt*12],
[-seed_plt*11],[-seed_plt*10],[-seed_plt*9],[-seed_plt*8],[-seed_plt*7],[-seed_plt*6],[-seed_plt*5],[-seed_plt*4],[-seed_plt*3],[-seed_plt*2],[-seed_plt*1],
[seed_plt*1],[seed_plt*2],[seed_plt*3],[seed_plt*4],[seed_plt*5],[seed_plt*6],[seed_plt*7],[seed_plt*8],[seed_plt*9],[seed_plt*10],[seed_plt*11],[seed_plt*12],[seed_plt*13],
# [seed_plt*14],
# [seed_plt*15],[seed_plt*16]#,[seed_plt*17],[seed_plt*18],[seed_plt*19],[seed_plt*20],[seed_plt*21],[seed_plt*22],[seed_plt*23],[seed_plt*24],[seed_plt*25]
]
plt.figure()
plt.plot(x_plt, prediction_ave, 'r-', lw = 5)#画预测的实线,红色
plt.plot(x_plt, prediction_min, 'b-', lw = 5)#画预测的实线,红色
plt.show()
| [
"slf_work@hotmail.com"
] | slf_work@hotmail.com |
7558eb13c62e6e8701d8c5244984f91e09431bde | 750c78b144a9ff5f03732e0db56af61e4b99de04 | /allProjects/allProjects/asgi.py | 9e2dcc5548b16ec4f516282108a60bba2206402c | [] | no_license | Pradeepsuthar/Covid19_symptomChecker | 30af12bc26dfbcfea1c5f8231016fc5b17bac066 | 5c42464819dbc188d2957b4d5e98f85f90c9e3f0 | refs/heads/master | 2021-05-21T16:58:44.421559 | 2020-09-17T19:18:09 | 2020-09-17T19:18:09 | 252,727,833 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
ASGI config for allProjects project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'allProjects.settings')
application = get_asgi_application()
| [
"sutharpradeep081@gmail.com"
] | sutharpradeep081@gmail.com |
1c8a5f2b8ea1d21124aa4c853b16f90ccf7db25c | 636ba2700eaf3a151b73144b510f38c75ab1919d | /keras/keras46_MC_5_diabetes.py | 5b1e1f877d96d175939c96bf9885a18040677c9f | [] | no_license | Taerimmm/ML | 17997f388e18c28dfd9de83af98a6d4bebe7e1f0 | 6147cede81ebcc95f21adebf75731fbbb11edfab | refs/heads/master | 2023-06-10T14:26:45.335219 | 2021-07-05T15:30:47 | 2021-07-05T15:30:47 | 324,874,959 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | import numpy as np
from sklearn.datasets import load_diabetes
dataset = load_diabetes()
x = dataset.data
y = dataset.target
print(x.shape, y.shape)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.2, random_state=45)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=.2, random_state=45)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(10,)))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
modelpath = '../data/modelcheckpoint/k46_diabetes_{epoch:02d}-{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
es = EarlyStopping(monitor='val_loss', patience=50, mode='auto')
model.fit(x_train, y_train, epochs=10000, batch_size=8, validation_data=(x_val, y_val), verbose=2, callbacks=[es,cp])
loss, mse = model.evaluate(x_test, y_test)
print("loss :", loss)
print('MSE :', mse)
y_predict = model.predict(x_test)
from sklearn.metrics import mean_squared_error, r2_score
def rmse(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test, y_predict))
print('RMSE :', rmse(y_test, y_predict))
print('MSE :', mean_squared_error(y_test, y_predict))
print('R2 :', r2_score(y_test, y_predict))
| [
"xofla7560@naver.com"
] | xofla7560@naver.com |
d4b3004e359038387a94c6cd18da5927de369bf1 | b09de58f95a76a5b1304205e44ba6be3965da33a | /chapter_06/06_use_generator_model.py | 6293a47fcd3a3256bffaa319886fa4c8d652695d | [] | no_license | fenago/generative-adversarial-networks | 416c6c84370c87148e00fa714b5d7ac043667cc9 | 9c9c08e25d01d7b69c863d7a9927b15140dca4d3 | refs/heads/master | 2021-01-02T12:02:08.749359 | 2020-02-29T12:35:21 | 2020-02-29T12:35:21 | 239,614,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | # %%
# define and use the generator model
from numpy.random import randn
from keras.models import Sequential
from keras.layers import Dense
%matplotlib notebook
from matplotlib import pyplot
# define the standalone generator model
def define_generator(latent_dim, n_outputs=2):
model = Sequential()
model.add(Dense(15, activation='relu', kernel_initializer='he_uniform', input_dim=latent_dim))
model.add(Dense(n_outputs, activation='linear'))
return model
# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n):
# generate points in the latent space
x_input = randn(latent_dim * n)
# reshape into a batch of inputs for the network
x_input = x_input.reshape(n, latent_dim)
return x_input
# use the generator to generate n fake examples and plot the results
def generate_fake_samples(generator, latent_dim, n):
# generate points in latent space
x_input = generate_latent_points(latent_dim, n)
# predict outputs
X = generator.predict(x_input)
# plot the results
pyplot.scatter(X[:, 0], X[:, 1])
pyplot.show()
# size of the latent space
latent_dim = 5
# define the discriminator model
model = define_generator(latent_dim)
# generate and plot generated samples
generate_fake_samples(model, latent_dim, 100) | [
"31277617+athertahir@users.noreply.github.com"
] | 31277617+athertahir@users.noreply.github.com |
b99a243aa03a9fd7d85f1b818df6bfd48f4d9dab | 4bb30a508977ad6b950b98c8ef4d7dec9d988a6a | /bacs350/demo/week05/Demo14/Demo14/wsgi.py | 6cc8bbaec0e26b099073f17bc6059709b4275f6a | [] | no_license | Mark-Seaman/UNC-BACS350-2020-Fall | cfad05b6b58f15401e120beba9a79a9bbd8a1525 | 8a14ad72f43fdffa9491b9a9b38d65d71a074986 | refs/heads/master | 2023-07-19T03:01:27.298647 | 2020-12-14T19:24:39 | 2020-12-14T19:24:39 | 279,434,877 | 0 | 0 | null | 2021-09-22T19:44:33 | 2020-07-13T23:41:47 | HTML | UTF-8 | Python | false | false | 389 | py | """
WSGI config for Demo14 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Demo14.settings')
application = get_wsgi_application()
| [
"Mark.Seaman@imac.net"
] | Mark.Seaman@imac.net |
ec26144644568fdd3f604ce3d8d4aa765ca16d56 | 3d569375e38cbc2e73f54a9e5dd140b4021edb46 | /爬虫/requests项目/爬取去哪儿景点.py | da06f893e653a0a7f404c190512dd1a2bdc4fadd | [] | no_license | Gscsd8527/python | 2dffb13944346ca1772a4de52a80c644f19bcf72 | c7cb0653355365fc18a235f427315fae8f2b8734 | refs/heads/master | 2020-04-28T21:15:23.514693 | 2019-04-20T12:50:04 | 2019-04-20T12:50:04 | 175,575,773 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | import requests
from bs4 import BeautifulSoup
import re
def parse(url):
html = requests.get(url,headers=headers)
html = html.text
soup = BeautifulSoup(html,'lxml')
city = soup.select('div.e_destin_ct dl.m_nav dd a')
lst =[]
for i in city:
# 有的城市被推荐到热门城市中去了,而在地区中也有该城市,所以要去重
if i.text not in lst:
lst.append(i.text)
city_name = i.text
city_url = i.get('href')
city_url='https:'+city_url
parse_url(city_name,city_url)
# 解析每个城市url中的数据
def parse_url(city_name,city_url):
print(city_name,city_url)
html = requests.get(city_url,headers=headers)
html = html.text
soup = BeautifulSoup(html,'lxml')
print(soup)
if __name__=='__main__':
headers = {
'User-Agent': 'Mozilla/4.0(compatible;MSIE 5.5;Windows NT)', }
start_url='https://dujia.qunar.com/p/domestic?tm=ign_origin'
parse(start_url)
| [
"tan_gscsd@163.com"
] | tan_gscsd@163.com |
93410943fa474b3e5f7f1f559b3bddf843a3abd9 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4/Gather2_W_fixGood_C_change/ep0_test/pyr_0s/L6/step09_0side_L6.py | 21fc6f084b824966d3500838e49e391b5b9d89c5 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,983 | py | #############################################################################################################################################################################################################
from step08_c_use_G_generate_I_w_M_to_Wx_Wy_Wz_focus_to_Cx_Cy_focus_combine import I_w_M_to_W_to_C
from step08_b_use_G_generate_0_util import Tight_crop, Color_jit
from step09_c_train_step import Train_step_I_w_M_to_W_to_C
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
color_jit = Color_jit(do_ratio=0.6)
use_gen_op_p20 = I_w_M_to_W_to_C( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) ) ### 我目前的 multi_model 的 I_to_Wxyz_to_Cxy_general 是 全部都回傳 Wz_pre_w_M, Wy_pre_w_M, Wx_pre_w_M, Cx_pre_w_M, Cy_pre_w_M, 所以不管 wi/woDIV, Separate 全設 True 就對了
use_train_step_p20 = Train_step_I_w_M_to_W_to_C( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15), color_jit=color_jit ) ### 我目前的 multi_model 的 I_to_Wxyz_to_Cxy_general 是 全部都回傳 Wz_pre_w_M, Wy_pre_w_M, Wx_pre_w_M, Cx_pre_w_M, Cy_pre_w_M, 所以不管 wi/woDIV, Separate 全設 True 就對了
from Exps_7_v3.doc3d.Ablation4.W_w_M_to_C_pyr.pyr_0s.L6.step09_0side_L6 import *
from Exps_7_v3.doc3d.Ablation4.I_w_M_to_W_pyr.pyr_3s.L5.step09_3side_L5 import ch032_pyramid_1side_6__2side_4__3side_3 as I_w_M_to_W_Tcrop255_p20_3s_L5_good
import time
start_time = time.time()
###############################################################################################################################################################################################
#########################################################################################
ch032_pyramid_0side_and_1s6_2s6 = KModel_builder().set_model_name(MODEL_NAME.multi_flow_unet).set_multi_model_builders(op_type="I_to_Wxyz_to_Cxy_general", W_to_Cx_Cy=ch032_pyramid_0side, I_to_Wx_Wy_Wz=I_w_M_to_W_Tcrop255_p20_3s_L5_good).set_multi_model_separate_focus(I_to_W_separ=False, I_to_W_focus=True, W_to_C_separ=False, W_to_C_focus=True).set_gen_op( use_gen_op_p20 ).set_train_step( use_train_step_p20 )
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_0side
use_model = use_model.build()
result = use_model.generator(data, Mask=data)
print(result[0].shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
f6c058527d475a757333395590501ec8765c4950 | c3a6b6f74623b2b26e6d4a259b06367ff6ac7a60 | /tests/test_backbones/test_shufflenet_v2.py | d69ec6c117832e05159a8a12e9d20694a34127ea | [
"Apache-2.0"
] | permissive | jcwon0/BlurHPE_prev | f3785eeac7063799874f5272eacbda0234d0369a | 8cac1de10a60898eaa702e536c4e24a27d469f6d | refs/heads/master | 2023-04-01T03:26:59.371057 | 2021-04-02T07:51:46 | 2021-04-02T07:51:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,495 | py | import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmpose.models.backbones import ShuffleNetV2
from mmpose.models.backbones.shufflenet_v2 import InvertedResidual
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (InvertedResidual, )):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
def test_shufflenetv2_invertedresidual():
with pytest.raises(AssertionError):
# when stride==1, in_channels should be equal to out_channels // 2 * 2
InvertedResidual(24, 32, stride=1)
with pytest.raises(AssertionError):
# when in_channels != out_channels // 2 * 2, stride should not be
# equal to 1.
InvertedResidual(24, 32, stride=1)
# Test InvertedResidual forward
block = InvertedResidual(24, 48, stride=2)
x = torch.randn(1, 24, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size((1, 48, 28, 28))
# Test InvertedResidual with checkpoint forward
block = InvertedResidual(48, 48, stride=1, with_cp=True)
assert block.with_cp
x = torch.randn(1, 48, 56, 56)
x.requires_grad = True
x_out = block(x)
assert x_out.shape == torch.Size((1, 48, 56, 56))
def test_shufflenetv2_backbone():
with pytest.raises(ValueError):
# groups must be in 0.5, 1.0, 1.5, 2.0]
ShuffleNetV2(widen_factor=3.0)
with pytest.raises(ValueError):
# frozen_stages must be in [0, 1, 2, 3]
ShuffleNetV2(widen_factor=1.0, frozen_stages=4)
with pytest.raises(ValueError):
# out_indices must be in [0, 1, 2, 3]
ShuffleNetV2(widen_factor=1.0, out_indices=(4, ))
with pytest.raises(TypeError):
# pretrained must be str or None
model = ShuffleNetV2()
model.init_weights(pretrained=1)
# Test ShuffleNetV2 norm state
model = ShuffleNetV2()
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
# Test ShuffleNetV2 with first stage frozen
frozen_stages = 1
model = ShuffleNetV2(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for param in model.conv1.parameters():
assert param.requires_grad is False
for i in range(0, frozen_stages):
layer = model.layers[i]
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ShuffleNetV2 with norm_eval
model = ShuffleNetV2(norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test ShuffleNetV2 forward with widen_factor=0.5
model = ShuffleNetV2(widen_factor=0.5, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 48, 28, 28))
assert feat[1].shape == torch.Size((1, 96, 14, 14))
assert feat[2].shape == torch.Size((1, 192, 7, 7))
# Test ShuffleNetV2 forward with widen_factor=1.0
model = ShuffleNetV2(widen_factor=1.0, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 116, 28, 28))
assert feat[1].shape == torch.Size((1, 232, 14, 14))
assert feat[2].shape == torch.Size((1, 464, 7, 7))
# Test ShuffleNetV2 forward with widen_factor=1.5
model = ShuffleNetV2(widen_factor=1.5, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 176, 28, 28))
assert feat[1].shape == torch.Size((1, 352, 14, 14))
assert feat[2].shape == torch.Size((1, 704, 7, 7))
# Test ShuffleNetV2 forward with widen_factor=2.0
model = ShuffleNetV2(widen_factor=2.0, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 244, 28, 28))
assert feat[1].shape == torch.Size((1, 488, 14, 14))
assert feat[2].shape == torch.Size((1, 976, 7, 7))
# Test ShuffleNetV2 forward with layers 3 forward
model = ShuffleNetV2(widen_factor=1.0, out_indices=(2, ))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert isinstance(feat, torch.Tensor)
assert feat.shape == torch.Size((1, 464, 7, 7))
# Test ShuffleNetV2 forward with layers 1 2 forward
model = ShuffleNetV2(widen_factor=1.0, out_indices=(1, 2))
model.init_weights()
model.train()
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size((1, 232, 14, 14))
assert feat[1].shape == torch.Size((1, 464, 7, 7))
# Test ShuffleNetV2 forward with checkpoint forward
model = ShuffleNetV2(widen_factor=1.0, with_cp=True)
for m in model.modules():
if is_block(m):
assert m.with_cp
| [
"jcwon@postech.ac.kr"
] | jcwon@postech.ac.kr |
f6ddf8e6bf8dc89101fd648bc2a72a106cebf946 | 85f94cfd370ca7d384977cb091e886d8f80161e8 | /setup.py | ff1a0fe29c8ffa0d284df0108fcff728caaedef3 | [
"MIT"
] | permissive | litwisha/asyncio_monkey | f4df643a953f6282b6a2f1202cef5d2cfc113f98 | 7f845b676bdb4db1f4ccb8377327a09db4391322 | refs/heads/master | 2020-12-30T14:44:29.324840 | 2017-05-12T08:54:53 | 2017-05-12T08:54:53 | 91,078,807 | 0 | 0 | null | 2017-05-12T10:10:30 | 2017-05-12T10:10:30 | null | UTF-8 | Python | false | false | 1,349 | py | import io
import os
import re
from setuptools import setup
def get_version():
regex = r"__version__\s=\s\'(?P<version>[\d\.]+?)\'"
path = ('asyncio_monkey.py',)
return re.search(regex, read(*path)).group('version')
def read(*parts):
filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), *parts)
with io.open(filename, encoding='utf-8', mode='rt') as fp:
return fp.read()
setup(
name='asyncio_monkey',
version=get_version(),
author='wikibusiness',
author_email='osf@wikibusiness.org',
url='https://github.com/wikibusiness/asyncio_monkey',
description='Simple lru_cache for asyncio',
long_description=read('README.rst'),
extras_require={
':python_version=="3.3"': ['asyncio'],
},
py_modules=['asyncio_monkey'],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords=['asyncio', 'monkey patch'],
)
| [
"hellysmile@gmail.com"
] | hellysmile@gmail.com |
5bdcf9044e8d6f72c9c4ff65ce03136b1cf7aefe | d8498ca05daa83f108a231c024b4efa2f4b71747 | /ex13_8.py | 91f418127e41f7460d0dfa1e63b5135d5cbddbf7 | [
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | antoalv19/TP_solutions | ca9baf22ca1d16bac1f1bbabdefe473bb1b0ac0a | 7c1d3b35c11cfbd9c66be9878aafa1dbe8802581 | refs/heads/master | 2020-07-03T14:31:44.626360 | 2020-01-04T20:01:27 | 2020-01-04T20:01:27 | 201,936,524 | 0 | 0 | null | 2019-08-12T13:32:04 | 2019-08-12T13:32:03 | null | UTF-8 | Python | false | false | 1,735 | py | import random
import string
from collections import defaultdict
def read_and_analyze(filename, skip_header=True):
'''Read a text file and perform Markov analysis.
structure: dict[prefix] = suffix
Returns a markov dict
'''
d = defaultdict(list)
with open(filename, encoding="utf8") as fin:
if skip_header:
skip_gutenberg_header(fin)
for line in fin:
line = line.replace('-', '')
line_split = line.split()
for i in range(0,len(line_split)-1):
strippables = string.punctuation + string.whitespace
word = line_split[i]
word = word.strip(strippables).lower()
# simple behavior with low effiency
d[word].append(line_split[i+1])
# This method could have problem with next line
return d
def generate_random_content(d, prefix_len=2, text_len=50):
'''Generate a random text based on a given markov dict
Starts again if raised error
'''
des = []
word_list = []
for word in list(d.keys()):
if len(word) == prefix_len:
word_list.append(word)
first = random.choice(word_list)
des.append(first)
first_s = d[first]
index2 = random.randint(0,len(first_s)-1)
for i in range(text_len):
previous = first_s[index2]
sub = d[previous]
random_index = random.randint(0,len(sub)-1)
des.append(sub[random_index])
print(' '.join(des))
def skip_gutenberg_header(fp):
"""Reads from fp until it finds the line that ends the header.
fp: open file object
copied from author's answer
"""
for line in fp:
if line.startswith('*END*THE SMALL PRINT!'):
break
def main():
d = read_and_analyze('emma.txt')
#for prefix, suffix in d.items():
# print(prefix, suffix)
print("Generating random text")
generate_random_content(d)
if __name__ == '__main__':
main() | [
"dixingxu@gmail.com"
] | dixingxu@gmail.com |
7888f9d838d0b3092c11ba496518c7f5bda7104d | 453e53e9074c1657b3d04c3d3c07b8b21601852a | /.history/scraping_vivino_20210205211247.py | 94882c0a467b5cae6a96af69c9894569de42a2ed | [] | no_license | kristiewirth/vivino-data-analysis | 58fd66fb3b8d8a098dab5bc030b0110b287f6e22 | 8c02c14926fdd6dbb493b68b64daa27a7242780f | refs/heads/main | 2023-02-28T02:44:15.130286 | 2021-02-06T05:25:59 | 2021-02-06T05:25:59 | 336,463,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | import pprint as pprint
import pandas as pd
import requests
import seaborn as sns
import numpy as np
df = pd.DataFrame(columns=["wine", "rating", "price"])
for page_num in np.arange(1, 10, 1):
r = requests.get(
"https://www.vivino.com/api/explore/explore",
params={
"currency_code": "US",
"min_rating": "1",
"page": page_num,
# "price_range_max": "100",
"price_range_min": "9",
"order_by": "price",
"order": "asc",
},
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0"
},
)
for record in r.json()["explore_vintage"]["matches"]:
try:
wine = record["vintage"]["name"]
rating = record["vintage"]["statistics"]["ratings_average"]
price = record["price"]["amount"]
df = df.append(
pd.DataFrame(
[[wine, rating, price]], columns=["wine", "rating", "price"]
)
)
except Exception:
pass
df.reset_index(inplace=True, drop=True)
df.sort_values(by="price", ascending=True, inplace=True)
df.to_csv("vivino-ratings.csv")
############################################################
df = pd.read_csv("vivino-ratings.csv")
# Graphing
sns.regplot(data=df, x="price", y="rating")
# Making summary df
df["rounded_price"] = 5 * round(df["price"] / 5)
rounded_ratings_df = (
pd.DataFrame(df.groupby("rounded_price")["rating"].mean())
.sort_values(by="rounded_price", ascending=True)
.reset_index(drop=False)
)
rounded_ratings_df["rounded_rating"] = round(rounded_ratings_df["rating"], 2)
# Calculating increases in ratings over time
previous = 0
diffs = []
for i, row in rounded_ratings_df.iterrows():
if previous == 0:
diffs.append(0)
else:
diff = row["rating"] - previous
diffs.append(diff)
previous = row["rating"]
rounded_ratings_df["increase_in_rating"] = diffs
rounded_ratings_df.drop("rating", axis=1, inplace=True) | [
"kristie.ann.wirth@gmail.com"
] | kristie.ann.wirth@gmail.com |
71d52862afb3a6831974332d3e21dc231baa6c9b | 9d30115d59ed821a5c7aecf2318b5e0ed22c9676 | /src/codewars/python/5kyu/to_camel_case.py | 4b2b5f97106701d81ffce74746cf93b6f058d660 | [] | no_license | garigari-kun/til | 02c7bf05274d1077b454e1f7d4a7355849441524 | b71f36a66045ab7da7f4a97f7e18de2aaa05f493 | refs/heads/master | 2020-04-16T02:13:45.727909 | 2018-12-16T01:26:40 | 2018-12-16T01:26:40 | 56,369,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | """
Complete the method/function so that it converts dash/underscore delimited words into camel casing.
The first word within the output should be capitalized only if the original word was capitalized.
Examples:
# returns "theStealthWarrior"
to_camel_case("the-stealth-warrior")
# returns "TheStealthWarrior"
to_camel_case("The_Stealth_Warrior")
"""
def to_camel_case(text):
if not len(text):
return ''
result = ''
word_list = []
text = text.replace('_', '-')
if '-' in text:
word_list = text.split('-')
print(word_list)
for index, word in enumerate(word_list):
if index == 0:
result += word
else:
if word[0].islower():
result += word[0].capitalize() + word[1:]
else:
result += word
return result
if __name__ == '__main__':
print(to_camel_case("the-stealth-warrior"))
print(to_camel_case("The_Stealth_Warrior"))
print(to_camel_case("The-pippi_was_Hungry"))
| [
"keisuke.cs@gmail.com"
] | keisuke.cs@gmail.com |
8c1bc0ee50750025de12f5b5afc1698f858033f0 | b9865b85f99ece1cb92038a53a6f9f205fe69bc2 | /sddsToolkit/printpage.py | 7e7d5337c0cdbe2352afce9ff19b5f781e932f8a | [] | no_license | Tubbz-alt/SDDSTOOLS | 43aa32dc556c8e1886eb408b09fb600bc6da91a3 | d2a896277d6a516d7138c493a51119ac50c586c6 | refs/heads/master | 2021-05-29T08:49:46.271548 | 2014-10-22T17:58:14 | 2014-10-22T17:58:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from sdds import sddsdata
def printpage(sddsobj):
numberOfColumns = len(sddsobj.columnName)
a=[[] for i in range(numberOfColumns) ]
print a
# for i in range(numberOfParameters):
# sddsobj.parameterData[i].append(sddsdata.GetParameter(sddsobj.index,i))
for i in range(numberOfColumns):
a[i].append(sddsdata.GetColumn(sddsobj.index,i))
print a
print a[1][0][0]
| [
"joelfred@slac.stanford.edu"
] | joelfred@slac.stanford.edu |
4196e6ba86de68c23b2e601d85a6e2f9d3a8ec74 | 24c5b944717b35db74fe33c494ded4194b53aeca | /sephiroth/__init__.py | 6cf205d176b48d14b97c2b3b6f83b8b5d367ecc1 | [
"WTFPL"
] | permissive | zevlag/sephiroth | bd1f42f697fb8dffbc38f5388636ff848de72b0f | 4c77aee5880751962e29b9a439a17c9c5e725a40 | refs/heads/main | 2023-03-12T07:24:53.636547 | 2021-02-26T18:45:10 | 2021-02-26T18:45:10 | 342,656,892 | 0 | 0 | WTFPL | 2021-02-26T17:56:09 | 2021-02-26T17:56:09 | null | UTF-8 | Python | false | false | 128 | py | __author__ = "0xdade"
__maintainer__ = "0xdade"
__email__ = "dade@actualcrimes.org"
__license__ = "WTFPL"
__version__ = "1.0.2"
| [
"0xdade@users.noreply.github.com"
] | 0xdade@users.noreply.github.com |
466e264e0eb65c0ff42c8b1860796f28bd68de10 | 3c24f5e9a513f447d2d9f4c912e020775e15bebe | /dbm.py | d8a5a65cd6fc91862b53ab2526b266a7120d54ad | [
"MIT"
] | permissive | GiggleLiu/QuRBM | 2ef5659640f272dd0bd7500862f78e1770e5376d | 2cb16e534ccbf875b88c164837bb8ffada5a2b03 | refs/heads/master | 2021-01-23T01:45:44.578449 | 2017-05-01T16:40:05 | 2017-05-01T16:40:05 | 85,930,945 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,550 | py | '''
Restricted Boltzmann Machine.
'''
from numpy import *
import numbers,pdb
from scipy.special import expit
from sstate import SparseState
from group import NoGroup
from utils import fh
__all__=['DBM','random_dbm']
class DBM(object):
'''
Restricted Boltzmann Machine class.
Attributes:
:a: 1darray, the bias for input layer
:b_L: list of 1darray, biases for hidden layers.
:W_L: list of 2darray, the weights
:group: Group, translation group.
:nin,nhid: int, number of input and hidden layer, (nin,nh) = shape(W)
'''
def __init__(self,b_L,W_L,group=NoGroup(),var_mask=None,input_node_type='linear',hidden_node_type='linear'):
self.b_L,self.W_L=b_L,W_L
self.group=group
if var_mask is None:
var_mask=[True]*(len(b_L)+len(W_L))
else:
if len(var_mask)!=len(b_L)+len(W_L):raise ValueError('number of variable mask not match.')
self.var_mask=var_mask
self.input_node_type,self.hidden_node_type=input_node_type,hidden_node_type
#check data
for i in xrange(len(W_L)):
w=W_L[i]
bl=b_L[i]
br=b_L[i+1]
if w.shape!=(len(bl),len(br)):
raise ValueError('Matrix-bias shape mismatch.')
if not len(self.b_L)==len(self.W_L)+1: raise ValueError('# of layer weights and biases not match.')
def __str__(self):
return '<DBM>\n%s\n%s\nGroup = %s'%('\n'.join(['b(%s) %s'%(i,b) for i,b in enumerate(self.b_L)]),\
'\n'.join(['W(%s,%s) %s'%(i,i+1,W) for i,W in enumerate(self.W_L)]),self.group)
def __repr__(self):
return '<DBM> in[%s] hid[%s]'%(self.nin,' x '.join([str(len(b)) for b in self.b_L[1:]]))
@property
def num_layers(self):return len(self.b_L)
@property
def nin(self): return len(self.b_L[0])
@property
def weight_dtype(self):
return self.W[0].dtype
def layer_dim(self,i):
'''dimension of i-th layer.'''
return len(self.b_L[i])
def get_W0_nogroup(self):
'''Get the group expanded W.'''
return self.group.unfold_W(self.W_L[0])
def get_a_nogroup(self):
'''Get the group expanded a.'''
return self.group.unfold_a(self.b_L[0])
def feed_input(self,v):
'''
Feed visible inputs, and get output in hidden layers.
Parameters:
:v: 1d array, input vector.
Return:
1darray, raw output in hidden nodes.
'''
for W,b in zip(self.W_L,self.b_L[1:]):
v=v.dot(W)+b
if self.hidden_node_type=='binary':
v=expit(v)
return v
def feed_hidden(self,h):
'''
Feed hidden inputs, and reconstruct visible layers.
Parameters:
:h: 1d array, input vector.
Return:
1darray, raw output in input nodes.
'''
for W,b in zip(self.W_L,self.b_L):
if h.ndim>1:
res=self.get_W_nogroup().dot(h.T).T+self.get_a_nogroup()
else:
res=self.get_W_nogroup().dot(h)+self.get_a_nogroup()
if self.input_node_type=='binary':
return expit(res)
else:
return res
def tovec(self,spaceconfig): #poor designed interface.
'''
Get the state vector.
\Psi(s,W)=\sum_{\{hi\}} e^{\sum_j a_j\sigma_j^z+\sum_i b_ih_i +\sum_{ij}W_{ij}h_i\sigma_j}
'''
return self.get_weight(config=1-2*spaceconfig.ind2config(arange(spaceconfig.hndim)))
def get_weight(self,config,theta=None):
'''
Get the weight for specific configuration.
Parameters:
:config: 1darray,
:theta: 1darray/None, table of hidden layer output: b+v.dot(W), intended to boost operation.
Return:
number,
'''
group=self.group
if theta is None: theta=self.feed_input(config)
return exp(sum([group.apply(asarray(config),ig).dot(self.a) for ig in xrange(group.ng)],axis=0))*prod(fh(theta),axis=-1)
def dump_arr(self):
'''Dump values to an array.'''
return concatenate([b for b,mask in zip(self.b_L,self.var_mask[:self.num_layers]) if mask]+\
[W.ravel() for W,mask in zip(self.W_L,self.var_mask[self.num_layers:]) if mask])
def load_arr(self,v):
'''Load data from an array.'''
offset=0
for b,mask in zip(self.b_L,self.var_mask[:self.num_layers]):
if mask:
layer_size=len(b)
b[:]=v[offset:offset+layer_size]
offset+=layer_size
for W,mask in zip(self.W_L,self.var_mask[self.num_layers:]):
if mask:
layer_size=W.shape[0]*W.shape[1]
W[...]=v[offset:offset+layer_size].reshape(W.shape)
offset+=layer_size
def random_dbm(dims,group=NoGroup(),dtype='complex128',magnitude=2e-2,**kwargs):
'''Get a random Restricted Boltzmann Machine'''
num_layers=len(dims)
b_L,W_L=[],[]
if dtype=='complex128':
rng=lambda shape:random.uniform(-magnitude,magnitude,shape)+1j*random.uniform(-magnitude,magnitude,shape)
elif dtype=='float64':
rng=lambda shape:random.uniform(-magnitude,magnitude,shape)
else:
raise ValueError('unsupported dtype %s'%dtype)
for i in xrange(num_layers):
b_L.append(rng(dims[i]))
if i!=0:
W_L.append(rng((dims[i-1],dims[i])))
return DBM(b_L=b_L,W_L=W_L,group=group,**kwargs)
| [
"cacate0129@gmail.com"
] | cacate0129@gmail.com |
d93af5bb78793b79096c1c32ccf6655fe77bfd53 | 74d0235c4eed1e4bc57dd906d2b3958cb48b9dba | /torch/fx/experimental/fx2trt/example/unittests.py | a8d8d4cb93d08263d3c834e4a91fbda507846076 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | anjali411/pytorch | a31ecf84fe892f19452b1063f2b1de1f88d84bb0 | 51b67f2bca3014aa5e7f675237543b8f82743032 | refs/heads/master | 2022-07-22T16:58:56.800837 | 2021-10-14T17:22:15 | 2021-10-14T17:23:55 | 208,863,312 | 1 | 0 | NOASSERTION | 2020-05-14T06:54:25 | 2019-09-16T17:56:13 | C++ | UTF-8 | Python | false | false | 4,052 | py | import torch
from torch.ao.quantization.quantize_fx import (
prepare_fx,
convert_fx,
get_tensorrt_backend_config_dict
)
import torch.fx.experimental.fx_acc.acc_tracer as acc_tracer
from torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.common_quantization import NodeSpec as ns
import unittest
def lower_to_trt(model, sample_input, shape_ranges):
model = acc_tracer.trace(model, [sample_input]) # type: ignore[attr-defined]
interp = TRTInterpreter(
model,
[InputTensorSpec(
torch.Size([-1, *sample_input.shape[1:]]), torch.float,
shape_ranges=shape_ranges, has_batch_dim=True)],
explicit_batch_dimension=True, explicit_precision=True)
engine, input_names, output_names = interp.run(fp16_mode=False, int8_mode=True)
trt_mod = TRTModule(engine, input_names, output_names)
return trt_mod
@unittest.skipIf(not TEST_CUDA, "gpu is not available.")
class TestQuantizeFxTRT(QuantizationTestCase):
def test_conv(self):
class Conv2d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.conv = torch.nn.Conv2d(*args)
def forward(self, x):
return self.conv(x)
conv2d_input = torch.rand(1, 3, 224, 224)
conv2d_module_args = (3, 3, 3)
m = Conv2d(*conv2d_module_args).eval()
qconfig = torch.ao.quantization.QConfig(
activation=torch.ao.quantization.observer.HistogramObserver.with_args(
qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
),
weight=torch.ao.quantization.default_weight_observer
)
prepared = prepare_fx(m, {"": qconfig}, backend_config_dict=get_tensorrt_backend_config_dict())
# calibration
prepared(conv2d_input)
quantized = convert_fx(prepared, is_reference=True)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
self.checkGraphModuleNodes(quantized, expected_node_occurrence=node_occurrence)
# lower to trt
trt_mod = lower_to_trt(quantized, conv2d_input, [((1, 3, 224, 224), (5, 3, 224, 224), (10, 3, 224, 224))])
# make sure it runs
trt_mod(conv2d_input.cuda())
def test_linear(self):
class LinearModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
linear_module_input = torch.rand(8, 5)
m = LinearModule().eval()
qconfig = torch.ao.quantization.QConfig(
activation=torch.ao.quantization.observer.HistogramObserver.with_args(
qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
),
weight=torch.ao.quantization.default_weight_observer
)
prepared = prepare_fx(m, {"": qconfig}, backend_config_dict=get_tensorrt_backend_config_dict())
# calibration
prepared(linear_module_input)
quantized = convert_fx(prepared, is_reference=True)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
self.checkGraphModuleNodes(quantized, expected_node_occurrence=node_occurrence)
# lower to trt
trt_mod = lower_to_trt(
quantized,
linear_module_input,
[((1, *linear_module_input.shape[1:]),
(5, *linear_module_input.shape[1:]),
(10, *linear_module_input.shape[1:]))])
# make sure it runs
trt_mod(linear_module_input.cuda())
if __name__ == '__main__':
run_tests()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f700721c9081d5fcaafc1f6a00bf77aae404a0d3 | 8ad573e455450ce45b8dfd1cb995a8ce43f9a114 | /core/result/failure/bad_request.py | a7b271b3de9e8ba75db86d3542a964b797fee057 | [] | no_license | afsaneh92/dr_autol | 4da458b8a6682603c227e34a1a827a5918d40831 | a1bd6d55ce9b67543ad7387631c48440dd38f68d | refs/heads/master | 2022-12-23T08:32:47.851059 | 2019-12-10T13:51:24 | 2019-12-10T13:51:24 | 227,130,083 | 0 | 0 | null | 2022-12-08T01:05:23 | 2019-12-10T13:38:52 | Python | UTF-8 | Python | false | false | 132 | py | from core.result import Result
class BadRequest(Result):
def dictionary_creator(self):
return {"status": self.status}
| [
"forafsaneh.91@gmail.com"
] | forafsaneh.91@gmail.com |
b67237062e950386e920e3e37c3bf871a80d73e4 | f9f0ddbb211bde92316ca746938688e7c82e2fe0 | /flask-test/flask/app/recipes/routes.py | aa2d6e07e23264d7363001d65eaa43fce1987817 | [] | no_license | Todai88/Python-Refresher | c93475cc08b2ed6905901f0f615bc446820382d8 | a74a7e878906201ed7f09a17d44fc94d1137eb3a | refs/heads/master | 2020-04-06T07:38:13.539997 | 2018-11-19T21:01:46 | 2018-11-19T21:01:46 | 157,279,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from flask import render_template
from . import recipes_blueprint
@recipes_blueprint.route('/')
def index():
return render_template('recipes/index.html')
| [
"joabaj88@gmail.com"
] | joabaj88@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.