blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
806c0d4bfb02f750662a45711cbf31d002796237 | 205861f3dc228f78eb6544ef5ed987175938eb84 | /celery/wait_rabbitmq.py | eb3b23a6c7d7cd6ca05112587be942d710b9cd5a | [] | no_license | Muzque/nicetomeetyou | c2e0e4e9fdccdf4969b1b721ad95654e486da84b | baf2db0d2a60ab51fce956f70c9acc55a3181bf9 | refs/heads/master | 2020-04-05T06:25:24.316156 | 2018-11-11T11:08:01 | 2018-11-11T11:08:01 | 156,637,397 | 0 | 0 | null | 2018-11-08T02:16:50 | 2018-11-08T02:16:49 | null | UTF-8 | Python | false | false | 1,169 | py | import argparse
import pika
import time
parser = argparse.ArgumentParser(description='Check connection.')
parser.add_argument('--server', default='rabbitmq')
parser.add_argument('--virtual_host', default='celery_vhost')
parser.add_argument('--ssl', action='store_true')
parser.add_argument('--port', type=int, default='5672')
parser.add_argument('--username', default='celery')
parser.add_argument('--password', default='pw123456')
args = vars(parser.parse_args())
credentials = pika.PlainCredentials(args['username'], args['password'])
parameters = pika.ConnectionParameters(host=args['server'],
port=args['port'],
virtual_host=args['virtual_host'],
credentials=credentials,
ssl=args['ssl'])
while True:
try:
connection = pika.BlockingConnection(parameters)
if connection.is_open:
print("RabbitMQ successful connected.")
connection.close()
break
except Exception as e:
print("RabbitMQ not responds... :{}".format(e))
time.sleep(1.0)
| [
"zerosky1943@gmail.com"
] | zerosky1943@gmail.com |
a7cc0d1ab36a3ee26d3b0a4230bda54f1ea0db08 | 246e9200a834261eebcf1aaa54da5080981a24ea | /ctci/arrays-and-strings/1-unique-characters-in-string.py | 73a46444484f9c5f684bb78e31fca563eff94d32 | [] | no_license | kalsotra2001/practice | db435514b7b57ce549b96a8baf64fad8f579da18 | bbc8a458718ad875ce5b7caa0e56afe94ae6fa68 | refs/heads/master | 2021-12-15T20:48:21.186658 | 2017-09-07T23:01:56 | 2017-09-07T23:01:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import sys
def unique(s):
char = [False] * 128
if len(s) > 128:
return False
for c in s:
if char[ord(c)] == True:
return False
else:
char[ord(c)] = True
return True
s = sys.stdin.readline().strip()
if unique(s):
print "Unique."
else:
print "Not unique." | [
"jacquelineluo95@gmail.com"
] | jacquelineluo95@gmail.com |
aa686c33f11968bbdc3d862cf0fce99117109547 | b76e39e535499704368eddc26237dc0016ef7d06 | /LF/fn_ProportionValve_V3.py | 5557424e4d8b523e9634e2cbfadd48247997997f | [] | no_license | BUBAIMITRA2018/castersimulation | 0532e53df7d346c2824e577cc91cd0ac2ce4694c | eca5fddff5c0f33f785168f6b1e9f572c1622be0 | refs/heads/master | 2022-12-10T02:45:04.207196 | 2020-09-09T05:35:54 | 2020-09-09T05:35:54 | 260,110,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,700 | py | import time
from event_V2 import *
from clientcomm_v1 import *
from readgeneral_v2 import *
from writegeneral_v2 import *
import logging
import gc
logger = logging.getLogger("main.log")
__all__ = ['FN_ProportionalValve']
class FN_ProportionalValve(Eventmanager):
def __init__(self,com,df,idxNo,filename):
self._idxNo =idxNo
self.filename = filename
self.gen = com
self._positionsp = 0.0
self.df = df
self.devicename = df.iloc[self._idxNo, 0]
self.setup()
self.initilizedigitalinput()
super().__init__(lambda: self.Proportionalprocess())
def setup(self):
try:
for tag,col in self.readalltags():
if col==3:
self.areatag = str(tag)
if col == 4:
self.possetpointtag = str(tag)
if col == 5:
self.upposlimitswtag = str(tag)
if col == 6:
self.downposlimitswtag =str(tag)
except Exception as e:
level = logging.ERROR
messege = "FN_ProportionalValve" + self.devicename + " Error messege(setup)" + str(e.args)
logger.log(level, messege)
log_exception(e)
def initilizedigitalinput(self):
try:
self.Proportionalprocess()
except Exception as e:
level = logging.ERROR
messege = "FN_ProportionalValve" + self.devicename + " Error messege(initilization)" + str(e.args)
logger.log(level, messege)
def Proportionalprocess(self):
try:
client = Communication()
sta_con_plc = client.opc_client_connect(self.filename)
readgeneral = ReadGeneral(sta_con_plc)
writegeneral = WriteGeneral(sta_con_plc)
self.currentvalue = readgeneral.readsymbolvalue(self.possetpointtag, 'S7WLWord', 'PA')
print("proportional valve start")
print("current value is ", self.currentvalue)
if self.currentvalue == 8294:
writegeneral.writesymbolvalue(self.upposlimitswtag, 0, 'S7WLBit')
time.sleep(1)
writegeneral.writesymbolvalue(self.downposlimitswtag, 1, 'S7WLBit')
level = logging.WARNING
messege = self.devicename + ":" + self.downposlimitswtag + " value is" + "1"
logger.log(level, messege)
if self.currentvalue == 19353:
writegeneral.writesymbolvalue(self.downposlimitswtag, 0, 'S7WLBit')
time.sleep(5)
writegeneral.writesymbolvalue(self.upposlimitswtag, 1, 'S7WLBit')
level = logging.WARNING
messege = self.devicename + ":" + self.downposlimitswtag + " value is" + "1"
logger.log(level, messege)
sta_con_plc.disconnect()
gc.collect()
except Exception as e:
level = logging.ERROR
messege = "FN_ProportionalValve" + self.devicename + " Error messege(process)" + str(e.args)
logger.log(level, messege)
log_exception(e)
@property
def PosSetpoint(self):
return self._positionsp
@PosSetpoint.setter
def PosSetpoint(self, value):
if value != self._positionsp:
super().fire()
self._positionsp = value
@property
def areaname(self):
return self.areatag
def readalltags(self):
n = 3
row, col = self.df.shape
print(col)
while n < col:
data = self.df.iloc[self._idxNo, n]
yield data,n
n = n + 1
| [
"subrata.mitra@sms-group.com"
] | subrata.mitra@sms-group.com |
860b79c446c1239bedd54c874410e6b544f97def | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/-57053121/win32net.py | ff9e6a1b782e3fcae5c883d127212d2b12f61488 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,797 | py | # encoding: utf-8
# module win32net
# from C:\Users\Doly\Anaconda3\lib\site-packages\win32\win32net.pyd
# by generator 1.147
""" A module encapsulating the Windows Network API. """
# imports
from pywintypes import error
# Variables with simple values
SERVICE_SERVER = 'LanmanServer'
SERVICE_WORKSTATION = 'LanmanWorkstation'
USE_FORCE = 1
USE_LOTS_OF_FORCE = 2
USE_NOFORCE = 0
# functions
def NetFileClose(*args, **kwargs): # real signature unknown
pass
def NetFileEnum(*args, **kwargs): # real signature unknown
pass
def NetFileGetInfo(*args, **kwargs): # real signature unknown
pass
def NetGetAnyDCName(*args, **kwargs): # real signature unknown
pass
def NetGetDCName(*args, **kwargs): # real signature unknown
pass
def NetGetJoinInformation(*args, **kwargs): # real signature unknown
pass
def NetGroupAdd(*args, **kwargs): # real signature unknown
pass
def NetGroupAddUser(*args, **kwargs): # real signature unknown
pass
def NetGroupDel(*args, **kwargs): # real signature unknown
pass
def NetGroupDelUser(*args, **kwargs): # real signature unknown
pass
def NetGroupEnum(*args, **kwargs): # real signature unknown
pass
def NetGroupGetInfo(*args, **kwargs): # real signature unknown
pass
def NetGroupGetUsers(*args, **kwargs): # real signature unknown
pass
def NetGroupSetInfo(*args, **kwargs): # real signature unknown
pass
def NetGroupSetUsers(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupAdd(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupAddMembers(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupDel(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupDelMembers(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupEnum(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupGetInfo(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupGetMembers(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupSetInfo(*args, **kwargs): # real signature unknown
pass
def NetLocalGroupSetMembers(*args, **kwargs): # real signature unknown
pass
def NetMessageBufferSend(*args, **kwargs): # real signature unknown
pass
def NetMessageNameAdd(*args, **kwargs): # real signature unknown
pass
def NetMessageNameDel(*args, **kwargs): # real signature unknown
pass
def NetMessageNameEnum(*args, **kwargs): # real signature unknown
pass
def NetServerComputerNameAdd(*args, **kwargs): # real signature unknown
pass
def NetServerComputerNameDel(*args, **kwargs): # real signature unknown
pass
def NetServerDiskEnum(*args, **kwargs): # real signature unknown
pass
def NetServerEnum(*args, **kwargs): # real signature unknown
pass
def NetServerGetInfo(*args, **kwargs): # real signature unknown
pass
def NetServerSetInfo(*args, **kwargs): # real signature unknown
pass
def NetSessionDel(*args, **kwargs): # real signature unknown
pass
def NetSessionEnum(*args, **kwargs): # real signature unknown
pass
def NetSessionGetInfo(*args, **kwargs): # real signature unknown
pass
def NetShareAdd(*args, **kwargs): # real signature unknown
pass
def NetShareCheck(*args, **kwargs): # real signature unknown
pass
def NetShareDel(*args, **kwargs): # real signature unknown
pass
def NetShareEnum(*args, **kwargs): # real signature unknown
""" Obsolete Function,Level 1 call """
pass
def NetShareGetInfo(*args, **kwargs): # real signature unknown
pass
def NetShareSetInfo(*args, **kwargs): # real signature unknown
pass
def NetStatisticsGet(*args, **kwargs): # real signature unknown
pass
def NetUseAdd(*args, **kwargs): # real signature unknown
pass
def NetUseDel(*args, **kwargs): # real signature unknown
pass
def NetUseEnum(*args, **kwargs): # real signature unknown
pass
def NetUseGetInfo(*args, **kwargs): # real signature unknown
pass
def NetUserAdd(*args, **kwargs): # real signature unknown
pass
def NetUserChangePassword(*args, **kwargs): # real signature unknown
pass
def NetUserDel(*args, **kwargs): # real signature unknown
pass
def NetUserEnum(*args, **kwargs): # real signature unknown
pass
def NetUserGetGroups(*args, **kwargs): # real signature unknown
""" Updated - New Behavior """
pass
def NetUserGetInfo(*args, **kwargs): # real signature unknown
pass
def NetUserGetLocalGroups(*args, **kwargs): # real signature unknown
""" Updated - New Behavior """
pass
def NetUserModalsGet(*args, **kwargs): # real signature unknown
pass
def NetUserModalsSet(*args, **kwargs): # real signature unknown
pass
def NetUserSetInfo(*args, **kwargs): # real signature unknown
pass
def NetValidateName(*args, **kwargs): # real signature unknown
pass
def NetValidatePasswordPolicy(*args, **kwargs): # real signature unknown
pass
def NetWkstaGetInfo(*args, **kwargs): # real signature unknown
pass
def NetWkstaSetInfo(*args, **kwargs): # real signature unknown
pass
def NetWkstaTransportAdd(*args, **kwargs): # real signature unknown
pass
def NetWkstaTransportDel(*args, **kwargs): # real signature unknown
pass
def NetWkstaTransportEnum(*args, **kwargs): # real signature unknown
pass
def NetWkstaUserEnum(*args, **kwargs): # real signature unknown
pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x000001DF95825470>'
__spec__ = None # (!) real value is "ModuleSpec(name='win32net', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x000001DF95825470>, origin='C:\\\\Users\\\\Doly\\\\Anaconda3\\\\lib\\\\site-packages\\\\win32\\\\win32net.pyd')"
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
ab763d211a9c24f4b40133a63293a7644e5bc85f | 19ab1499f904fc7065cf76eb518a457d1cad08c9 | /accounts/models.py | 6a9b925e0f4b3cfd73ab40258c5cce0c4c4d91d5 | [] | no_license | mathemartins/moli | 5a2637c94ec8953b42fba369b2fed5bdd3a83be6 | c945b90920659025013fe6133dfd5a0abac61788 | refs/heads/master | 2021-05-03T06:27:21.472267 | 2018-03-04T18:50:18 | 2018-03-04T18:50:18 | 120,595,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,327 | py | from django.db import models
from django.db.models.signals import pre_save, post_save
from django.contrib.auth import get_user_model
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from django.utils import timezone
# Create your models here.
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from accounts.utils import user_code_generator
User = get_user_model()
def upload_location(instance, filename):
return "%s/%s" %(instance.id, filename)
gender_ = (
('Male', 'Male'),
('Female', 'Female'),
('Other', 'Other'),
)
years = (
("1 - 5 years", "1 - 5 years"),
("6 - 13 years", "6 - 13 years"),
("14 - 20 years", "14 - 20 years"),
("20 years and above", "20 years and above"),
)
UserType = (
("Investor", "Investor"),
("StartUp", "StartUp"),
)
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
user_code = models.CharField(max_length=5)
user_type = models.CharField(choices=UserType, blank=True, null=True, max_length=100)
image = ProcessedImageField(upload_to=upload_location, processors=[ResizeToFill(150, 150)],
format='JPEG', options={'quality':100}, null=True, blank=True)
is_member = models.BooleanField(default=False, verbose_name="Premium Account")
skill_set = models.CharField(default="I don't have any.", max_length=100)
years_of_experience = models.CharField(choices=years, blank=True, null=True, max_length=100)
mobile_number = models.CharField(max_length=11)
street = models.CharField(max_length=100, blank=True, null=True)
city = models.CharField(max_length=100, blank=True, null=True)
state = models.CharField(max_length=100, blank=True, null=True)
zip_code = models.CharField(max_length=100, blank=True, null=True)
country = models.CharField(max_length=100, blank=True, null=True)
gender = models.CharField(choices=gender_, max_length=100)
slug = models.SlugField(null=True, blank=True)
updated = models.DateTimeField(auto_now_add=False, auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
# objects = ProfileManager()
class Meta:
ordering = ["-timestamp", "-updated"]
def __str__(self):
return str(self.user)
def new_user_signal(sender, instance, created, *args, **kwargs):
pass
| [
"mathegeniuse@gmail.com"
] | mathegeniuse@gmail.com |
58aede70705c5b0dd846bea68d5953645bf246f6 | 65f9576021285bc1f9e52cc21e2d49547ba77376 | /cdsp_proc/core/securemsm/cryptodrivers/prng/build/SConscript | ac11a7dc6ab6c6c833edf3300ae6c0378f6e7fc7 | [] | no_license | AVCHD/qcs605_root_qcom | 183d7a16e2f9fddc9df94df9532cbce661fbf6eb | 44af08aa9a60c6ca724c8d7abf04af54d4136ccb | refs/heads/main | 2023-03-18T21:54:11.234776 | 2021-02-26T11:03:59 | 2021-02-26T11:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | #===============================================================================
#
# Crypto Driver Libraries
#
# GENERAL DESCRIPTION
# build script
#
# Copyright (c) 2012 by QUALCOMM, Incorporated.
# All Rights Reserved.
# QUALCOMM Proprietary/GTDR
#
#-------------------------------------------------------------------------------
#
# $Header: //source/qcom/qct/core/bsp/config/msm7x30/main/latest/securemsm/build/SConscript#4 $
# $DateTime: 2009/10/07 11:43:48 $
# $Author: wduembeg $
# $Change: 1047079 $
# EDIT HISTORY FOR FILE
#
# This section contains comments describing changes made to the module.
# Notice that changes are listed in reverse chronological order.
#
# when who what, where, why
# -------- --- ---------------------------------------------------------
# 07/04/12 nk Initial version
#===============================================================================
Import('env')
env = env.Clone()
env.Replace(BUILDPATH = env.subst('${BUILDPATH}/${BUILD_ID}'))
CLEAN_SOURCES = env.FindFiles("*.h", "${BUILD_ROOT}/core/securemsm/cryptodrivers/prng/environment/")
env.CleanPack(['CORE_QDSP6_SW'], CLEAN_SOURCES)
CLEAN_SOURCES = env.FindFiles("*.h", "${BUILD_ROOT}/core/securemsm/cryptodrivers/prng/chipset/")
env.CleanPack(['CORE_QDSP6_SW'], CLEAN_SOURCES)
CLEAN_SOURCES = env.FindFiles("*", "${BUILD_ROOT}/core/securemsm/cryptodrivers/prng/test")
for x in env.FindFiles(['*.o', '*.lib', 'SConscript'], "${BUILD_ROOT}/core/securemsm/cryptodrivers/prng/test/build"):
CLEAN_SOURCES.remove(x)
env.CleanPack(['CORE_QDSP6_SW'],
CLEAN_SOURCES)
#-------------------------------------------------------------------------------
# Load sub scripts
#-------------------------------------------------------------------------------
env.LoadSoftwareUnits()
| [
"jagadeshkumar.s@pathpartnertech.com"
] | jagadeshkumar.s@pathpartnertech.com | |
5b49340e964b98109030cd66ae3252c3ba525436 | d2ad4a42770cd3b5c8738cdc6f11a3b63689123a | /examples/DeepQNetwork/common.py | 5e53e5c9192924e6f3562c328dcf02a1bdee57e0 | [
"Apache-2.0"
] | permissive | cpehle/tensorpack | bcdc55418e2e07779fd08f03742e9080a0854651 | 7e91eb48ce59b303b580140abbbfa42dc0319b21 | refs/heads/master | 2021-01-11T15:35:03.578953 | 2017-01-29T06:48:50 | 2017-01-29T06:48:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,462 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.predict import get_predict_func
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
global get_player
get_player = None
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg):
player = get_player(viz=0.01)
predfunc = get_predict_func(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predict_funcs, nr_eval):
class Worker(StoppableThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
player = get_player(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print "Score, ", score
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predict_funcs]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval):
func = get_predict_func(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Callback):
def __init__(self, nr_eval, input_names, output_names):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predict_func(
self.input_names, self.output_names)] * NR_PROC
def _trigger_epoch(self):
t = time.time()
mean, max = eval_with_funcs(self.pred_funcs, nr_eval=self.eval_episode)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.add_scalar_summary('mean_score', mean)
self.trainer.add_scalar_summary('max_score', max)
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
17cc2d6112cbf07ea283510a6a4186bda20bb213 | 8246092010e656920e7199f889f9cbf54b83a729 | /pycoin/wallet/SQLite3Persistence.py | 23c29337a9bc3cb77f61dce4e3bb11dd047b7512 | [
"MIT"
] | permissive | richardkiss/pycoin | 5717411a11445773ac922c1d1c1b7dbe4835cd77 | b41ad7d02e52d9869a8c9f0dbd7d3b2b496c98c0 | refs/heads/main | 2023-08-07T12:14:04.974934 | 2023-04-18T02:27:15 | 2023-04-18T02:27:15 | 10,917,677 | 1,306 | 489 | MIT | 2023-06-03T23:24:50 | 2013-06-24T19:17:52 | Python | UTF-8 | Python | false | false | 7,582 | py | from pycoin.encoding.hexbytes import b2h, h2b, b2h_rev, h2b_rev
from pycoin.key.BIP32Node import BIP32Node
class SQLite3Persistence(object):
def __init__(self, sqlite3_db):
self.db = sqlite3_db
self._init_tables()
def _exec_sql(self, sql, *args):
c = self.db.cursor()
c.execute(sql, args)
return c
def commit(self):
self.db.commit()
def rollback(self):
self.db.rollback()
def _init_tables(self):
self._init_table_bip32key()
self._init_table_bip32node()
self._init_table_spendable()
self._init_table_globals()
self._init_other_tables()
def _init_other_tables(self):
pass
def _init_table_bip32key(self):
SQL = """create table if not exists BIP32Key (
id integer primary key,
slug text not null unique,
as_text text not null
);"""
self._exec_sql(SQL)
self.db.commit()
def bip32node_for_slug(self, slug):
c = self._exec_sql("select id, as_text from BIP32Key where slug=?", slug)
r = c.fetchone()
if r is None:
return None
bip32_node = BIP32Node.from_hwif(r[1])
bip32_node.id = r[0]
return bip32_node
def create_bip32node(self, slug, random_bytes):
bip32_node = BIP32Node.from_master_secret(random_bytes)
bip32_text = bip32_node.as_text(as_private=True)
self._exec_sql("insert into BIP32Key (slug, as_text) values (?, ?)", slug, bip32_text)
return self.bip32node_for_slug(slug)
def _init_table_bip32node(self):
SQL = """create table if not exists BIP32Node (
path text not null,
key_id integer,
address text unique,
unique(path, key_id)
);"""
self._exec_sql(SQL)
self.db.commit()
def add_bip32_path(self, bip32_node, path):
address = bip32_node.subkey_for_path(path).address()
key_id = bip32_node.id
self._exec_sql("insert or ignore into BIP32Node values (?, ?, ?)", path, key_id, address)
self.db.commit()
return address
def interesting_addresses(self):
c = self._exec_sql("select address from BIP32Node")
return (r[0] for r in c)
def secret_exponent_for_address(self, bip32_node, address):
c = self._exec_sql("select path from BIP32Node where key_id = ? and address = ?", bip32_node.id, address)
r = c.fetchone()
if r is None:
return r
path = r[0]
return bip32_node.subkey_for_path(path).secret_exponent()
def _init_table_globals(self):
SQL = """create table if not exists Global (
slug text primary key,
data text
);"""
self._exec_sql(SQL)
self.db.commit()
def set_global(self, slug, value):
self._exec_sql("insert or replace into Global values (?, ?)", slug, value)
def get_global(self, slug):
c = self._exec_sql("select data from Global where slug = ?", slug)
r = c.fetchone()
if r is None:
return r
return r[0]
def slugs(self):
for r in self._exec_sql("select slug from Global"):
yield r[0]
def _init_table_spendable(self):
SQL = ["""create table if not exists Spendable (
tx_hash text,
tx_out_index integer,
coin_value integer,
script text,
block_index_available integer,
does_seem_spent boolean,
block_index_spent integer,
unique(tx_hash, tx_out_index)
);""",
"create index if not exists Spendable_cv on Spendable (coin_value);",
"create index if not exists Spendable_bia on Spendable (block_index_available);",
"create index if not exists Spendable_bis on Spendable (block_index_spent);"]
for sql in SQL:
self._exec_sql(sql)
self.db.commit()
def save_spendable(self, spendable):
tx_hash = b2h_rev(spendable.tx_hash)
script = b2h(spendable.script)
self._exec_sql("insert or replace into Spendable values (?, ?, ?, ?, ?, ?, ?)", tx_hash,
spendable.tx_out_index, spendable.coin_value, script,
spendable.block_index_available, spendable.does_seem_spent,
spendable.block_index_spent)
def delete_spendable(self, tx_hash, tx_out_index):
self._exec_sql("delete from Spendable where tx_hash = ? and tx_out_index = ?",
b2h_rev(tx_hash), tx_out_index)
def spendable_for_hash_index(self, tx_hash, tx_out_index, spendable_class):
tx_hash_hex = b2h_rev(tx_hash)
SQL = ("select coin_value, script, block_index_available, "
"does_seem_spent, block_index_spent from Spendable where "
"tx_hash = ? and tx_out_index = ?")
c = self._exec_sql(SQL, tx_hash_hex, tx_out_index)
r = c.fetchone()
if r is None:
return r
return spendable_class(coin_value=r[0], script=h2b(r[1]), tx_hash=tx_hash,
tx_out_index=tx_out_index, block_index_available=r[2],
does_seem_spent=r[3], block_index_spent=r[4])
@staticmethod
def spendable_for_row(r, spendable_class):
return spendable_class(coin_value=r[2], script=h2b(r[3]), tx_hash=h2b_rev(r[0]), tx_out_index=r[1],
block_index_available=r[4], does_seem_spent=r[5], block_index_spent=r[6])
def all_spendables(self, spendable_class, qualifier_sql=""):
SQL = ("select tx_hash, tx_out_index, coin_value, script, block_index_available, "
"does_seem_spent, block_index_spent from Spendable " + qualifier_sql)
c1 = self._exec_sql(SQL)
while 1:
r = next(c1)
yield self.spendable_for_row(r, spendable_class)
def unspent_spendables(self, last_block, spendable_class, confirmations=0):
# we fetch spendables "old enough"
# we alternate between "biggest" and "smallest" spendables
SQL = ("select tx_hash, tx_out_index, coin_value, script, block_index_available, "
"does_seem_spent, block_index_spent from Spendable where "
"block_index_available > 0 and does_seem_spent = 0 and block_index_spent = 0 "
"%s order by coin_value %s")
if confirmations > 0:
prior_to_block = last_block + 1 - confirmations
t1 = "and block_index_available <= %d " % prior_to_block
else:
t1 = ""
c1 = self._exec_sql(SQL % (t1, "desc"))
c2 = self._exec_sql(SQL % (t1, "asc"))
seen = set()
while 1:
r = next(c2)
s = self.spendable_for_row(r, spendable_class)
name = (s.tx_hash, s.tx_out_index)
if name not in seen:
yield s
seen.add(name)
r = next(c1)
s = self.spendable_for_row(r, spendable_class)
name = (s.tx_hash, s.tx_out_index)
if name not in seen:
yield s
seen.add(name)
def unspent_spendable_count(self):
SQL = ("select count(*) from Spendable where does_seem_spent = 0"
" and block_index_available > 0 and block_index_spent = 0")
c = self._exec_sql(SQL)
r = c.fetchone()
return r[0]
def rewind_spendables(self, block_index):
SQL1 = ("update Spendable set block_index_available = 0 where block_index_available > ?")
self._exec_sql(SQL1, block_index)
SQL2 = ("update Spendable set block_index_spent = 0 where block_index_spent > ?")
self._exec_sql(SQL2, block_index)
| [
"him@richardkiss.com"
] | him@richardkiss.com |
0271004bd53c6495bea1b7d09dd2f9cd713b5bab | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_348/ch28_2020_04_01_16_19_34_337220.py | 510b48ff825567842eb0ccd1c5f6ae918874c9eb | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | contador = 0
numero = 0
while contador < 99:
numero = numero + (1/2**contador)
contador = contador + 1
print(numero)
| [
"you@example.com"
] | you@example.com |
7dc694c928307813f2ccde47e1ff6de0f120e660 | 33d77a6caf1813fcb378671fd89e5bb1dec7f6f8 | /tsai/callback/core.py | 449505e7e55f4945889568ef0a8510b4d9be93d9 | [
"Apache-2.0"
] | permissive | kusumy/tsai | 6a1f3d4137131084062d1a5f942e8f7d23662abd | d6994896dd804cfed441adbb2b8dd4836b1dac4a | refs/heads/master | 2023-02-25T14:13:08.275441 | 2021-01-29T20:44:21 | 2021-01-29T20:44:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,198 | py | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/010_callback.core.ipynb (unless otherwise specified).
__all__ = ['GamblersCallback', 'TransformScheduler', 'ShowGraph', 'ShowGraphCallback2', 'UBDAug']
# Cell
from ..imports import *
from ..utils import *
from ..data.preprocessing import *
from ..data.transforms import *
from ..models.layers import *
from fastai.callback.all import *
# Cell
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
# Cell
class GamblersCallback(Callback):
"A callback to use metrics with gambler's loss"
def after_loss(self): self.learn.pred = self.learn.pred[..., :-1]
# Cell
class TransformScheduler(Callback):
"A callback to schedule batch transforms during training based on a function (sched_lin, sched_exp, sched_cos (default), etc)"
def __init__(self, schedule_func:callable, show_plot:bool=False):
self.schedule_func,self.show_plot = schedule_func,show_plot
self.mult = []
def before_fit(self):
for pct in np.linspace(0, 1, len(self.dls.train) * self.n_epoch): self.mult.append(self.schedule_func(pct))
# get initial magnitude values and update initial value
self.mag = []
self.mag_tfms = []
for t in self.dls.after_batch:
if hasattr(t, 'magnitude'):
self.mag.append(t.magnitude)
t.magnitude *= self.mult[0]
self.mag_tfms.append(t)
def after_batch(self):
if self.training and len(self.mag_tfms)>0 and self.train_iter < len(self.mult):
# set values for next batch
for t,m in zip(self.mag_tfms, self.mag):
t.magnitude = m * self.mult[self.train_iter]
def after_fit(self):
if self.show_plot and self.mult != [] and len(self.mag_tfms)>0:
print()
plt.plot(self.mult)
plt.title('Scheduled tfms')
plt.show()
print()
self.show_plot = False
# set values to initial values
for t,m in zip(self.mag_tfms, self.mag): t.magnitude = m
def __repr__(self):
return f'{self.__class__.__name__}({self.schedule_func})'
# Cell
class ShowGraph(ShowGraphCallback):
"(Modified) Update a graph of training and validation loss"
def after_epoch(self):
"Plot validation loss in the pbar graph"
if not self.nb_batches: return
rec = self.learn.recorder
iters = range_of(rec.losses)
val_losses = [v[1] for v in rec.values]
x_bounds = (0, (self.n_epoch - len(self.nb_batches)) * self.nb_batches[0] + len(rec.losses))
y_min = min((min(rec.losses), min(val_losses)))
y_max = max((max(rec.losses), max(val_losses)))
margin = (y_max - y_min) * .05
y_bounds = (y_min - margin, y_max + margin)
self.progress.mbar.update_graph([(iters, rec.losses), (self.nb_batches, val_losses)], x_bounds, y_bounds)
ShowGraphCallback2 = ShowGraph
# Cell
class UBDAug(Callback):
    r"""A callback to implement the uncertainty-based data augmentation.

    For each training batch it builds C augmented copies of every sample,
    scores copies by per-sample training loss, and keeps the S highest-loss
    copies per sample as the batch actually used for training.
    """
    def __init__(self, batch_tfms:list, N:int=2, C:int=4, S:int=1):
        r'''
        Args:
            batch_tfms: list of available transforms applied to the combined batch. They will be applied in addition to the dl tfms.
            N: # composition steps (# transforms randomly applied to each sample)
            C: # augmented data per input data (# times N transforms are applied)
            S: # selected data points used for training (# augmented samples in the final batch from each original sample)
        '''
        self.C, self.S = C, min(S, C)   # never select more copies than were generated
        self.batch_tfms = L(batch_tfms)
        self.n_tfms = len(self.batch_tfms)
        self.N = min(N, self.n_tfms)    # cannot compose more transforms than exist
    def before_fit(self):
        # Per-sample losses are needed below, so the loss function must support
        # switching its reduction; remember the original setting to restore it.
        assert hasattr(self.loss_func, 'reduction'), "You need to pass a loss_function with a 'reduction' attribute"
        self.red = self.loss_func.reduction
    def before_batch(self):
        if self.training:
            with torch.no_grad():
                # Disable reduction temporarily to get one loss value per sample.
                setattr(self.loss_func, 'reduction', 'none')
                for i in range(self.C):
                    # Apply N distinct, randomly chosen transforms to the batch.
                    idxs = np.random.choice(self.n_tfms, self.N, False)
                    x_tfm = compose_tfms(self.x, self.batch_tfms[idxs], split_idx=0)
                    loss = self.loss_func(self.learn.model(x_tfm), self.y).reshape(-1,1)
                    if i == 0:
                        x2 = x_tfm.unsqueeze(1)
                        max_loss = loss
                    else:
                        losses = torch.cat((max_loss, loss), dim=1)
                        x2 = torch.cat((x2, x_tfm.unsqueeze(1)), dim=1)
                # Keep the S copies with the largest loss for each sample.
                # NOTE(review): `losses` combines only the i==0 loss and the
                # latest loss, and both this selection and the max_loss update
                # sit *outside* the loop, so for C > 2 the intermediate copies
                # never enter the comparison -- confirm this matches the
                # intended selection behaviour.
                x2 = x2[np.arange(x2.shape[0]).reshape(-1,1), losses.argsort(1)[:, -self.S:]]
                max_loss = losses.max(1)[0].reshape(-1,1)
                setattr(self.loss_func, 'reduction', self.red)  # restore reduction
            # Flatten (batch, S, ...) back into a plain batch dimension.
            x2 = x2.reshape(-1, self.x.shape[-2], self.x.shape[-1])
            if self.S > 1: self.learn.yb = (torch_tile(self.y, 2),)
            self.learn.xb = (x2,)
    def __repr__(self): return f'UBDAug({[get_tfm_name(t) for t in self.batch_tfms]})'
"“oguiza@gmail.com”"
] | “oguiza@gmail.com” |
9c4e3df78a6a526942f1747c2c3ab68138a6105d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_retracting.py | 7a0b51d0632af5dd821d4f03d4dee4fe35e094e4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#calss header
class _RETRACTING():
def __init__(self,):
self.name = "RETRACTING"
self.definitions = retract
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['retract']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
97de807d29418219e7285b76b289be1bafcc58c8 | c609c9e9da9b716dde810334fe32cb65a3ddcff9 | /ddsc/remotestore.py | f5c98790fe650c115cff91c020eda36391e45622 | [
"MIT"
] | permissive | erichhuang/DukeDSClient | 16f858e743b7823f1797214b9ede1e520f73af4a | feccdbd0caed524e0728268be29a3987121712fa | refs/heads/master | 2020-12-24T21:45:24.371369 | 2016-02-22T18:30:06 | 2016-02-22T18:30:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,257 | py | from localstore import LocalContent, HashUtil
class RemoteContentFetch(object):
    """Fetch a project tree (project/folders/files) from the remote data service."""
    def __init__(self, data_service):
        self.data_service = data_service
    def fetch_remote_project(self, project_name, path_list):
        """Return the named RemoteProject with its children attached, or None.

        path_list is accepted for interface compatibility (currently unused here).
        """
        project = self._get_my_project(project_name)
        if project:
            self._add_project_children(project)
        return project
    def _get_my_project(self, project_name):
        """Find the user's project with the given name."""
        projects = self.data_service.get_projects().json()['results']
        matches = [p for p in projects if p['name'] == project_name]
        return RemoteProject(matches[0]) if matches else None
    def _add_project_children(self, project):
        """Attach the project's top-level folders and files."""
        listing = self.data_service.get_project_children(project.id, '').json()
        for child_json in listing['results']:
            self._add_child_recur(project, child_json)
    def _add_child_recur(self, parent, child_json):
        """Build the appropriate Remote* object for child_json and add it to parent."""
        kind = child_json['kind']
        if kind == 'dds-file':
            parent.add_child(RemoteFile(child_json))
        elif kind == 'dds-folder':
            parent.add_child(self._read_folder(child_json))
        else:
            raise ValueError("Unknown child type {}".format(kind))
    def _read_folder(self, folder_json):
        """Create a RemoteFolder and recursively populate its children."""
        folder = RemoteFolder(folder_json)
        listing = self.data_service.get_folder_children(folder.id, '').json()
        for child_json in listing['results']:
            self._add_child_recur(folder, child_json)
        return folder
class RemoteProject(object):
    """
    Top-level node of a remote project tree, built from a remote store
    projects request.
    """
    def __init__(self, json_data):
        # Copy the fields we care about straight off the response payload.
        for field in ('id', 'kind', 'name', 'description', 'is_deleted'):
            setattr(self, field, json_data[field])
        self.children = []
    def add_child(self, child):
        """Attach a RemoteFolder/RemoteFile directly under the project."""
        self.children.append(child)
    def get_paths(self):
        """Return the set of all folder/file paths contained in the tree."""
        paths = set()
        for child in self.children:
            paths |= child.get_paths('')
        return paths
    def __str__(self):
        return 'project: {} id:{} {}'.format(self.name, self.id, self.children)
class RemoteFolder(object):
    """
    Branch (or leaf) node of a remote project tree, built from a
    project_id_children or folder_id_children request.
    """
    def __init__(self, json_data):
        for field in ('id', 'kind', 'name', 'is_deleted'):
            setattr(self, field, json_data[field])
        self.children = []
    def add_child(self, child):
        """Attach a nested RemoteFolder/RemoteFile under this folder."""
        self.children.append(child)
    def get_paths(self, parent):
        """Return this folder's path plus every descendant path."""
        my_path = parent + '/' + self.name
        paths = {my_path}
        for child in self.children:
            paths.update(child.get_paths(my_path))
        return paths
    def __str__(self):
        return 'folder: {} id:{} {}'.format(self.name, self.id, self.children)
class RemoteFile(object):
    """
    Leaf node of a remote project tree, built from a project_id_children
    or folder_id_children request.
    """
    def __init__(self, json_data):
        for field in ('id', 'kind', 'name', 'is_deleted'):
            setattr(self, field, json_data[field])
        # File size lives in the nested 'upload' payload.
        self.size = json_data['upload']['size']
    def get_paths(self, parent):
        """Return the single path of this file under parent."""
        return {parent + '/' + self.name}
    def __str__(self):
        return 'file: {} id:{} size:{}'.format(self.name, self.id, self.size)
class FileOnDisk(object):
    """Upload a local file to the remote store, producing chunks lazily."""
    def __init__(self, dsa, local_file):
        self.dsa = dsa                           # data service adapter
        self.local_file = local_file
        self.filename = local_file.path
        self.content_type = local_file.mimetype
        self.chunk_num = 0                       # next chunk index to send
        self.upload_id = None                    # set once the upload is created
    def upload(self, project_id, parent_kind, parent_id):
        """Create an upload, push all chunks, register the file; return its id."""
        size = self.local_file.size
        (hash_alg, hash_value) = self.local_file.get_hashpair()
        name = self.local_file.name
        resp = self.dsa.create_upload(project_id, name, self.content_type, size, hash_value, hash_alg)
        self.upload_id = resp.json()['id']
        self._send_file_chunks()
        self.dsa.complete_upload(self.upload_id)
        result = self.dsa.create_file(parent_kind, parent_id, self.upload_id)
        return result.json()['id']
    def _send_file_chunks(self):
        # Walk the file in dsa.bytes_per_chunk pieces, sending each via process_chunk.
        self.local_file.process_chunks(self.dsa.bytes_per_chunk, self.process_chunk)
    def process_chunk(self, chunk, chunk_hash_alg, chunk_hash_value):
        """Fetch an upload URL for this chunk and send it to the external store."""
        resp = self.dsa.create_upload_url(self.upload_id, self.chunk_num, len(chunk),
                                          chunk_hash_value, chunk_hash_alg)
        if resp.status_code == 200:
            self._send_file_external(resp.json(), chunk)
            self.chunk_num += 1
        else:
            raise ValueError("Failed to retrieve upload url status:" + str(resp.status_code))
    # Fixed: this method was defined twice with identical bodies; the second
    # definition silently shadowed the first, so the duplicate was removed.
    def _send_file_external(self, url_json, chunk):
        """Send the chunk bytes to the external store described by url_json."""
        http_verb = url_json['http_verb']
        host = url_json['host']
        url = url_json['url']
        http_headers = url_json['http_headers']
        resp = self.dsa.send_external(http_verb, host, url, http_headers, chunk)
        if resp.status_code != 200 and resp.status_code != 201:
            raise ValueError("Failed to send file to external store. Error:" + str(resp.status_code))
class RemoteContentSender(object):
    """Visitor that pushes local project/folder/file items to the remote store."""
    def __init__(self, data_service, project_id, project_name, watcher):
        self.data_service = data_service
        self.project_id = project_id
        self.project_name = project_name
        self.watcher = watcher
    def visit_project(self, item, parent):
        """Create the remote project once and remember its id for uploads."""
        if item.remote_id:
            return
        self.watcher.sending_item(item)
        response = self.data_service.create_project(self.project_name, self.project_name)
        remote_id = response.json()['id']
        item.remote_id = remote_id
        item.sent_to_remote = True
        self.project_id = remote_id
    def visit_folder(self, item, parent):
        """Create a remote folder under parent unless it already exists remotely."""
        if item.remote_id:
            return
        self.watcher.sending_item(item)
        response = self.data_service.create_folder(item.name, parent.kind, parent.remote_id)
        item.remote_id = response.json()['id']
        item.sent_to_remote = True
    def visit_file(self, item, parent):
        """Upload a file unconditionally (no way to tell if the remote differs)."""
        self.watcher.sending_item(item)
        uploader = FileOnDisk(self.data_service, item)
        item.remote_id = uploader.upload(self.project_id, parent.kind, parent.remote_id)
        item.sent_to_remote = True
| [
"johnbradley2008@gmail.com"
] | johnbradley2008@gmail.com |
d022fe38183e5587f2f636e2c586849ef8486f56 | 8b5e08a5c2ae9779a362e4a1eb15e9205585288b | /mysite/account/views.py | a8780cbdccf21ad701a253bed72a0ae7507e6d5c | [] | no_license | wuchunlongcom/admin-upimg | 2b6dcc453e91040f0e9154d6255afdcf9aaa4e62 | 35e0dd0584d7e090d8922de704cd26ff0543679e | refs/heads/master | 2022-05-02T02:59:18.691844 | 2020-02-21T10:23:17 | 2020-02-21T10:23:17 | 240,830,450 | 0 | 0 | null | 2022-04-22T23:02:11 | 2020-02-16T04:21:17 | JavaScript | UTF-8 | Python | false | false | 805 | py | # -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from .models import Image
from myAPI.pageAPI import djangoPage, PAGE_NUM
# http://localhost:8000/
#@login_required  (login requirement currently disabled)
def index(request):
    """Render the landing page; local names (e.g. `meg`) are exposed to the
    template via context=locals()."""
    # User-facing help text (Chinese): "Simplest possible image upload demo --
    # log into the Admin backend (admin/admin) to upload images shown here."
    # The string is template content and must stay exactly as-is.
    meg = '最简单代码,实现图像文件上传。 <br>登录Admin后台, 上传图像文件并在前台显示图像。<br>\
    用户名/密码: admin/admin'
    return render(request, 'account/index.html', context=locals())
# Show uploaded images, paginated: http://localhost:8000/upload/pic/
def upload_pic(request, page):
    """Render page `page` of the uploaded images (PAGE_NUM items per page);
    pagination values are passed to the template via context=locals()."""
    pic_list = Image.objects.all()
    # djangoPage slices pic_list for the requested page and returns paging metadata.
    pic_list, pageList, num_pages, page = djangoPage(pic_list, page, PAGE_NUM)
    offset = PAGE_NUM * (page - 1)  # index of the first item on this page
    return render(request,'account/image.html', context=locals())
"wcl6005@163.com"
] | wcl6005@163.com |
a0cfe1555eae52ff7fc71807259a1992ae6af19e | 8163d8f03aea22cb4fa1e60d809781049fff4bb4 | /MODEL11/first/urls.py | 64c7bb7d97725ed170ff93fe6248f7b4ddae533c | [] | no_license | shubham454/Django-Devlopment | 694b973d31a82d2ded11f95138bd766130d7d3c9 | 43a2c3b98dbe9f582f2394fcfb3beb133c37b145 | refs/heads/master | 2022-12-04T14:34:05.093402 | 2020-08-13T18:35:33 | 2020-08-13T18:35:33 | 287,353,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from django.urls import path
from first import views
# Routes for the bank-data CRUD class-based views; `ano` (presumably the
# account number -- verify against the views) selects the record to act on.
urlpatterns = [
    path('list/',views.GetBankData.as_view()),
    path('add/', views.AddBankData.as_view()),
    path('<int:ano>/', views.DeleteView.as_view()),
    path('update/<int:ano>/', views.UpdateView.as_view()),
]
| [
"moreshubham203@gmail.com"
] | moreshubham203@gmail.com |
0df4506dacd545abd91b163d9de4b2880bf748cb | 753938feae47c1ce8edeaa35099c634fbafa3b99 | /store/store_convergence.py | 569d2102921bf041a6e060c55c2a75cb8a7ce3c3 | [
"MIT"
] | permissive | dayoladejo/SwarmOptimization | c11cc799ec5a16a3a341ea0f2b5b9df1ea5f65d9 | 5445b6f90ab49339ca0fdb71e98d44e6827c95a8 | refs/heads/main | 2023-07-17T09:14:16.266657 | 2021-09-10T17:10:56 | 2021-09-10T17:10:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,228 | py | #
# file: store_convergence.py
#
# Plot store convergence as function of iteration.
#
# RTK, 24-Sep-2020
# Last update: 24-Sep-2020
#
################################################################
import pickle
import numpy as np
import matplotlib.pylab as plt
def geny(m, gbest, giter):
    """Expand best-so-far values into a length-m step curve (values negated)."""
    curve = np.zeros(m)
    # Each improvement at iteration giter[i] holds from there to the end.
    for i, start in enumerate(giter):
        curve[start:] = -gbest[i]
    return curve
# One (legend label, results file stem, marker) triple per optimizer; replaces
# six copy-pasted load/plot blocks with a single data-driven loop.
METHODS = [
    ("DE",   "de",   "P"),
    ("PSO",  "pso",  "s"),
    ("GWO",  "gwo",  "<"),
    ("Jaya", "jaya", "*"),
    ("GA",   "ga",   ">"),
    ("RO",   "ro",   "o"),
]

for label, stem, marker in METHODS:
    # Context manager closes the pickle file (the original leaked every handle).
    with open("results/%s_results.pkl" % stem, "rb") as fp:
        d = pickle.load(fp)
    gbest, giter, miter = d["gbest"], d["giter"], d["max_iter"]
    x, y = range(miter), geny(miter, gbest, giter)
    # Markers every 200 iterations keep the overlapping curves distinguishable.
    plt.plot(x[::200], y[::200], marker=marker, linestyle='none', color='k', label=label)
    plt.plot(x, y, color='k')

plt.legend(loc="lower right")
plt.xlabel("Iteration")
plt.ylabel("Revenue")
plt.ylim((185,255))
plt.tight_layout(pad=0, w_pad=0, h_pad=0)
plt.savefig("store_convergence.png", dpi=300)
plt.show()
| [
"oneelkruns@hotmail.com"
] | oneelkruns@hotmail.com |
03139a6a7f7c39063ba8cd75ec2ed4dc60c632a8 | 5619eb14e8c46a70ba228f96c4afcebbae557189 | /url_tester/migrations/0004_auto_20190107_1552.py | cb82a2213547b9a90915b99fee210e96dd484ea7 | [] | no_license | eduarde/URLTester | 3d7b6be6f43886559264189f70f126c3ee74678d | 32c158943b4892b7f00d328ed26d63d813287106 | refs/heads/master | 2020-04-15T05:36:25.856274 | 2019-02-06T13:54:29 | 2019-02-06T13:54:29 | 164,430,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 2.1.5 on 2019-01-07 13:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relax Url.name to an optional CharField
    # (blank=True, null=True, max_length=200). Do not edit the operations.
    dependencies = [
        ('url_tester', '0003_auto_20190107_1551'),
    ]
    operations = [
        migrations.AlterField(
            model_name='url',
            name='name',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
| [
"eduard.erja@gmail.com"
] | eduard.erja@gmail.com |
e6643bc565b34a1eaed02f17519df6cbdb69e7a0 | 23fddc940a266c2d1d0e0b1687c36cdbcc9d54d9 | /shared/data_collection/scribeutil.py | 94a572426b9ecabc72323e87e3a64c7180ca6ad3 | [] | no_license | Cuick/traversing | 210fcfb1c780037de59343fffeb4fa4d3f2eae32 | c78982580af7f63c8bff4dcb37005b7f7c682b5b | refs/heads/master | 2021-01-10T17:38:37.899460 | 2016-11-18T06:06:55 | 2016-11-18T06:06:55 | 55,397,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,918 | py | # -*- coding:utf-8 -*-
"""
created by server on 14-8-27下午4:16.
"""
try:
from scribe import scribe
from thrift.transport import TTransport, TSocket
from thrift.protocol import TBinaryProtocol
SCRIBE_AVAILABLE = True # 检查scribe相关库正常,不要影响业务系统
except ImportError, e:
print '<------->'
print e
SCRIBE_AVAILABLE = False
class Singleton(type):
    """Metaclass that caches the first instance of each class and always returns it."""
    def __init__(cls, name, bases, dic):
        super(Singleton, cls).__init__(name, bases, dic)
        # One cached instance per class created with this metaclass.
        cls.instance = None
    def __call__(cls, *args, **kwargs):
        if cls.instance is not None:
            return cls.instance
        cls.instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls.instance
class _Transport(object):
    '''
    Thin wrapper around a framed Thrift transport; use this class as a raw socket.
    NOTE: this module uses Python 2 syntax ("except Exception, e").
    '''
    def __init__(self, host, port, timeout=None, unix_socket=None):
        self.host = host
        self.port = port
        self.timeout = timeout # NOTE(review): commented as ms -- confirm the unit TSocket.settimeout expects
        self._unix_socket = unix_socket
        self._socket = TSocket.TSocket(self.host, self.port, self._unix_socket)
        self._transport = TTransport.TFramedTransport(self._socket)
    def __del__(self):
        # Best-effort cleanup of the underlying socket on garbage collection.
        self._socket.close()
    def connect(self):
        '''Open the framed transport if not already open; close it on any failure.'''
        try:
            if self.timeout:
                self._socket.settimeout(self.timeout)
            if not self._transport.isOpen():
                self._transport.open()
            else:
                pass
        # Deliberately best-effort: errors are swallowed and the transport is
        # closed; callers detect failure via isOpen().
        except Exception, e:
            self.close()
    def isOpen(self):
        # True while the framed transport holds an open connection.
        return self._transport.isOpen()
    def get_trans(self):
        # Expose the framed transport for protocol construction.
        return self._transport
    def close(self):
        self._transport.close()
import time
class ScribeClient(object):
    '''A simple scribe client (one shared instance via the Singleton metaclass).'''
    __metaclass__ = Singleton   # Python 2 metaclass declaration
    def __init__(self, host, port, timeout=None, unix_socket=None):
        # Build transport -> binary protocol -> generated scribe client, then connect.
        self._transObj = _Transport(host, port, timeout=timeout, unix_socket=unix_socket)
        self._protocol = TBinaryProtocol.TBinaryProtocol(trans=self._transObj.get_trans(), strictRead=False, strictWrite=False)
        self.client = scribe.Client(iprot=self._protocol, oprot=self._protocol)
        self._transObj.connect()
    def log(self, category, message):
        '''Send *message* under *category*, prefixed with a HH:MM:SS timestamp.

        On failure the connection is re-established once and the entry is
        retried; if reconnection also fails the entry is silently dropped.
        '''
        message = time.strftime('%H:%M:%S') + '\t' + message # add timestamp before log
        log_entry = scribe.LogEntry(category=category, message=message)
        try:
            self.client.Log([log_entry])
        except Exception:  # binding-free form works on both Python 2 and 3
            self._transObj.close()
            self._transObj.connect()
            if self._transObj.isOpen():
                self.client.Log([log_entry])
            else:
                pass
    @classmethod
    def instance(cls):
        '''Return the shared ScribeClient, creating it on first use.

        Fixed: the original fell through without returning the cached
        instance (callers always got None).
        NOTE(review): cls() is invoked with no arguments although __init__
        requires host/port -- confirm callers rely on the Singleton metaclass
        rather than this helper.
        '''
        if not hasattr(cls, '_instance'):
            cls._instance = cls()
        return cls._instance
| [
"zxzxck@163.com"
] | zxzxck@163.com |
bd9632f645cbdef8be36b04fe17ef06be5f9b4d1 | 280079e18b506ec7ed85a49587e10db795947922 | /accounting/libs/templatetags/introspection_filters.py | d7e06cce56fa0b40cd5dd3cd0442d8fda47ee202 | [
"MIT"
] | permissive | Abdur-rahmaanJ/django-accounting | 6a4d30fd0070f4b11b50490a801b0dece58dd474 | 4ec094544873843722db964ea9283a7947c4ba32 | refs/heads/master | 2020-03-22T17:02:47.120624 | 2018-07-10T10:49:19 | 2018-07-10T10:49:19 | 140,368,614 | 0 | 0 | MIT | 2018-07-10T10:49:20 | 2018-07-10T02:52:28 | Python | UTF-8 | Python | false | false | 920 | py | from django import template
from django.forms import ModelForm, BaseFormSet
from django.db.models import Model
from django_select2.fields import (
AutoModelSelect2Field,
AutoModelSelect2MultipleField)
register = template.Library()
@register.filter
def get_model_verbose_name(instance):
    """Template filter: title-cased verbose name of a model instance, else '<unknown>'."""
    if not isinstance(instance, Model):
        return '<unknown>'
    return instance._meta.verbose_name.title()
@register.filter
def get_form_model_verbose_name(instance):
    """Template filter: verbose name of the model behind a ModelForm or formset."""
    if isinstance(instance, ModelForm):
        model_meta = instance._meta.model._meta
        return model_meta.verbose_name.title()
    if isinstance(instance, BaseFormSet):
        # Formsets describe many objects, so use the plural name.
        return instance.model._meta.verbose_name_plural.title()
    return '<unknown>'
@register.filter
def is_select2_field(form, field):
    """Template filter: True when the bound field's form field is a django-select2 auto field."""
    return isinstance(
        field.field, (AutoModelSelect2Field, AutoModelSelect2MultipleField))
| [
"dulacpier@gmail.com"
] | dulacpier@gmail.com |
f8619e156fdfa736e2afaac94ee691bc3ff1978b | e473f04f5dd4e40393bc1047e6f326e589204530 | /law/notification.py | 880ccec0117a02ee59dc937376dc76f77458dd42 | [
"BSD-3-Clause"
] | permissive | yrath/law | bc9848aa1238538af7cb9a335e0fa9fad90bbecf | 807306d6b2113e6c546c01fcaa134bba551b4759 | refs/heads/master | 2020-07-15T11:01:30.517171 | 2019-12-12T11:56:41 | 2019-12-12T11:56:41 | 205,548,161 | 0 | 0 | BSD-3-Clause | 2019-08-31T13:29:23 | 2019-08-31T13:29:22 | null | UTF-8 | Python | false | false | 1,203 | py | # coding: utf-8
"""
Notification functions.
"""
__all__ = ["notify_mail"]
import logging
from law.config import Config
from law.util import send_mail
logger = logging.getLogger(__name__)
def notify_mail(title, message, recipient=None, sender=None, smtp_host=None, smtp_port=None,
        **kwargs):
    """
    Mail notification method taking a *title* and a string *message*. *recipient*, *sender*,
    *smtp_host* and *smtp_port* default to the configuration values in the [notifications] section.

    Returns False (after logging a warning) when recipient or sender cannot be
    determined; otherwise returns the result of :py:func:`law.util.send_mail`.
    """
    cfg = Config.instance()
    # Fall back to the [notifications] config section for any unset argument.
    if not recipient:
        recipient = cfg.get_expanded("notifications", "mail_recipient")
    if not sender:
        sender = cfg.get_expanded("notifications", "mail_sender")
    if not smtp_host:
        smtp_host = cfg.get_expanded("notifications", "mail_smtp_host")
    if not smtp_port:
        smtp_port = cfg.get_expanded("notifications", "mail_smtp_port")

    if not recipient or not sender:
        # Lazy %-style args avoid string formatting when the warning is filtered.
        logger.warning("cannot send mail notification, recipient (%s) or sender (%s) empty",
            recipient, sender)
        return False

    return send_mail(recipient, sender, title, message, smtp_host=smtp_host, smtp_port=smtp_port)
| [
"marcelrieger@me.com"
] | marcelrieger@me.com |
fc087e51acf67938797c340946b8cda20e017f72 | e65a4dbfbfb0e54e59787ba7741efee12f7687f3 | /archivers/py-brotli/files/patch-setup.py | 9f97287a706fdfc4c9aa658f8632f588dd3bf945 | [
"BSD-2-Clause"
] | permissive | freebsd/freebsd-ports | 86f2e89d43913412c4f6b2be3e255bc0945eac12 | 605a2983f245ac63f5420e023e7dce56898ad801 | refs/heads/main | 2023-08-30T21:46:28.720924 | 2023-08-30T19:33:44 | 2023-08-30T19:33:44 | 1,803,961 | 916 | 918 | NOASSERTION | 2023-09-08T04:06:26 | 2011-05-26T11:15:35 | null | UTF-8 | Python | false | false | 3,962 | py | --- setup.py.orig 2020-08-27 06:24:08 UTC
+++ setup.py
@@ -25,7 +25,7 @@ CURR_DIR = os.path.abspath(os.path.dirname(os.path.rea
def get_version():
""" Return BROTLI_VERSION string as defined in 'common/version.h' file. """
- version_file_path = os.path.join(CURR_DIR, 'c', 'common', 'version.h')
+ version_file_path = os.path.join(CURR_DIR, 'common', 'version.h')
version = 0
with open(version_file_path, 'r') as f:
for line in f:
@@ -181,92 +181,17 @@ EXT_MODULES = [
'_brotli',
sources=[
'python/_brotli.cc',
- 'c/common/constants.c',
- 'c/common/context.c',
- 'c/common/dictionary.c',
- 'c/common/platform.c',
- 'c/common/transform.c',
- 'c/dec/bit_reader.c',
- 'c/dec/decode.c',
- 'c/dec/huffman.c',
- 'c/dec/state.c',
- 'c/enc/backward_references.c',
- 'c/enc/backward_references_hq.c',
- 'c/enc/bit_cost.c',
- 'c/enc/block_splitter.c',
- 'c/enc/brotli_bit_stream.c',
- 'c/enc/cluster.c',
- 'c/enc/command.c',
- 'c/enc/compress_fragment.c',
- 'c/enc/compress_fragment_two_pass.c',
- 'c/enc/dictionary_hash.c',
- 'c/enc/encode.c',
- 'c/enc/encoder_dict.c',
- 'c/enc/entropy_encode.c',
- 'c/enc/fast_log.c',
- 'c/enc/histogram.c',
- 'c/enc/literal_cost.c',
- 'c/enc/memory.c',
- 'c/enc/metablock.c',
- 'c/enc/static_dict.c',
- 'c/enc/utf8_util.c',
],
depends=[
- 'c/common/constants.h',
- 'c/common/context.h',
- 'c/common/dictionary.h',
- 'c/common/platform.h',
- 'c/common/transform.h',
- 'c/common/version.h',
- 'c/dec/bit_reader.h',
- 'c/dec/huffman.h',
- 'c/dec/prefix.h',
- 'c/dec/state.h',
- 'c/enc/backward_references.h',
- 'c/enc/backward_references_hq.h',
- 'c/enc/backward_references_inc.h',
- 'c/enc/bit_cost.h',
- 'c/enc/bit_cost_inc.h',
- 'c/enc/block_encoder_inc.h',
- 'c/enc/block_splitter.h',
- 'c/enc/block_splitter_inc.h',
- 'c/enc/brotli_bit_stream.h',
- 'c/enc/cluster.h',
- 'c/enc/cluster_inc.h',
- 'c/enc/command.h',
- 'c/enc/compress_fragment.h',
- 'c/enc/compress_fragment_two_pass.h',
- 'c/enc/dictionary_hash.h',
- 'c/enc/encoder_dict.h',
- 'c/enc/entropy_encode.h',
- 'c/enc/entropy_encode_static.h',
- 'c/enc/fast_log.h',
- 'c/enc/find_match_length.h',
- 'c/enc/hash.h',
- 'c/enc/hash_composite_inc.h',
- 'c/enc/hash_forgetful_chain_inc.h',
- 'c/enc/hash_longest_match64_inc.h',
- 'c/enc/hash_longest_match_inc.h',
- 'c/enc/hash_longest_match_quickly_inc.h',
- 'c/enc/hash_rolling_inc.h',
- 'c/enc/hash_to_binary_tree_inc.h',
- 'c/enc/histogram.h',
- 'c/enc/histogram_inc.h',
- 'c/enc/literal_cost.h',
- 'c/enc/memory.h',
- 'c/enc/metablock.h',
- 'c/enc/metablock_inc.h',
- 'c/enc/params.h',
- 'c/enc/prefix.h',
- 'c/enc/quality.h',
- 'c/enc/ringbuffer.h',
- 'c/enc/static_dict.h',
- 'c/enc/static_dict_lut.h',
- 'c/enc/utf8_util.h',
- 'c/enc/write_bits.h',
],
include_dirs=[
- 'c/include',
+ '%%LOCALBASE%%/include',
+ ],
+ libraries=[
+ 'brotlicommon', 'brotlidec', 'brotlienc',
+ ],
+ library_dirs=[
+ '%%LOCALBASE%%/lib',
],
language='c++'),
]
| [
"sunpoet@FreeBSD.org"
] | sunpoet@FreeBSD.org |
257fdf4bf5c379f7a73f67693f5512683c6c3822 | 1a6cbe035adb81fea66615323a836327d06f9e72 | /year2020/run.py | 9cf6233087187489d27412c4e5c11f9fc5271ba4 | [] | no_license | ecurtin2/advent-of-code | a2607d857408d722b07d4cfc66855edcd019cda7 | 216db926c5bab9bf1ec3cac2aa912c1a2ff70d6c | refs/heads/main | 2022-12-15T10:06:51.202608 | 2022-12-14T17:28:15 | 2022-12-14T17:28:15 | 160,612,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | import importlib
from dataclasses import dataclass
from pathlib import Path
from statistics import mean, stdev
from time import time
from typing import Any, List, Optional, Tuple, get_type_hints
import cattr
import click
AVAILABLE_DAYS: List[int] = sorted(
int(str(p.name).replace("d", "").replace(".py", ""))
for p in Path(__file__).parent.rglob("d*.py")
)
cattr.register_structure_hook(List[int], lambda s, _: [int(l) for l in s.splitlines()])
cattr.register_structure_hook(List[str], lambda s, _: s.splitlines())
def timeit(f, *args, **kwargs) -> Tuple[float, float]:
    """Time f(*args, **kwargs) and return (mean, stdev) of run times in seconds.

    Fixed: statistics.stdev raises StatisticsError for fewer than two data
    points, and this loop records a single run -- guard the spread computation.
    """
    times = []
    for _ in range(1):
        begin = time()
        f(*args, **kwargs)
        times.append(time() - begin)
    spread = stdev(times) if len(times) > 1 else 0.0
    return mean(times), spread
@dataclass
class Run:
    # One puzzle execution: which day/part to run, plus captured result/timings.
    day: int
    part: int
    result: Optional[Any] = None
    mean_duration_ms: Optional[float] = None
    std_duration_ms: Optional[float] = None
    def execute(self, timed: bool = False):
        """Import d<day>.py, load its input file, run part<part>, store the result.

        When ``timed`` is True the solver is also benchmarked via timeit() and
        the mean/std durations are recorded in milliseconds.
        """
        module = importlib.import_module(f"d{self.day}")
        input_path = Path(f"inputs/d{self.day}p{self.part}.txt")
        if (not input_path.is_file()) and (self.part != 1):
            # fallback since sometimes same input reused.
            input_path = Path(f"inputs/d{self.day}p1.txt")
        try:
            raw_str = input_path.read_text()
        except FileNotFoundError:
            raise FileNotFoundError(
                f"No data file found for day {self.day} part {self.part}. Expected {input_path}"
            )
        func = getattr(module, f"part{self.part}")
        # Convert raw input text into whatever type the solver's first parameter
        # is annotated with (using the cattr hooks registered at module level).
        typ = list(get_type_hints(func).values())[0]
        inp = cattr.structure(raw_str, typ)
        self.result = func(inp)
        if timed:
            m, s = timeit(func, inp)
            self.mean_duration_ms = m * 1000
            self.std_duration_ms = s * 1000
@click.command()
@click.option(
    "--day",
    "-d",
    type=click.Choice([str(d) for d in AVAILABLE_DAYS] + ["all"]),
    required=False,
    default="all",
)
@click.option("--part", "-p", type=click.Choice(["1", "2"]), required=False)
@click.option("--timed/--no-timed", default=False)
def cli(day: int, part: Optional[int], timed: bool):
    """Run the requested advent-of-code day(s)/part(s) and print the results."""
    # No --part given -> run both parts.
    if part is None:
        parts = [1, 2]
    else:
        parts = [part]
    if day == "all":
        runs = [Run(day=d, part=p) for d in AVAILABLE_DAYS for p in parts]
    else:
        runs = [Run(day=int(day), part=p) for p in parts]
    last_day = -1
    for run in runs:
        run.execute(timed=timed)
        # Print a day header only when the day changes.
        if run.day != last_day:
            print(f"\nDay {run.day}\n--------------------------------")
            last_day = run.day
        print(f"Part {run.part}:\n Result: {run.result}")
        if run.mean_duration_ms:
            print(
                f" Time: {run.mean_duration_ms:07.4f} +/- {run.std_duration_ms:07.4f}ms"
            )
if __name__ == "__main__":
cli()
| [
"ecurtin2@illinois.edu"
] | ecurtin2@illinois.edu |
b3066063ef59fcb92ab7a88089fc8ce41b881999 | bd1db30fd3c593e8dc4f2e21de630668456ed28f | /educational-resources/robotics/gym-gazebo-master/examples/turtlebot/circuit2_turtlebot_lidar_sarsa.py | ed521957b28cc965d5848020afaef371190c0b89 | [
"MIT",
"GPL-3.0-only"
] | permissive | vicb1/miscellaneous-notes | c50d759729b4d0067b3c4cb51a69350db5a941b9 | eb63641a8156a4dcd78924b5d0f6f0618479ceaf | refs/heads/master | 2023-05-11T06:04:03.254582 | 2023-05-09T12:34:18 | 2023-05-09T12:34:18 | 227,648,115 | 1 | 0 | MIT | 2022-12-11T17:00:50 | 2019-12-12T16:20:38 | Python | UTF-8 | Python | false | false | 2,709 | py | #!/usr/bin/env python
import gym
from gym import wrappers
import gym_gazebo
import time
import numpy
import random
import time
import liveplot
import sarsa
if __name__ == '__main__':
    # Gazebo-backed Gym environment; episodes are recorded to outdir for replay.
    env = gym.make('GazeboCircuit2TurtlebotLidar-v0')
    outdir = '/tmp/gazebo_gym_experiments'
    env = gym.wrappers.Monitor(env, outdir, force=True)
    plotter = liveplot.LivePlot(outdir)
    # Episode lengths (steps survived) collected for the final score summary.
    last_time_steps = numpy.ndarray(0)
    # NOTE(review): this rebinding shadows the imported `sarsa` module with the
    # agent instance -- the module is unreachable after this line.
    sarsa = sarsa.Sarsa(actions=range(env.action_space.n),
                    epsilon=0.9, alpha=0.2, gamma=0.9)
    initial_epsilon = sarsa.epsilon
    # Epsilon decays by this factor each episode (down to a floor of 0.05).
    epsilon_discount = 0.9986
    start_time = time.time()
    total_episodes = 10000
    highest_reward = 0
    for x in range(total_episodes):
        done = False
        cumulated_reward = 0 #Should going forward give more reward then L/R ?
        observation = env.reset()
        if sarsa.epsilon > 0.05:
            sarsa.epsilon *= epsilon_discount
        #render() #defined above, not env.render()
        # Discretize the lidar observation into a hashable state key.
        state = ''.join(map(str, observation))
        for i in range(1500):
            # Pick an action based on the current state
            action = sarsa.chooseAction(state)
            # Execute the action and get feedback
            observation, reward, done, info = env.step(action)
            cumulated_reward += reward
            if highest_reward < cumulated_reward:
                highest_reward = cumulated_reward
            nextState = ''.join(map(str, observation))
            nextAction = sarsa.chooseAction(nextState)
            # SARSA update uses the actually-chosen next action (on-policy).
            #sarsa.learn(state, action, reward, nextState)
            sarsa.learn(state, action, reward, nextState, nextAction)
            env._flush(force=True)
            if not(done):
                state = nextState
            else:
                last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
                break
        if x%100==0:
            plotter.plot(env)
        # Elapsed wall-clock time formatted as h:mm:ss for the progress line.
        m, s = divmod(int(time.time() - start_time), 60)
        h, m = divmod(m, 60)
        print ("EP: "+str(x+1)+" - [alpha: "+str(round(sarsa.alpha,2))+" - gamma: "+str(round(sarsa.gamma,2))+" - epsilon: "+str(round(sarsa.epsilon,2))+"] - Reward: "+str(cumulated_reward)+" Time: %d:%02d:%02d" % (h, m, s))
    #Github table content
    print ("\n|"+str(total_episodes)+"|"+str(sarsa.alpha)+"|"+str(sarsa.gamma)+"|"+str(initial_epsilon)+"*"+str(epsilon_discount)+"|"+str(highest_reward)+"| PICTURE |")
    l = last_time_steps.tolist()
    l.sort()
    #print("Parameters: a="+str)
    # NOTE(review): bare `reduce` below implies Python 2 (it is a builtin there).
    print("Overall score: {:0.2f}".format(last_time_steps.mean()))
    print("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
    env.close()
| [
"vbajenaru@gmail.com"
] | vbajenaru@gmail.com |
52cff6065e5fb915053715a160ce650b4c2235d8 | 923f9270a12be35fdd297d8f27e522c601e94eab | /doc/slides/src/decay/src-decay/decay_mod_unittest.py | 807c314773b07bb8b1fb368b785a75891694aec8 | [] | no_license | t-bltg/INF5620 | a06b6e06b6aba3bc35e933abd19c58cd78584c1f | d3e000462302839b49693cfe06a2f2df924c5027 | refs/heads/master | 2021-05-31T00:41:41.624838 | 2016-03-22T09:29:00 | 2016-03-22T09:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,015 | py | import numpy as np
import matplotlib.pyplot as plt
import sys
from math import log
def solver(I, a, T, dt, theta):
    """
    Integrate u'=-a*u, u(0)=I over (0,T] with the theta-rule and step dt
    (theta=0: Forward Euler, 0.5: Crank-Nicolson, 1: Backward Euler).

    >>> u, t = solver(I=0.8, a=1.2, T=4, dt=0.5, theta=0.5)
    >>> for t_n, u_n in zip(t, u):
    ...     print 't=%.1f, u=%.14f' % (t_n, u_n)
    t=0.0, u=0.80000000000000
    t=0.5, u=0.43076923076923
    t=1.0, u=0.23195266272189
    t=1.5, u=0.12489758761948
    t=2.0, u=0.06725254717972
    t=2.5, u=0.03621291001985
    t=3.0, u=0.01949925924146
    t=3.5, u=0.01049960113002
    t=4.0, u=0.00565363137770
    """
    dt = float(dt)                 # guard against integer division
    Nt = int(round(T/dt))          # number of time intervals
    T = Nt*dt                      # snap T to a whole number of steps
    u = np.zeros(Nt+1)
    t = np.linspace(0, T, Nt+1)
    u[0] = I
    # The theta-rule reduces to multiplying by a constant factor each step.
    amplification = (1 - (1-theta)*a*dt)/(1 + theta*dt*a)
    for n in range(0, Nt):
        u[n+1] = amplification*u[n]
    return u, t
def exact_solution(t, I, a):
    """Analytical solution I*exp(-a*t) of u'=-a*u, u(0)=I."""
    decay = np.exp(-a*t)
    return I*decay
def explore(I, a, T, dt, theta=0.5, makeplot=True):
    """
    Run a case with the solver, compute error measure,
    and plot the numerical and exact solutions (if makeplot=True).

    >>> for theta in 0, 0.5, 1:
    ...     E = explore(I=1.9, a=2.1, T=5, dt=0.1, theta=theta,
    ...                 makeplot=False)
    ...     print '%.10E' % E
    ...
    7.3565079236E-02
    2.4183893110E-03
    6.5013039886E-02
    """
    u, t = solver(I, a, T, dt, theta)  # Numerical solution
    u_e = exact_solution(t, I, a)
    e = u_e - u
    # Discrete L2 norm of the error: E = sqrt(dt * sum(e_n^2)).
    E = np.sqrt(dt*np.sum(e**2))
    if makeplot:
        plt.figure()                   # create new plot
        t_e = np.linspace(0, T, 1001)  # very fine mesh for u_e
        u_e = exact_solution(t_e, I, a)
        plt.plot(t, u, 'r--o')         # red dashes w/circles
        plt.plot(t_e, u_e, 'b-')       # blue line for u_e
        plt.legend(['numerical', 'exact'])
        plt.xlabel('t')
        plt.ylabel('u')
        plt.title('Method: theta-rule, theta=%g, dt=%g' % \
                  (theta, dt))
        # File name encodes the scheme (FE/BE/CN) and the time step.
        theta2name = {0: 'FE', 1: 'BE', 0.5: 'CN'}
        plt.savefig('%s_%g.png' % (theta2name[theta], dt))
        plt.show()
    return E
def define_command_line_options():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--I', '--initial_condition', type=float,
default=1.0, help='initial condition, u(0)',
metavar='I')
parser.add_argument('--a', type=float,
default=1.0, help='coefficient in ODE',
metavar='a')
parser.add_argument('--T', '--stop_time', type=float,
default=3.0, help='end time of simulation',
metavar='T')
parser.add_argument('--makeplot', action='store_true',
help='display plot or not')
parser.add_argument('--dt', '--time_step_values', type=float,
default=[0.5], help='time step values',
metavar='dt', nargs='+', dest='dt_values')
return parser
def read_command_line(use_argparse=True):
"""
Read parameters from the command line and return their
values as the sequence I, a, T, makeplot, dt_values.
>>> sys.argv[1:] = '1.2 0.9 4 True 0.1 0.05'.split()
>>> prms = read_command_line(use_argparse=False)
>>> print prms
(1.2, 0.9, 4.0, True, [0.1, 0.05])
>>> sys.argv[1:] = '--I 1.2 --a 0.9 --T 4 --makeplot '\
'--dt 0.1 0.05'.split()
>>> prms = read_command_line(use_argparse=True)
>>> print prms
(1.2, 0.9, 4.0, True, [0.1, 0.05])
"""
if use_argparse:
parser = define_command_line_options()
args = parser.parse_args()
return args.I, args.a, args.T, args.makeplot, args.dt_values
else:
if len(sys.argv) < 6:
print 'Usage: %s I a on/off dt1 dt2 dt3 ...' % \
sys.argv[0]; sys.exit(1)
I = float(sys.argv[1])
a = float(sys.argv[2])
T = float(sys.argv[3])
makeplot = sys.argv[4] in ('on', 'True')
dt_values = [float(arg) for arg in sys.argv[5:]]
return I, a, T, makeplot, dt_values
def main():
I, a, T, makeplot, dt_values = read_command_line()
r = {}
for theta in 0, 0.5, 1:
E_values = []
for dt in dt_values:
E = explore(I, a, T, dt, theta, makeplot=False)
E_values.append(E)
# Compute convergence rates
m = len(dt_values)
r[theta] = [log(E_values[i-1]/E_values[i])/
log(dt_values[i-1]/dt_values[i])
for i in range(1, m, 1)]
for theta in r:
print '\nPairwise convergence rates for theta=%g:' % theta
print ' '.join(['%.2f' % r_ for r_ in r[theta]])
return r
if __name__ == '__main__':
main()
| [
"hpl@simula.no"
] | hpl@simula.no |
0545fda410ccd767130446d92bd5540e4debe9be | c4c9fd6bb97f8d0d16de36f67365b76278c93959 | /proj/proj03/tests/q1_10.py | fa4a5049dd509f6c35ecf0afb423b3be88f203d0 | [] | no_license | ds-connectors/DATA-88-EconModels-sp20 | d71795dad299031add94e52a60206b5840b144c7 | 6a8297cf20f8afe8ef52e4bfa93aca3cfff1afc5 | refs/heads/master | 2023-08-30T03:47:51.417760 | 2020-04-27T19:35:16 | 2020-04-27T19:35:16 | 231,190,728 | 1 | 0 | null | 2023-08-14T21:55:19 | 2020-01-01T08:09:44 | Jupyter Notebook | UTF-8 | Python | false | false | 447 | py | test = { 'name': 'q1_10',
'points': 1,
'suites': [ { 'cases': [ {'code': '>>> -4000 <= default_beta <= -3000\nTrue', 'hidden': False, 'locked': False},
{'code': '>>> 51000 <= default_alpha <= 52000\nTrue', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
"cpyles@berkeley.edu"
] | cpyles@berkeley.edu |
3a0dcd089e8fecfa5fd81aa3848aa8220b1e2507 | 0e820627e68413aebe27fbc32dde66a3c99651d3 | /flamingo/core/parser.py | 186d7d11e2fd1b1da243823e9c6d051bca7a2bdc | [
"Apache-2.0"
] | permissive | ejoerns/flamingo | 17de116008561a4a64613b2b6528eb6d52706281 | 1d61c99c9ad34dd0a2a652f80783226051e07238 | refs/heads/master | 2020-04-11T02:53:47.295752 | 2018-12-14T11:17:23 | 2018-12-14T11:37:39 | 161,460,547 | 0 | 0 | Apache-2.0 | 2018-12-12T09:00:34 | 2018-12-12T09:00:34 | null | UTF-8 | Python | false | false | 2,022 | py | from configparser import ConfigParser, Error as ConfigParserError
from io import StringIO
import os
from flamingo.core.errors import FlamingoError
class ParsingError(FlamingoError):
pass
class ContentParser:
FILE_EXTENSIONS = []
def __init__(self):
self.configparser = ConfigParser(interpolation=None)
def parse_meta_data(self, fp, content):
meta_data_buffer = StringIO('[meta]\n')
meta_data_buffer.read()
empty_lines = 0
while True:
line = fp.readline()
if not line: # eof
break
if not line.strip():
empty_lines += 1
else:
empty_lines = 0
if empty_lines == 2:
break
meta_data_buffer.write(line)
meta_data_buffer.seek(0)
self.configparser.clear()
self.configparser.read_file(meta_data_buffer)
for option in self.configparser.options('meta'):
content[option] = self.configparser.get('meta', option)
def parse(self, fp, content):
self.parse_meta_data(fp, content)
content['content_body'] = fp.read().strip()
class FileParser:
def __init__(self):
self._parsers = []
def add_parser(self, parser):
self._parsers.append(parser)
def find_parser(self, extension):
for parser in self._parsers:
if extension in parser.FILE_EXTENSIONS:
return parser
def get_extensions(self):
return sum([i.FILE_EXTENSIONS for i in self._parsers], [])
def parse(self, path, content):
extension = os.path.splitext(path)[1][1:]
parser = self.find_parser(extension)
if not parser:
raise ParsingError(
"file extension '{}' is not supported".format(extension))
try:
parser.parse(open(path, 'r'), content) # FIXME: chardet
except ConfigParserError:
raise ParsingError('Metadata seem to be broken')
| [
"f.scherf@pengutronix.de"
] | f.scherf@pengutronix.de |
c29e057c72aa360392571f06201dd466d58bf1fa | 79f1e7932c27eb01483f8764720c672242052e1f | /training_horovod_single_aa.py | 4d7e93d81b64226a254dda09ec2813e09755c295 | [] | no_license | pk-organics/uniparc_modeling | 3b16ae5b85dc178fdcab4be3b4ddbdab02c80897 | ab9faaad00c20416ea2ac86f6f91b83f86ffb7a4 | refs/heads/master | 2023-02-13T19:58:29.841889 | 2019-12-05T18:53:05 | 2019-12-05T18:53:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,457 | py | import os
import argparse
parser = argparse.ArgumentParser(description='BERT model training')
parser.add_argument('--modelName', default='bert', help='model name for directory saving')
parser.add_argument('--batchSize', type=int, default=20, help='batch size per gpu')
parser.add_argument('--stepsPerEpoch', type=int, default=10000, help='steps per epoch')
parser.add_argument('--warmup', type=int, default=16000, help='warmup steps')
arguments = parser.parse_args()
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from bert.dataset import create_masked_input_dataset
from bert.layers import (PositionEmbedding, Attention, Transformer, TokenEmbedding, Bias,
gelu, masked_sparse_cross_entropy_loss, InverseSquareRootSchedule,
initializer, Projection)
import horovod.tensorflow.keras as hvd
# Horovod: initialize Horovod.
hvd.init()
# Print runtime config on head node
if hvd.rank() == 0:
print(arguments)
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
# import tensorflow_addons as tfa
from tensorflow.keras import layers
vocab_size = 22
max_seq_len = 1024
def encode(line_tensor):
line = line_tensor.numpy().decode('utf8')
if len(line) > max_seq_len:
offset = np.random.randint(
low=0, high=len(line) - max_seq_len + 1)
line = line[offset:(offset + max_seq_len)]
vocab = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V',
'W', 'Y']
replacement_dict = {key: i + 2 for i, key in enumerate(vocab)}
return np.asarray([replacement_dict[item] for item in line])
def encode_tf(line_tensor):
return tf.py_function(encode, inp=[line_tensor], Tout=[tf.int32,])
training_data = create_masked_input_dataset(
encode_fn=encode_tf,
sequence_path='/projects/bpms/pstjohn/uniparc/sequences_train.txt',
max_sequence_length=max_seq_len,
batch_size=arguments.batchSize,
buffer_size=1024,
vocab_size=vocab_size,
mask_index=4,
vocab_start=5,
fix_sequence_length=True,
shard_num_workers=hvd.size(),
shard_worker_index=hvd.rank())
training_data.repeat().prefetch(tf.data.experimental.AUTOTUNE)
valid_data = create_masked_input_dataset(
encode_fn=encode_tf,
sequence_path='/projects/bpms/pstjohn/uniparc/sequences_valid.txt',
max_sequence_length=max_seq_len,
batch_size=arguments.batchSize,
buffer_size=1024,
vocab_size=vocab_size,
mask_index=4,
vocab_start=5,
fix_sequence_length=True,
shard_num_workers=hvd.size(),
shard_worker_index=hvd.rank())
valid_data.repeat().prefetch(tf.data.experimental.AUTOTUNE)
embedding_dimension = 128
model_dimension = 768
transformer_dimension = 4 * model_dimension
num_attention_heads = model_dimension // 64
num_transformer_layers = 12
# embedding_dimension = 32
# model_dimension = 64
# num_attention_heads = model_dimension // 16
# num_transformer_layers = 4
dropout_rate = 0.
# Horovod: adjust learning rate based on number of GPUs.
learning_rate = 1E-4
inputs = layers.Input(shape=(max_seq_len,), dtype=tf.int32, batch_size=None)
input_mask = layers.Input(shape=(max_seq_len,), dtype=tf.bool, batch_size=None)
token_embedding_layer = TokenEmbedding(
vocab_size, embedding_dimension, embeddings_initializer=initializer(), mask_zero=True)
token_embeddings = token_embedding_layer(inputs)
position_embeddings = PositionEmbedding(
max_seq_len + 1, embedding_dimension, embeddings_initializer=initializer(),
mask_zero=True)(inputs)
embeddings = layers.Add()([token_embeddings, position_embeddings])
embeddings = Projection(model_dimension, dropout_rate, use_residual=False)(embeddings)
transformer = Transformer(num_attention_heads, transformer_dimension, dropout=dropout_rate)
for i in range(num_transformer_layers):
embeddings = transformer(embeddings)
out = layers.Dense(embedding_dimension, activation=gelu, kernel_initializer=initializer())(embeddings)
out = token_embedding_layer(out, transpose=True)
out = Bias()([out, input_mask])
model = tf.keras.Model([inputs, input_mask], [out], name='model')
if hvd.rank() == 0:
model.summary()
# Horovod: add Horovod DistributedOptimizer.
# opt = tfa.optimizers.AdamW(weight_decay=0.01, learning_rate=learning_rate)
opt = tf.optimizers.Adam(learning_rate=learning_rate)
opt = hvd.DistributedOptimizer(opt)
from tensorflow.python.keras.metrics import MeanMetricWrapper
def exponentiated_sparse_categorical_crossentropy(*args, **kwargs):
return tf.exp(tf.losses.sparse_categorical_crossentropy(*args, **kwargs))
class ExponentiatedSparseCategoricalCrossentropy(MeanMetricWrapper):
def __init__(self,
name='exponentiated_sparse_categorical_crossentropy',
dtype=None,
from_logits=False,
axis=-1):
super(ExponentiatedSparseCategoricalCrossentropy, self).__init__(
exponentiated_sparse_categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
axis=axis)
# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
# uses hvd.DistributedOptimizer() to compute gradients.
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(),
ExponentiatedSparseCategoricalCrossentropy(from_logits=True)],
optimizer=opt,
experimental_run_tf_function=False)
model_name = arguments.modelName
checkpoint_dir = f'{model_name}_checkpoints'
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}.h5")
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
# Horovod: average metrics among workers at the end of every epoch.
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback(),
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first three epochs. See https://arxiv.org/abs/1706.02677 for details.
InverseSquareRootSchedule(learning_rate=learning_rate, warmup_updates=arguments.warmup),
]
# Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
if hvd.rank() == 0:
callbacks.append(tf.keras.callbacks.CSVLogger(f'{checkpoint_dir}/log.csv'))
callbacks.append(tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix))
# Horovod: write logs on worker 0.
verbose = 1 if hvd.rank() == 0 else 0
model.fit(training_data, steps_per_epoch=arguments.stepsPerEpoch, epochs=500,
verbose=verbose, validation_data=valid_data, validation_steps=100,
callbacks=callbacks)
| [
"peterc.stjohn@gmail.com"
] | peterc.stjohn@gmail.com |
e312d6d3314dc67c99a82a7d258f106f69278143 | c70aa626cb96e5a62b8f4789c6bb885229375134 | /ProjectOne/card.py | 4cf7f030878e1e681a71fdbc247088b3836ae994 | [] | no_license | BaranAkcakaya/PythonProject | 279b62907ba9598199fdf1f7642bfb3af9aa98be | 486389196b22df7134b02190de636c949fbb08a1 | refs/heads/main | 2023-01-08T14:51:14.735863 | 2020-11-02T10:34:15 | 2020-11-02T10:34:15 | 309,336,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | #SUIT
HEARTS = 0 #RED
DIAMONDS = 1 #RED
CLUBS = 2 #BLACK
SPADES = 3 #BLACk
#RANK
TWO = 0
THREE = 1
FOUR = 2
FIVE = 3
SIX = 4
SEVEN = 5
EIGHT = 6
NINE = 7
TEN = 8
JACK = 9
QUEEN = 10
KING = 11
ACE = 12
def get_suit(card):
return int((card % 4) - 1) #Suit 4 oldugu için 4 e göre mod alarak hangi suit oldugunu bulabşliriz -1 sebebi ise 0 danbaslıyor
def get_rank(card):
return int((card / 4)) #Burada 4'e göremod alarak hangi dörtlüde olduğunu buluyoruz
def same_suit(card1, card2):
if(get_suit(card1) == get_suit(card2)):
return True
else:
return False
def same_rank(card1, card2):
if(get_rank(card1) == get_rank(card2)):
return True
else:
return False
def same_color_suit(card1, card2):
suit1 = get_suit(card1) #Hazır fonksiyonları kullandık
suit2 = get_suit(card2)
if(suit1 == suit2):
return True
else:
if(suit1<2 and suit2<2):
return True
elif(suit1>1 and suit2>1):
return True
else:
return False
print(get_suit(25))
print(get_rank(19))
print(same_rank(11, 32))
print(same_suit(17, 33))
print(same_color_suit(35, 7)) | [
"noreply@github.com"
] | BaranAkcakaya.noreply@github.com |
701c7372e4483add670bc094a28089806a2fd902 | 0dfa97730b9ad9c077868a045d89cc0d4b09f433 | /tests/integration/goldens/redis/samples/generated_samples/redis_generated_redis_v1_cloud_redis_create_instance_sync.py | d2e83c8ed0211cc443a3c114985748bbc9997b66 | [
"Apache-2.0"
] | permissive | anukaal/gapic-generator-python | 546c303aaf2e722956133b07abb0fb1fe581962f | e3b06895fa179a2038ee2b28e43054e1df617975 | refs/heads/master | 2023-08-24T23:16:32.305652 | 2021-10-09T15:12:14 | 2021-10-09T15:12:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,773 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateInstance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-redis
# [START redis_generated_redis_v1_CloudRedis_CreateInstance_sync]
from google.cloud import redis_v1
def sample_create_instance():
"""Snippet for create_instance"""
# Create a client
client = redis_v1.CloudRedisClient()
# Initialize request argument(s)
instance = redis_v1.Instance()
instance.name = "name_value"
instance.tier = "STANDARD_HA"
instance.memory_size_gb = 1499
request = redis_v1.CreateInstanceRequest(
parent="projects/{project}/locations/{location}",
instance_id="instance_id_value",
instance=instance,
)
# Make the request
operation = client.create_instance(request=request)
print("Waiting for operation to complete...")
response = operation.result()
print(response)
# [END redis_generated_redis_v1_CloudRedis_CreateInstance_sync]
| [
"noreply@github.com"
] | anukaal.noreply@github.com |
4b52610a596acae2c84caeeeab48b5fe380dafce | c4a8f3200add74f4c42fe34b2f3b284d6249a481 | /sciwx/demo/mesh2_mesh_demo.py | 076d254d04a5dd678a223fd6c388ca83397f2f19 | [
"BSD-2-Clause"
] | permissive | pengguanjun/imagepy | 1908e83a7ec2a6472524f443aefeaade12c2b649 | d96ef98c2c3e93d368131fd2753bce164e1247cd | refs/heads/master | 2022-12-15T11:48:04.841436 | 2020-09-14T11:59:49 | 2020-09-14T11:59:49 | 275,398,356 | 1 | 0 | NOASSERTION | 2020-06-27T15:18:59 | 2020-06-27T15:18:58 | null | UTF-8 | Python | false | false | 1,249 | py | import sys, wx
sys.path.append('../../')
from sciwx.mesh import Canvas3D, MCanvas3D, MeshSet
from sciapp.util import surfutil
from sciapp.object import Surface
from sciwx.mesh import Canvas3DFrame, Canvas3DNoteBook, Canvas3DNoteFrame
vts, fs, ns, cs = surfutil.build_ball((100,100,100),50, (1,0,0))
def add_with_para():
cnf = Canvas3DFrame(None)
surf = Surface(vts, fs, ns, cs, mode='grid')
cnf.add_surf('gridball', surf)
cnf.Show()
def mesh_obj_test():
cnf = Canvas3DFrame(None)
meshes = MeshSet()
vts, fs, ns, cs = surfutil.build_ball((100,100,100),50, (1,0,0))
redball = Surface(vts, fs, ns, cs)
meshes.add_surf('redball', redball)
vts, fs, ns, cs = surfutil.build_ball((300,100,100),50, (1,1,0))
yellowball = Surface(vts, fs, ns, cs, mode='grid')
meshes.add_surf('yellowball', yellowball)
hideball = Surface(vts, fs, ns, cs)
vts, fs, ns, cs = surfutil.build_ball((300,-300,100),50, (0,1,0))
hideball = Surface(vts, fs, ns, cs, visible=False)
hideball = meshes.add_surf('hideball', hideball)
meshes.background = (0, 0, 0.3)
cnf.set_mesh(meshes)
cnf.Show()
if __name__ == '__main__':
app = wx.App()
add_with_para()
mesh_obj_test()
app.MainLoop()
| [
"imagepy@sina.com"
] | imagepy@sina.com |
c996297347ac933fba624d954a3d78dd294d33ff | 75ec986d34d5391d46d6469c513626f69f5d978d | /Incepator/cycles/cycles3.py | e733dc4092d410e8ac117846f3fbe5203f2b7e44 | [] | no_license | CatrunaMarius/python | d9f8dc221458e4b65c3f801daf3b59aa2b946358 | d063bffb4eafa56ac1e205c2d39fc893ab50e992 | refs/heads/master | 2020-04-24T05:23:22.756002 | 2020-01-06T11:56:12 | 2020-01-06T11:56:12 | 171,703,482 | 0 | 0 | null | 2019-02-20T16:12:39 | 2019-02-20T15:59:08 | null | UTF-8 | Python | false | false | 150 | py | #multiplication table (while)
i=1
while i<10:
j=1
while j<10:
print("%4d" % (i*j), end="")
j+=1
print()
i+=1
| [
"noreply@github.com"
] | CatrunaMarius.noreply@github.com |
bc89c88a357f13ea1706d544aa08b81f6fb6036f | 1bad7d2b7fc920ecf2789755ed7f44b039d4134d | /A other/エイシング プログラミング コンテスト 2020/B.py | 77e2b68e5bde2a0361da7ee0c237f62e8c1262a4 | [] | no_license | kanekyo1234/AtCoder_solve | ce95caafd31f7c953c0fc699f0f4897dddd7a159 | e5ea7b080b72a2a2fd3fcb826cd10c4ab2e2720e | refs/heads/master | 2023-04-01T04:01:15.885945 | 2021-04-06T04:03:31 | 2021-04-06T04:03:31 | 266,151,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | n = int(input())
a = list(map(int, input().split()))
ans = 0
for i in range(n):
if (i+1) % 2 == 1 and a[i] % 2 == 1:
ans += 1
print(ans)
| [
"kanekyohunter.0314@softbank.ne.jp"
] | kanekyohunter.0314@softbank.ne.jp |
dffe88ae437ac28f4a3898604f43f1694690b380 | 8ee8fe3c2acea497a85428bfb3dfde19e58b2bc3 | /test-examples/range_one_image.py | 5b8657935e4171ac7ebfc64c064995200cabb8e0 | [
"BSD-3-Clause"
] | permissive | sofroniewn/image-demos | a6e46f08fd4ce621aa96d6b6378b50f63ac2b381 | 2eeeb23f34a47798ae7be0987182724ee3799eb8 | refs/heads/master | 2022-11-02T23:50:23.098830 | 2022-10-30T04:38:19 | 2022-10-30T04:38:19 | 179,378,745 | 11 | 1 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | """
Test adding an image with a range one dimensions.
There should be no slider shown for the axis corresponding to the range
one dimension.
"""
import numpy as np
from skimage import data
import napari
with napari.gui_qt():
viewer = napari.view_image(np.random.random((4, 4, 1, 30, 40)))
| [
"sofroniewn@gmail.com"
] | sofroniewn@gmail.com |
d15b12f713424c1f85485dd0dbc9c96a2b451d2c | 63f443df12960b3eda3ac23e4a2c343b2c8a04f8 | /text/1.py | 1debf0c03aa5d4f7cbddfe0b3a77a13991bcc068 | [] | no_license | persontianshuang/crapy500m | 7c30e101da6f578c475a63117fa062000ce9460f | 0e725f037a460fc01926000e023d4f5dbb6c1f4e | refs/heads/master | 2021-08-09T02:54:49.102719 | 2017-11-12T00:52:37 | 2017-11-12T00:52:37 | 102,925,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import os
file="C:\\Users\Administrator\Desktop\不侵权\沙拉盘不侵权-38.xls"
with open(file, 'r',encoding='gbk') as f:
lines = f.readlines()
for line in lines:
line = line
print(line)
| [
"mengyouhan@gmail.com"
] | mengyouhan@gmail.com |
cecd22f5838eed05a65114d0526a5383ebf2af83 | fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd | /build/config/linux/pkg-config.py | 32068ada80b0b4a41d1a52a4c379f1fec68a3456 | [
"BSD-3-Clause"
] | permissive | wzyy2/chromium-browser | 2644b0daf58f8b3caee8a6c09a2b448b2dfe059c | eb905f00a0f7e141e8d6c89be8fb26192a88c4b7 | refs/heads/master | 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 | BSD-3-Clause | 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null | UTF-8 | Python | false | false | 7,689 | py | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import subprocess
import sys
import re
from optparse import OptionParser
# This script runs pkg-config, optionally filtering out some results, and
# returns the result.
#
# The result will be [ <includes>, <cflags>, <libs>, <lib_dirs>, <ldflags> ]
# where each member is itself a list of strings.
#
# You can filter out matches using "-v <regexp>" where all results from
# pkgconfig matching the given regular expression will be ignored. You can
# specify more than one regular expression my specifying "-v" more than once.
#
# You can specify a sysroot using "-s <sysroot>" where sysroot is the absolute
# system path to the sysroot used for compiling. This script will attempt to
# generate correct paths for the sysroot.
#
# When using a sysroot, you must also specify the architecture via
# "-a <arch>" where arch is either "x86" or "x64".
#
# CrOS systemroots place pkgconfig files at <systemroot>/usr/share/pkgconfig
# and one of <systemroot>/usr/lib/pkgconfig or <systemroot>/usr/lib64/pkgconfig
# depending on whether the systemroot is for a 32 or 64 bit architecture. They
# specify the 'lib' or 'lib64' of the pkgconfig path by defining the
# 'system_libdir' variable in the args.gn file. pkg_config.gni communicates this
# variable to this script with the "--system_libdir <system_libdir>" flag. If no
# flag is provided, then pkgconfig files are assumed to come from
# <systemroot>/usr/lib/pkgconfig.
#
# Additionally, you can specify the option --atleast-version. This will skip
# the normal outputting of a dictionary and instead print true or false,
# depending on the return value of pkg-config for the given package.
def SetConfigPath(options):
"""Set the PKG_CONFIG_LIBDIR environment variable.
This takes into account any sysroot and architecture specification from the
options on the given command line.
"""
sysroot = options.sysroot
assert sysroot
# Compute the library path name based on the architecture.
arch = options.arch
if sysroot and not arch:
print "You must specify an architecture via -a if using a sysroot."
sys.exit(1)
libdir = sysroot + '/usr/' + options.system_libdir + '/pkgconfig'
libdir += ':' + sysroot + '/usr/share/pkgconfig'
os.environ['PKG_CONFIG_LIBDIR'] = libdir
return libdir
def GetPkgConfigPrefixToStrip(options, args):
"""Returns the prefix from pkg-config where packages are installed.
This returned prefix is the one that should be stripped from the beginning of
directory names to take into account sysroots.
"""
# Some sysroots, like the Chromium OS ones, may generate paths that are not
# relative to the sysroot. For example,
# /path/to/chroot/build/x86-generic/usr/lib/pkgconfig/pkg.pc may have all
# paths relative to /path/to/chroot (i.e. prefix=/build/x86-generic/usr)
# instead of relative to /path/to/chroot/build/x86-generic (i.e prefix=/usr).
# To support this correctly, it's necessary to extract the prefix to strip
# from pkg-config's |prefix| variable.
prefix = subprocess.check_output([options.pkg_config,
"--variable=prefix"] + args, env=os.environ)
if prefix[-4] == '/usr':
return prefix[4:]
return prefix
def MatchesAnyRegexp(flag, list_of_regexps):
"""Returns true if the first argument matches any regular expression in the
given list."""
for regexp in list_of_regexps:
if regexp.search(flag) != None:
return True
return False
def RewritePath(path, strip_prefix, sysroot):
"""Rewrites a path by stripping the prefix and prepending the sysroot."""
if os.path.isabs(path) and not path.startswith(sysroot):
if path.startswith(strip_prefix):
path = path[len(strip_prefix):]
path = path.lstrip('/')
return os.path.join(sysroot, path)
else:
return path
def main():
# If this is run on non-Linux platforms, just return nothing and indicate
# success. This allows us to "kind of emulate" a Linux build from other
# platforms.
if "linux" not in sys.platform:
print "[[],[],[],[],[]]"
return 0
parser = OptionParser()
parser.add_option('-d', '--debug', action='store_true')
parser.add_option('-p', action='store', dest='pkg_config', type='string',
default='pkg-config')
parser.add_option('-v', action='append', dest='strip_out', type='string')
parser.add_option('-s', action='store', dest='sysroot', type='string')
parser.add_option('-a', action='store', dest='arch', type='string')
parser.add_option('--system_libdir', action='store', dest='system_libdir',
type='string', default='lib')
parser.add_option('--atleast-version', action='store',
dest='atleast_version', type='string')
parser.add_option('--libdir', action='store_true', dest='libdir')
(options, args) = parser.parse_args()
# Make a list of regular expressions to strip out.
strip_out = []
if options.strip_out != None:
for regexp in options.strip_out:
strip_out.append(re.compile(regexp))
if options.sysroot:
libdir = SetConfigPath(options)
if options.debug:
sys.stderr.write('PKG_CONFIG_LIBDIR=%s\n' % libdir)
prefix = GetPkgConfigPrefixToStrip(options, args)
else:
prefix = ''
if options.atleast_version:
# When asking for the return value, just run pkg-config and print the return
# value, no need to do other work.
if not subprocess.call([options.pkg_config,
"--atleast-version=" + options.atleast_version] +
args):
print "true"
else:
print "false"
return 0
if options.libdir:
cmd = [options.pkg_config, "--variable=libdir"] + args
if options.debug:
sys.stderr.write('Running: %s\n' % cmd)
try:
libdir = subprocess.check_output(cmd)
except:
print "Error from pkg-config."
return 1
sys.stdout.write(libdir.strip())
return 0
cmd = [options.pkg_config, "--cflags", "--libs"] + args
if options.debug:
sys.stderr.write('Running: %s\n' % ' '.join(cmd))
try:
flag_string = subprocess.check_output(cmd)
except:
sys.stderr.write('Could not run pkg-config.\n')
return 1
# For now just split on spaces to get the args out. This will break if
# pkgconfig returns quoted things with spaces in them, but that doesn't seem
# to happen in practice.
all_flags = flag_string.strip().split(' ')
sysroot = options.sysroot
if not sysroot:
sysroot = ''
includes = []
cflags = []
libs = []
lib_dirs = []
ldflags = []
for flag in all_flags[:]:
if len(flag) == 0 or MatchesAnyRegexp(flag, strip_out):
continue;
if flag[:2] == '-l':
libs.append(RewritePath(flag[2:], prefix, sysroot))
elif flag[:2] == '-L':
lib_dirs.append(RewritePath(flag[2:], prefix, sysroot))
elif flag[:2] == '-I':
includes.append(RewritePath(flag[2:], prefix, sysroot))
elif flag[:3] == '-Wl':
ldflags.append(flag)
elif flag == '-pthread':
# Many libs specify "-pthread" which we don't need since we always include
# this anyway. Removing it here prevents a bunch of duplicate inclusions
# on the command line.
pass
else:
cflags.append(flag)
# Output a GN array, the first one is the cflags, the second are the libs. The
# JSON formatter prints GN compatible lists when everything is a list of
# strings.
print json.dumps([includes, cflags, libs, lib_dirs, ldflags])
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"jacob-chen@iotwrt.com"
] | jacob-chen@iotwrt.com |
69743a42fb64f06773a238d89bfac39069777e78 | 39d4504ec1da8975fac526d6801b94f4348b6b61 | /research/object_detection/dataset_tools/create_oid_tf_record.py | c5f409af4da470ff0c9da20f6a8793306a1016f3 | [
"Apache-2.0"
] | permissive | vincentcheny/models | fe0ff5888e6ee00a0d4fa5ee14154acdbeebe7ad | afb1a59fc1bc792ac72d1a3e22e2469020529788 | refs/heads/master | 2020-07-23T21:38:24.559521 | 2019-11-15T07:50:11 | 2019-11-15T07:50:11 | 207,712,649 | 1 | 0 | Apache-2.0 | 2019-09-11T03:12:31 | 2019-09-11T03:12:31 | null | UTF-8 | Python | false | false | 5,315 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates TFRecords of Open Images dataset for object detection.
Example usage:
python object_detection/dataset_tools/create_oid_tf_record.py \
--input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \
--input_image_label_annotations_csv=/path/to/input/annotations-label.csv \
--input_images_directory=/path/to/input/image_pixels_directory \
--input_label_map=/path/to/input/labels_bbox_545.labelmap \
--output_tf_record_path_prefix=/path/to/output/prefix.tfrecord
CSVs with bounding box annotations and image metadata (including the image URLs)
can be downloaded from the Open Images GitHub repository:
https://github.com/openimages/dataset
This script will include every image found in the input_images_directory in the
output TFRecord, even if the image has no corresponding bounding box annotations
in the input_annotations_csv. If input_image_label_annotations_csv is specified,
it will add image-level labels as well. Note that the information of whether a
label is positivelly or negativelly verified is NOT added to tfrecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import pandas as pd
import tensorflow as tf
from object_detection.dataset_tools import oid_tfrecord_creation
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import label_map_util
tf.flags.DEFINE_string('input_box_annotations_csv', None,
'Path to CSV containing image bounding box annotations')
tf.flags.DEFINE_string('input_images_directory', None,
'Directory containing the image pixels '
'downloaded from the OpenImages GitHub repository.')
tf.flags.DEFINE_string('input_image_label_annotations_csv', None,
'Path to CSV containing image-level labels annotations')
tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')
tf.flags.DEFINE_string(
'output_tf_record_path_prefix', None,
'Path to the output TFRecord. The shard index and the number of shards '
'will be appended for each output shard.')
tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards')
FLAGS = tf.flags.FLAGS
def main(_):
  """Joins OID box/label annotations with on-disk images and writes
  sharded TFRecords under FLAGS.output_tf_record_path_prefix."""
  tf.logging.set_verbosity(tf.logging.INFO)

  # Fail fast if any mandatory flag is missing.
  required_flags = [
      'input_box_annotations_csv', 'input_images_directory', 'input_label_map',
      'output_tf_record_path_prefix'
  ]
  for flag_name in required_flags:
    if not getattr(FLAGS, flag_name):
      raise ValueError('Flag --{} is required'.format(flag_name))

  label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
  all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)
  if FLAGS.input_image_label_annotations_csv:
    all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)
    # Rename so box 'Confidence' and image-label confidence stay distinct
    # after the concat below.
    all_label_annotations.rename(
        columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
  else:
    all_label_annotations = None
  all_images = tf.gfile.Glob(
      os.path.join(FLAGS.input_images_directory, '*.jpg'))
  # Image IDs are the file names without the ".jpg" extension.
  all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
  all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
  # Concatenating the bare ID frame guarantees every image on disk appears
  # in the groupby below, even with no annotations (pd.concat ignores None).
  all_annotations = pd.concat(
      [all_box_annotations, all_image_ids, all_label_annotations])

  tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))

  with contextlib2.ExitStack() as tf_record_close_stack:
    output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
        tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
        FLAGS.num_shards)
    for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
      tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
                             counter)
      image_id, image_annotations = image_data
      # In OID image file names are formed by appending ".jpg" to the image ID.
      image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
      with tf.gfile.Open(image_path) as image_file:
        encoded_image = image_file.read()
      tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
          image_annotations, label_map, encoded_image)
      if tf_example:
        # OID image IDs are hex strings; hash them into a shard index.
        shard_idx = int(image_id, 16) % FLAGS.num_shards
        output_tfrecords[shard_idx].write(tf_example.SerializeToString())


if __name__ == '__main__':
  tf.app.run()
| [
"1155107977@link.cuhk.edu.hk"
] | 1155107977@link.cuhk.edu.hk |
0abd0228cbd16d4d2350bd9c779f9884e2e9aa4a | e1bdbd08afec39c1ee56a3885a837ec966543a2d | /Section_07_code/extract_freq_features.py | 00d66469084172389834335b106181bd1cd6dc6e | [
"MIT"
] | permissive | PacktPublishing/Python-Machine-Learning-Solutions-V- | 507bd8b285f051d2761a5348e4a8c9a50329287a | 8bb80a43a7c64032c25c1023faaa29bbfbd39d45 | refs/heads/master | 2023-02-28T05:19:49.782472 | 2021-01-20T09:11:09 | 2021-01-20T09:11:09 | 188,817,647 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from python_speech_features import mfcc, logfbank
# Read input sound file.
sampling_freq, audio = wavfile.read("input_freq.wav")

# Extract MFCC and filter-bank features from the raw waveform.
mfcc_features = mfcc(audio, sampling_freq)
filterbank_features = logfbank(audio, sampling_freq)

# Print parameters: each feature matrix is (num_windows, feature_length).
print('\nMFCC:\nNumber of windows =', mfcc_features.shape[0])
print('Length of each feature =', mfcc_features.shape[1])
print('\nFilter bank:\nNumber of windows =', filterbank_features.shape[0])
# BUG FIX: the original line was missing its closing parenthesis, which
# made the whole script a SyntaxError.
print('Length of each feature =', filterbank_features.shape[1])

# Plot the features (transposed so time runs along the x axis).
mfcc_features = mfcc_features.T
plt.matshow(mfcc_features)
plt.title('MFCC')
filterbank_features = filterbank_features.T
plt.matshow(filterbank_features)
plt.title('Filter bank')
plt.show()
| [
"sonalis@packtpub.com"
] | sonalis@packtpub.com |
02dd1640ec63d777df0a4a38be03877dc2b326dc | df64b9d1851d3b5770ef4cd726bb6898911d8aff | /protos/final_tree.py | 66152b4264328ecb47431aed5f8376566a9027e7 | [] | no_license | lampts/kaggle_quora | 10bbd5e16f659719bb4eee5bd5adaf2cc9a08737 | d35d2619e8f2762cc648eb0829a4c54e698871ea | refs/heads/master | 2021-07-11T04:33:46.640082 | 2017-05-28T08:43:01 | 2017-05-28T08:43:01 | 108,620,557 | 0 | 1 | null | 2017-10-28T04:49:50 | 2017-10-28T04:49:50 | null | UTF-8 | Python | false | false | 4,147 | py | from sklearn.model_selection import cross_val_predict
from sklearn.tree.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from lightgbm.sklearn import LGBMClassifier
import pandas
import pickle
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import GridSearchCV, ParameterGrid, StratifiedKFold, cross_val_predict
from tfidf_k import calc_weight
from sklearn.metrics import log_loss, roc_auc_score
from logging import StreamHandler, DEBUG, Formatter, FileHandler
log_fmt = Formatter('%(asctime)s %(name)s %(lineno)d [%(levelname)s][%(funcName)s] %(message)s ')
from logging import getLogger
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel('INFO')
handler.setFormatter(log_fmt)
logger.setLevel('INFO')
logger.addHandler(handler)
# Load the clique feature table; sample weights rebalance the label
# distribution (see calc_weight in tfidf_k).
aaa = pandas.read_csv('clique_data.csv')
sample_weight = calc_weight(aaa['label'].values)

# , 'emax', 'emin'] # , # 'l_score', 'r_score', 'm_score'] #
# NOTE: the first use_cols assignment is dead -- immediately overwritten by
# the second (kept for experiment history).
use_cols = ['cnum', 'pred', 'new', 'vmax', 'vmin', 'vavg']  # , 'emax', 'emin']
use_cols = ['cnum', 'pred', 'vmax', 'vmin', 'vavg']  # , 'emax', 'emin']
#'l_num', 'r_num', 'm_num']
x_train = aaa[use_cols].values
y_train = aaa['label'].values

# Hyper-parameter grid (single combination here; commented values are
# earlier experiments).
all_params = {'max_depth': [5],  # [14],
              'learning_rate': [0.02],  # [0.06, 0.1, 0.2],
              'n_estimators': [10000],
              'min_child_weight': [1],
              'colsample_bytree': [0.7],
              'boosting_type': ['gbdt'],
              #'num_leaves': [32, 100, 200], # [1300, 1500, 2000],
              'subsample': [0.99],
              'min_child_samples': [5],
              'reg_alpha': [0],
              'reg_lambda': [0],
              'max_bin': [500],
              'min_split_gain': [0.1],
              'silent': [True],
              'seed': [2261]
              }

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=871)

# (avg, min, max) over folds; 100 acts as a +inf sentinel so the first
# parameter set always wins, which also guarantees min_score2/min_params
# are bound before they are logged below.
min_score = (100, 100, 100)
min_params = None
use_score = 0  # index into the (avg, min, max) tuples used for model selection
logger.info('x size {}'.format(x_train.shape))
for params in tqdm(list(ParameterGrid(all_params))):
    cnt = 0  # NOTE: never used below
    list_score = []
    list_score2 = []
    list_best_iter = []
    all_pred = np.zeros(y_train.shape[0])  # NOTE: allocated but never filled
    for train, test in cv.split(x_train, y_train):
        trn_x = x_train[train]
        val_x = x_train[test]
        trn_y = y_train[train]
        val_y = y_train[test]
        trn_w = sample_weight[train]
        val_w = sample_weight[test]
        clf = LGBMClassifier(**params)
        clf.fit(trn_x, trn_y,
                sample_weight=trn_w,
                eval_sample_weight=[val_w],
                eval_set=[(val_x, val_y)],
                verbose=False,
                # eval_metric='logloss',
                early_stopping_rounds=100
                )
        pred = clf.predict_proba(val_x)[:, 1]
        # Weighted log-loss (minimized) and negated AUC (so lower is better
        # for both metrics).
        _score = log_loss(val_y, pred, sample_weight=val_w)
        _score2 = - roc_auc_score(val_y, pred, sample_weight=val_w)
        list_score.append(_score)
        list_score2.append(_score2)
        # NOTE(review): relies on the old LightGBM `best_iteration`
        # attribute (newer sklearn API spells it `best_iteration_`) --
        # confirm against the pinned lightgbm version.
        if clf.best_iteration != -1:
            list_best_iter.append(clf.best_iteration)
        else:
            list_best_iter.append(params['n_estimators'])
    logger.info('trees: {}'.format(list_best_iter))
    # Refit budget for the final model: mean early-stopped tree count.
    params['n_estimators'] = np.mean(list_best_iter, dtype=int)
    score = (np.mean(list_score), np.min(list_score), np.max(list_score))
    score2 = (np.mean(list_score2), np.min(list_score2), np.max(list_score2))
    logger.info('param: %s' % (params))
    logger.info('loss: {} (avg min max {})'.format(score[use_score], score))
    logger.info('score: {} (avg min max {})'.format(score2[use_score], score2))
    if min_score[use_score] > score[use_score]:
        min_score = score
        min_score2 = score2
        min_params = params
    # Running best, logged after every grid point.
    logger.info('best score: {} {}'.format(min_score[use_score], min_score))
    logger.info('best score2: {} {}'.format(min_score2[use_score], min_score2))
    logger.info('best_param: {}'.format(min_params))

# Refit the best configuration on the full training set and persist it.
final_tree = LGBMClassifier(**min_params)
final_tree.fit(x_train, y_train, sample_weight=sample_weight)
with open('final_tree.pkl', 'wb') as f:
    pickle.dump(final_tree, f, -1)
| [
"you@example.com"
] | you@example.com |
c519d75ba0c468f9c100a50c6b1a7cdb34ba4573 | f1679e8c872e7e5d12d947a47920d5bad4d3b92a | /paciente/migrations/0002_paciente_imagem.py | 7ed0e6a8112b42ff40c7a8f125867af6457ec1e6 | [] | no_license | lldenisll/doctor_backend | d03e6090fae704bae9eabd002aefeb63de5a9d78 | 071911186b1f2940feff7549ca9c49b9f8c7ce22 | refs/heads/master | 2023-03-27T21:28:49.571818 | 2021-04-04T22:27:45 | 2021-04-04T22:27:45 | 354,152,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | # Generated by Django 3.1.7 on 2021-04-03 19:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``imagem`` ImageField (uploaded under ``img/``)
    to the existing ``Paciente`` model."""

    dependencies = [
        ('paciente', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='paciente',
            name='imagem',
            # null=True keeps existing rows valid without a picture.
            field=models.ImageField(null=True, upload_to='img'),
        ),
    ]
| [
"namorado@TFGcos-MacBook-Pro.local"
] | namorado@TFGcos-MacBook-Pro.local |
e88613a1e28d2556c2ad8050f5e8ab0266b4c4d4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_282/ch23_2020_03_04_22_50_49_571106.py | e8af7c8bab6de99ba6d7382438bd6a574da48447 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | velocidade = int(input('qual eh a velocidade? '))
# Speeding fine: R$5.00 for every km/h above the 80 km/h limit.
excesso = velocidade - 80
if excesso > 0:
    print('multa de R${0:.2f}'.format(excesso * 5))
else:
    print('Não foi multado')
"you@example.com"
] | you@example.com |
931ad54b3f9cc8bcf4700cad46f9d5985056e646 | bd435e3ff491d13c3cb1ffcf34771ac1c80f7859 | /code/flask/bookshare/app/views.py | a4a2b0f8c1669efd4097e629e116ff3682494636 | [] | no_license | luningcowboy/PythonTutorial | 8f4b6d16e0fad99a226540a6f12639ccdff402ff | 9024efe8ed22aca0a1271a2c1c388d3ffe1e6690 | refs/heads/master | 2021-06-16T23:03:22.153473 | 2020-04-09T13:52:12 | 2020-04-09T13:52:12 | 187,571,993 | 0 | 0 | null | 2021-03-25T23:02:36 | 2019-05-20T05:16:13 | Python | UTF-8 | Python | false | false | 2,211 | py | from flask import render_template, url_for, redirect
from app import app
# In-memory fixture data standing in for a database.
# Each book dict carries: name, author, id (also its list index), desc,
# chapter list, download links and a cover image URL.  getBooks() later
# adds a 'url' key in place.
books = [
    {'name':'呐喊1','author':'鲁迅1','id':0,'desc':'这是一本关于呐喊的书','contents':['xxxxx','xxxx','xxxx'],'download':['https://www.baidu.com','https://www.baidu.com'],'pic_url':'http://haodoo.net/covers/17Z7.jpg'},
    {'name':'呐喊2','author':'鲁迅2','id':1,'desc':'这是一本关于呐喊的书','contents':['xxxxx','xxxx','xxxx'],'download':['https://www.baidu.com','https://www.baidu.com'],'pic_url':'http://haodoo.net/covers/17Z7.jpg'},
    {'name':'呐喊3','author':'鲁迅3','id':2,'desc':'这是一本关于呐喊的书','contents':['xxxxx','xxxx','xxxx'],'download':['https://www.baidu.com','https://www.baidu.com'],'pic_url':'http://haodoo.net/covers/17Z7.jpg'},
    {'name':'呐喊4','author':'鲁迅4','id':3,'desc':'这是一本关于呐喊的书','contents':['xxxxx','xxxx','xxxx'],'download':['https://www.baidu.com','https://www.baidu.com'],'pic_url':'http://haodoo.net/covers/17Z7.jpg'}
]
# Category fixtures; 'tag' is the value routed through /type/<type>.
# getTypes() adds a 'url' key in place.
types = [
    {'name':'计算机','tag':'1'},
    {'name':'小说','tag':'2'},
    {'name':'小说1','tag':'3'},
    {'name':'小说2','tag':'4'},
    {'name':'计算机','tag':'5'},
    {'name':'小说','tag':'6'},
    {'name':'小说1','tag':'7'},
    {'name':'小说2','tag':'8'}]
def getTypes():
    """Return the category list, resolving a listing-page 'url' for each.

    NOTE: the 'url' key is written into the shared module-level ``types``
    dicts in place.
    """
    # BUG FIX: removed a leftover debug print that ran on every request.
    ret = []
    for t in types:
        t['url'] = url_for('type', type=t['tag'])
        ret.append(t)
    return ret
def getBooks():
    """Return the book list, resolving a detail-page 'url' for each.

    NOTE: the 'url' key is written into the shared module-level ``books``
    dicts in place.
    """
    for book in books:
        book['url'] = url_for('book_desc', book_id=book['id'])
    return list(books)
@app.route("/")
@app.route("/index/")
def index():
return render_template('index.html',types=getTypes(),books=getBooks())
@app.route("/type/<type>")
def type(type):
tmpBooks = getBooks()
return render_template('type.html', type=type,books=books,types=getTypes())
@app.route("/book_desc/<book_id>")
def book_desc(book_id):
if not book_id:
return redirect(url_for('index.html'))
tmpBooks = getBooks()
bookInfo = tmpBooks[int(book_id)]
return render_template('book_desc.html', bookInfo=bookInfo, types=getTypes())
| [
"luningcowboy@gmail.com"
] | luningcowboy@gmail.com |
0b099bf370ff65eb2b99e6e3a29c31e8d1e4e3b5 | 5d1892f6db3c7bba1699455d934b6193840346c6 | /swig/swig_example/cppfunctions.py | 09785e80b6dd21e7d3ccd60aaa2a23f80b3096e8 | [] | no_license | sylvaus/python_bindings | ecd6595324e73ec66753cb5c12d86788b10beaa8 | ddf3ed7a59497c2f73fc47dfaa752d629ae98d99 | refs/heads/master | 2021-03-22T09:07:54.492561 | 2020-03-14T20:49:41 | 2020-03-14T20:49:41 | 247,351,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,024 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.1
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _cppfunctions
else:
import _cppfunctions
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
    # SWIG-generated repr() helper shared by all proxy classes; falls back
    # to an empty description when the underlying C++ "this" pointer is
    # absent.  (Auto-generated file -- regenerate via SWIG, do not edit.)
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
    # Wrap a __setattr__ so instances may only rebind the special SWIG
    # slots ('this'/'thisown') or existing property-backed attributes;
    # creating new instance attributes is rejected.
    # NOTE: the parameter deliberately shadows the builtin set() --
    # generated code, left as-is.
    def set_instance_attr(self, name, value):
        if name == "thisown":
            self.this.own(value)
        elif name == "this":
            set(self, name, value)
        elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add instance attributes to %s" % self)
    return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
    # Class-level analogue of the instance wrapper above: only existing,
    # non-property class attributes may be rebound; new ones are rejected.
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            set(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    def wrapper(cls):
        # Rebuild the class under the given metaclass, preserving name,
        # bases and namespace.
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Delegates to the class-variable guard defined above.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
class SwigPyIterator(object):
    # SWIG-generated abstract proxy for C++ iterators.  Every method simply
    # forwards to the corresponding wrapper in the _cppfunctions extension
    # module.  (Auto-generated file -- regenerate via SWIG, do not edit.)
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _cppfunctions.delete_SwigPyIterator

    def value(self):
        return _cppfunctions.SwigPyIterator_value(self)

    def incr(self, n=1):
        return _cppfunctions.SwigPyIterator_incr(self, n)

    def decr(self, n=1):
        return _cppfunctions.SwigPyIterator_decr(self, n)

    def distance(self, x):
        return _cppfunctions.SwigPyIterator_distance(self, x)

    def equal(self, x):
        return _cppfunctions.SwigPyIterator_equal(self, x)

    def copy(self):
        return _cppfunctions.SwigPyIterator_copy(self)

    def next(self):
        return _cppfunctions.SwigPyIterator_next(self)

    def __next__(self):
        return _cppfunctions.SwigPyIterator___next__(self)

    def previous(self):
        return _cppfunctions.SwigPyIterator_previous(self)

    def advance(self, n):
        return _cppfunctions.SwigPyIterator_advance(self, n)

    def __eq__(self, x):
        return _cppfunctions.SwigPyIterator___eq__(self, x)

    def __ne__(self, x):
        return _cppfunctions.SwigPyIterator___ne__(self, x)

    def __iadd__(self, n):
        return _cppfunctions.SwigPyIterator___iadd__(self, n)

    def __isub__(self, n):
        return _cppfunctions.SwigPyIterator___isub__(self, n)

    def __add__(self, n):
        return _cppfunctions.SwigPyIterator___add__(self, n)

    def __sub__(self, *args):
        return _cppfunctions.SwigPyIterator___sub__(self, *args)

    def __iter__(self):
        return self

# Register SwigPyIterator in _cppfunctions:
_cppfunctions.SwigPyIterator_swigregister(SwigPyIterator)
class IntVector(object):
    # SWIG-generated proxy for std::vector<int>, exposing both the Python
    # sequence protocol (__getitem__/append/pop/...) and the C++ vector API
    # (push_back/begin/reserve/...).  Every method forwards to the
    # _cppfunctions extension module.
    # (Auto-generated file -- regenerate via SWIG, do not edit.)
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def iterator(self):
        return _cppfunctions.IntVector_iterator(self)

    def __iter__(self):
        return self.iterator()

    def __nonzero__(self):
        return _cppfunctions.IntVector___nonzero__(self)

    def __bool__(self):
        return _cppfunctions.IntVector___bool__(self)

    def __len__(self):
        return _cppfunctions.IntVector___len__(self)

    def __getslice__(self, i, j):
        return _cppfunctions.IntVector___getslice__(self, i, j)

    def __setslice__(self, *args):
        return _cppfunctions.IntVector___setslice__(self, *args)

    def __delslice__(self, i, j):
        return _cppfunctions.IntVector___delslice__(self, i, j)

    def __delitem__(self, *args):
        return _cppfunctions.IntVector___delitem__(self, *args)

    def __getitem__(self, *args):
        return _cppfunctions.IntVector___getitem__(self, *args)

    def __setitem__(self, *args):
        return _cppfunctions.IntVector___setitem__(self, *args)

    def pop(self):
        return _cppfunctions.IntVector_pop(self)

    def append(self, x):
        return _cppfunctions.IntVector_append(self, x)

    def empty(self):
        return _cppfunctions.IntVector_empty(self)

    def size(self):
        return _cppfunctions.IntVector_size(self)

    def swap(self, v):
        return _cppfunctions.IntVector_swap(self, v)

    def begin(self):
        return _cppfunctions.IntVector_begin(self)

    def end(self):
        return _cppfunctions.IntVector_end(self)

    def rbegin(self):
        return _cppfunctions.IntVector_rbegin(self)

    def rend(self):
        return _cppfunctions.IntVector_rend(self)

    def clear(self):
        return _cppfunctions.IntVector_clear(self)

    def get_allocator(self):
        return _cppfunctions.IntVector_get_allocator(self)

    def pop_back(self):
        return _cppfunctions.IntVector_pop_back(self)

    def erase(self, *args):
        return _cppfunctions.IntVector_erase(self, *args)

    def __init__(self, *args):
        _cppfunctions.IntVector_swiginit(self, _cppfunctions.new_IntVector(*args))

    def push_back(self, x):
        return _cppfunctions.IntVector_push_back(self, x)

    def front(self):
        return _cppfunctions.IntVector_front(self)

    def back(self):
        return _cppfunctions.IntVector_back(self)

    def assign(self, n, x):
        return _cppfunctions.IntVector_assign(self, n, x)

    def resize(self, *args):
        return _cppfunctions.IntVector_resize(self, *args)

    def insert(self, *args):
        return _cppfunctions.IntVector_insert(self, *args)

    def reserve(self, n):
        return _cppfunctions.IntVector_reserve(self, n)

    def capacity(self):
        return _cppfunctions.IntVector_capacity(self)
    __swig_destroy__ = _cppfunctions.delete_IntVector

# Register IntVector in _cppfunctions:
_cppfunctions.IntVector_swigregister(IntVector)
def plus_two_list(v):
    # Thin wrapper over the C++ plus_two_list; presumably returns the
    # vector with 2 added to each element -- TODO confirm against the
    # SWIG interface / C++ source.
    return _cppfunctions.plus_two_list(v)
| [
"pierreyves.breches74@gmail.com"
] | pierreyves.breches74@gmail.com |
49df7f342de48948163f94f572a958289a1e794f | 442fd46c2647d4988d409563d50a841fc1cdf259 | /tasks/__init__.py | 1412a06ab5b6c8a385eaddc2bf7482c04fd97226 | [
"MIT"
] | permissive | pycontw/mail_handler | 211278ac64facd106e8a85c40d0b3b3e56ba3603 | 5181d7ded2a8e43c11798ceb2e77bf87b6218276 | refs/heads/master | 2022-12-09T15:34:21.031968 | 2022-04-22T22:53:16 | 2022-04-22T22:53:16 | 203,540,182 | 10 | 16 | MIT | 2022-12-09T06:15:13 | 2019-08-21T08:25:24 | Python | UTF-8 | Python | false | false | 308 | py | from invoke import Collection
from tasks import doc, env, git, secure, style, test
from tasks.build import build_ns
ns = Collection()
ns.add_collection(env)
ns.add_collection(git)
ns.add_collection(test)
ns.add_collection(style)
ns.add_collection(build_ns)
ns.add_collection(doc)
ns.add_collection(secure)
| [
"weilee.rx@gmail.com"
] | weilee.rx@gmail.com |
e4656b2fa445618865fe0cc04e2b7a95eaf810e5 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/core/2017/4/unifi.py | 42b5070b0461bc90cbbea23482de85c0cd25bb7f | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 3,383 | py | """
Support for Unifi WAP controllers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.unifi/
"""
import logging
import urllib
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
import homeassistant.loader as loader
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.const import CONF_VERIFY_SSL
REQUIREMENTS = ['pyunifi==2.0']
_LOGGER = logging.getLogger(__name__)
# Configuration keys specific to this platform.
CONF_PORT = 'port'
CONF_SITE_ID = 'site_id'

# Defaults applied when the user omits the corresponding option.
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8443
DEFAULT_VERIFY_SSL = True

# Identifiers for the persistent notification shown on connection failure.
NOTIFICATION_ID = 'unifi_notification'
NOTIFICATION_TITLE = 'Unifi Device Tracker Setup'

# Voluptuous schema validating the platform's configuration.yaml entry.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_SITE_ID, default='default'): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
})
def get_scanner(hass, config):
    """Set up the Unifi device_tracker."""
    # Imported lazily so the dependency is only loaded when the platform
    # is actually configured.
    from pyunifi.controller import Controller

    conf = config[DOMAIN]
    persistent_notification = loader.get_component('persistent_notification')
    try:
        # Positional order matches the original call:
        # host, username, password, port, then keyword options.
        ctrl = Controller(
            conf.get(CONF_HOST), conf.get(CONF_USERNAME),
            conf.get(CONF_PASSWORD), conf.get(CONF_PORT),
            version='v4', site_id=conf.get(CONF_SITE_ID),
            ssl_verify=conf.get(CONF_VERIFY_SSL))
    except urllib.error.HTTPError as ex:
        _LOGGER.error("Failed to connect to Unifi: %s", ex)
        message = ('Failed to connect to Unifi. '
                   'Error: {}<br />'
                   'You will need to restart hass after fixing.'
                   ''.format(ex))
        persistent_notification.create(
            hass, message,
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID)
        return False
    return UnifiScanner(ctrl)
class UnifiScanner(DeviceScanner):
    """Provide device_tracker support from Unifi WAP client data."""

    def __init__(self, controller):
        """Initialize the scanner and fetch the initial client list."""
        self._controller = controller
        self._update()

    def _update(self):
        """Refresh the MAC-address -> client-info mapping."""
        try:
            clients = self._controller.get_clients()
        except urllib.error.HTTPError as ex:
            # Best effort: log and keep an empty client list on failure.
            _LOGGER.error("Failed to scan clients: %s", ex)
            clients = []
        mapping = {}
        for client in clients:
            mapping[client['mac']] = client
        self._clients = mapping

    def scan_devices(self):
        """Scan for devices."""
        self._update()
        return self._clients.keys()

    def get_device_name(self, mac):
        """Return the name (if known) of the device.

        If a name has been set in Unifi, then return that, else
        return the hostname if it has been detected.
        """
        client = self._clients.get(mac, {})
        name = client.get('name')
        if not name:
            name = client.get('hostname')
        _LOGGER.debug("Device %s name %s", mac, name)
        return name
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
e549afef111dba3869240fa2b3c410db40d5f07c | e6fea6eb27c169642454674f20e6b2db0dc2f738 | /word2vec/get_close_words.py | 8cd7902e4b7dc56529a133344553ebabbe2ca9bd | [] | no_license | muntakimrafi/insbcn | 1db79e03cf6440f57da90a6e4fa8f047bee63d38 | 25e1b8394d1727a792591f5fb3f9e309e245cc50 | refs/heads/master | 2021-03-25T13:45:17.047609 | 2018-08-03T10:40:12 | 2018-08-03T10:40:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | from gensim import models
lans = ['en','es','ca']
model_name = 'word2vec_model_instaBarcelona_lan.model'
model_path = '../../../datasets/instaBarcelona/models/word2vec/' + model_name
models_list = []
print "Loading models ... "
for l in lans:
models_list.append(models.Word2Vec.load(model_path.replace('lan',l)))
# districts = ['surf','ciutatvella', 'eixample', 'santsmontjuic', 'lescorts', 'sarria', 'gracia', 'hortaguinardo', 'noubarris', 'santandreu', 'santmarti']
# districts += ['poblenou','poblesec','sagradafamilia','barceloneta','gothic','vallcarca','gotic','gotico','viladegracia','viladegracia','vallvidrera','diagonalmar','raval','born','borne']
# districts = ['elborn','santmarti','poblesec','barceloneta','gothic','vallcarca','gotic','gotico','born','raval','sants','poblenou','vallcarca','viladegracia','gracia','sagradafamilia','vallvidrera']
districts = ['poblesec','poblenou','born']
print "Checking models"
for d in districts:
print '\n' + d
for m in models_list:
try:
topw = m.wv.most_similar(positive=[d], topn=30)
except:
topw = [('Not in voc','')]
toprint = ''
for w in topw:
toprint += str(w[0]) + ' '
print toprint
print "DONE" | [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
ac2872f4a29ec3412b06173421e13d01004ea1da | 3536c71ef08e52088a10e1b62e943386e32ccc1e | /docs/conf.py | 6feb5b7f670bbd6f19eb7ad996857aa5ed817a20 | [
"Apache-2.0"
] | permissive | Aspire1Inspire2/tdameritrade | 66698ef9a1444f930bf9a57f29d233d3f1e5215a | 00d93ca355b7f4f9a7f5041ea5096e9b6f3bb228 | refs/heads/master | 2020-05-17T11:30:48.443511 | 2019-04-03T21:29:42 | 2019-04-03T21:29:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,287 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tdameritrade documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 12 22:07:11 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.coverage',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'tdameritrade'
copyright = '2018, Tim Paine'
author = 'Tim Paine'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v0.0.8'
# The full version, including alpha/beta/rc tags.
release = 'v0.0.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'tdameritradedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tdameritrade.tex', 'tdameritrade Documentation',
'Tim Paine', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tdameritrade', 'tdameritrade Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tdameritrade', 'tdameritrade Documentation',
author, 'tdameritrade', 'One line description of project.',
'Miscellaneous'),
]
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
5f8e18df7e9043614b5b0ecf4d2dfbb9fdbba54b | a0dda8be5892a390836e19bf04ea1d098e92cf58 | /叶常春视频例题/chap07/7-3-1-用while检测列表为空.py | 07d112954fbb6a310ba170b0ffbb0422ca5eee39 | [] | no_license | wmm98/homework1 | d9eb67c7491affd8c7e77458ceadaf0357ea5e6b | cd1f7f78e8dbd03ad72c7a0fdc4a8dc8404f5fe2 | refs/heads/master | 2020-04-14T19:22:21.733111 | 2019-01-08T14:09:58 | 2019-01-08T14:09:58 | 164,055,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | #例7-3-1 用while语句检测列表是否为空
# 首先,创建一个待验证用户列表
# 和一个用于存储已验证用户的空列表
unconfirmed_users = ['alice', 'brian', 'candace']
confirmed_users = []
# 验证每个用户,直到没有未验证用户为止
# 将每个经过验证的列表都移到已验证用户列表中
while unconfirmed_users: #当unconfirmed_users列表不为空,则...
current_user = unconfirmed_users.pop()
print("正在验证用户: " + current_user.title()) #模仿验证用户动作
confirmed_users.append(current_user)
# 显示所有已验证的用户
print("\n以下用户验证通过:")
for confirmed_user in confirmed_users:
print(confirmed_user.title()) | [
"792545884@qq.com"
] | 792545884@qq.com |
34d2334c3931b648f94be9e0a25b6ca4b2b4d527 | 0042c37405a7865c50b7bfa19ca531ec36070318 | /20_selenium/test_incontrol/incontrol_picture.py | b06cfca4773c8ba95547cdb313b520192666e3fb | [] | no_license | lu-judong/untitled1 | b7d6e1ad86168673283917976ef0f5c2ad97d9e0 | aa158e7541bae96332633079d67b5ab19ea29e71 | refs/heads/master | 2022-05-23T18:55:45.272216 | 2020-04-28T09:55:38 | 2020-04-28T09:55:38 | 257,822,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,831 | py | from selenium import webdriver
from new_selenium.bin.login import Login
from new_selenium.bin.main import Method
import time
from new_selenium.tech_incontrol.incontrol_config import *
from config.config import path_dir
from config.log_config import logger
class Fault:
def log_file_out(self, msg):
    """Append *msg* (terminated with CRLF) to the shared use-case log file.

    FIX: use a context manager so the file handle is closed even when
    write() raises -- the original open()/close() pair leaked the handle
    on error.
    """
    with open(r'{}/usecase.txt'.format(path_dir), mode='a', encoding='utf-8') as fo:
        fo.write(msg + '\r\n')
def picture(self,url,username,password):
driver = webdriver.Chrome()
Login().login(url,username, password, driver)
self.log_file_out('-----内控指标图表点击-----')
for i in contents:
try:
Method(driver).contains_xpath('click',i)
time.sleep(2)
self.log_file_out('点击'+i+'成功')
except Exception as e:
logger.error(e)
self.log_file_out('点击' + i + '失败')
try:
Method(driver).switch_out()
Method(driver).switch_iframe(
driver.find_element_by_xpath("//iframe[contains(@src,'/darams/a/inControl')]"))
self.log_file_out('切入内控指标成功')
except:
self.log_file_out('切入内控指标失败')
driver.find_element_by_xpath("//a[contains(text(),\'{}\')]/../../td[7]/a[1]".format('111')).click()
time.sleep(2)
Method(driver).switch_out()
driver.find_element_by_class_name('layui-layer-btn0').click()
time.sleep(2)
Method(driver).switch_out()
Method(driver).switch_iframe(
driver.find_element_by_xpath("//iframe[contains(@src,'/darams/a/inControl')]"))
time.sleep(5)
driver.find_element_by_xpath("//a[contains(text(),\'{}\')]/../../td[7]/a[2]".format('111')).click()
Method(driver).switch_out()
incontrol_p = Method(driver).get_attr('css', "[class='layui-layer layui-layer-iframe']", 'times')
Method(driver).switch_iframe('layui-layer-iframe' + incontrol_p)
home_handles = driver.current_window_handle
time.sleep(2)
value_com = driver.execute_script('var aa = echarts.getInstanceByDom($("#myChart2")[0]);' \
'var option = aa.getOption();' \
'return [option.series[0].data[0].value[0]]')
js1 = 'myChart2.trigger("dblclick",{"data":{"path":"苏州华兴致远电子科技有限公司"},"componentType":"series","seriesType":"treemap"})'
try:
driver.execute_script(js1)
self.log_file_out('点击供应商内控指标图表成功')
except:
self.log_file_out('点击供应商内控指标图表失败')
time.sleep(2)
all_handle = driver.window_handles
for i in all_handle:
if i != home_handles:
driver.switch_to.window(i)
driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div[2]/div[1]/div[2]/button[1]').click()
time.sleep(10)
# for i in range(0,len(driver.find_elements_by_xpath('//*[@id="opFaultOrderTable"]/tbody/tr/td[12]'))):
# print(driver.find_elements_by_xpath('//*[@id="opFaultOrderTable"]/tbody/tr/td[12]')[i].text)
status_aa = driver.execute_script(
'return $("#opFaultOrderTable").bootstrapTable("getData").map(function(row){return $(row).attr("mainResponsibility")}).some(function(item){return item !="苏州华兴致远电子科技有限公司"})')
value_com1 = driver.execute_script('return $("#opFaultOrderTable").bootstrapTable("getData").map(function(row){return $(row).attr("mainResponsibility")}).length')
if status_aa is False and value_com[0] == value_com1:
self.log_file_out('主要责任单位数值正确')
else:
self.log_file_out('主要责任单位数值不正确')
for i in all_handle:
if i != home_handles:
driver.close()
driver.switch_to.window(home_handles)
time.sleep(2)
# Method(driver).click('id','chart2')
# js1 = 'var aa = echarts.getInstanceByDom($("#chart2")[0]);' \
# 'var option = aa.getOption();' \
# 'param = {componentType:"series",name:option.yAxis[0].data[0],seriesName:"关联故障",seriesType:"bar",value:option.series[0].data[0]}; ' \
# 'skipTo(param, "RAILWAY_BUREAU");'
js2 = 'var title = "2017-07责任部室故障统计";' \
'var dateString = title.substring(0,title.length-8);' \
'if (dateString.length > 7){' \
'window.open ("/darams/a/fault/opFaultOrder/qList?confModelId=ed42931a637744a0a11141ccaccfd40b000 &chartType=INSIDE&depart=" + "转向架开发部");}else{window.open("/darams/a/fault/opFaultOrder/qList?confModelId=ed42931a637744a0a11141ccaccfd40b000&chartType=INSIDE&depart=" + "转向架开发部" + "&octMonthFrom=" + dateString + "&octMonthTo=" + dateString);}'
try:
driver.execute_script(js2)
self.log_file_out('点击责任部室图表成功')
except:
self.log_file_out('点击责任部室图表失败')
time.sleep(2)
all_handle1 = driver.window_handles
for i in all_handle1:
if i != home_handles:
driver.switch_to.window(i)
status_bb = driver.execute_script(
'return $("#opFaultOrderTable").bootstrapTable("getData").map(function(row){return $(row).attr("mainResponsibility")}).some(function(item){return item !="技术中心"})')
if status_bb is False:
self.log_file_out('责任部室验证正确')
else:
self.log_file_out('责任部室验证不正确')
# Manual smoke-test entry point: runs immediately when the module executes.
url = 'http://192.168.1.115:8080/darams/a?login'
Fault().picture(url, 'test', '1234')
| [
"ljd_python@163.com"
] | ljd_python@163.com |
d5acc9da01fcf37cdad10fb33e9391c39115bb87 | b68c92fe89b701297f76054b0f284df5466eb698 | /Sorting/InsertionSort.py | c42a74592fdfb36e35685e6e8cbc61a47c65daaa | [] | no_license | makrandp/python-practice | 32381a8c589f9b499ab6bde8184a847b066112f8 | 60218fd79248bf8138158811e6e1b03261fb38fa | refs/heads/master | 2023-03-27T18:11:56.066535 | 2021-03-28T04:02:00 | 2021-03-28T04:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | # Insertion sort is an algorithm that functions by iterating from 0 to n where n is the size of the input dataset
# For every iteration of i from 0 to n, we then swap from i to 0 given a situation where the variable is swappable
## Best ## Avrg ## Wrst ## Spce ##
## n # n^2 # n^2 # 1 ##
# Insertion sort is still bad, but it is stable and has a best case time complexity of n and a space complexity of 1
from typing import List
def insertionSort(arr: List[int]) -> None:
    """Sort *arr* in place using insertion sort.

    Stable, O(1) extra space. Shifting (instead of pairwise swapping) with
    an early exit gives the advertised O(n) best case on sorted input and
    O(n^2) in the average/worst case.

    :param arr: list of integers, mutated in place
    :return: None (the input list is sorted in place)
    """
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        # Shift strictly-larger elements one slot right until key's spot
        # is found; stopping at the first non-larger element preserves
        # stability and terminates immediately on sorted input.
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key
# Quick demo: sort a sample list in place and print the result.
a = [5,2,7,9,0,1,3,4,2,15,25,35]
insertionSort(a)
print(a)
"awalexweber99@gmail.com"
] | awalexweber99@gmail.com |
f74f921086196d7e6c01a153fed48f3f0806ffdb | d1ff466d7a230409020ebc88aa2f2ffac8c45c15 | /cournot/pages.py | 68eecd9c2f47d2ed52156f48cc24aadc29827df9 | [
"MIT"
] | permissive | Muhammadahmad06/oTree | 224ef99a2ca55c8f2d7e67fec944b8efd0b21884 | 789fb2c2681aa5fbb8385f2f65a633e02592b225 | refs/heads/master | 2020-08-25T19:12:36.929702 | 2019-10-03T09:29:54 | 2019-10-03T09:29:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | from ._builtin import Page, WaitPage
from otree.api import Currency as c, currency_range
from .models import Constants
class Introduction(Page):
    """Static instructions page shown before the quantity decision."""
    pass
class Decide(Page):
    """Decision page: the player submits the number of units to produce."""
    form_model = 'player'
    form_fields = ['units']
class ResultsWaitPage(WaitPage):
    """Hold each player until the whole group has decided, then score."""
    body_text = "Waiting for the other participant to decide."
    def after_all_players_arrive(self):
        # Every group member has submitted; compute all payoffs at once.
        self.group.set_payoffs()
class Results(Page):
    """Results page: expose the other player's chosen quantity to the template."""
    def vars_for_template(self):
        return dict(
            other_player_units=self.player.other_player().units,
        )
# Order in which oTree displays the pages each round.
page_sequence = [
    Introduction,
    Decide,
    ResultsWaitPage,
    Results
]
| [
"chris@otree.org"
] | chris@otree.org |
cff5b0ce9bad60d2be0a2346e00bfc63ed894ca8 | 6eea60bcbf206dafc5fe578b996267ce2bc9ae6e | /interviewbit/Magician_and_Chocolates.py | 5949fe44831449b74974a2e3381a8c91ec4d1949 | [] | no_license | SueAli/cs-problems | 491fef79f3e352d7712cd622d3b80ec15d38642b | b321116d135f868d88bd849b5ea7172feb74fb4c | refs/heads/master | 2023-08-31T10:46:30.374394 | 2023-08-24T20:14:04 | 2023-08-24T20:14:04 | 95,930,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import heapq
import math
class Solution:
    """InterviewBit "Magician and Chocolates".

    At each of A time units the kid eats the fullest bag (X chocolates)
    and the magician refills that bag with floor(X / 2). The answer is
    the total eaten, modulo 1e9 + 7.
    """

    # @param A : integer          -- number of time units
    # @param B : list of integers -- chocolates per bag
    # @return an integer          -- max chocolates eaten, mod 1e9+7
    def nchoc(self, A, B):
        """Greedy max-heap simulation.

        Time: O(len(B)) to heapify plus O(A log len(B)) for the picks.
        Space: O(len(B)) for the (negated) heap copy of the input.
        """
        MOD = (10 ** 9) + 7
        if A <= 0 or not B:
            # No time units or no bags: nothing can be eaten.
            return 0
        # heapq is a min-heap, so store negated counts to pop the maximum.
        heap = [-item for item in B]  # O(n)
        heapq.heapify(heap)  # O(n)
        eaten = 0
        for _ in range(A):
            largest = -heap[0]
            eaten += largest
            # Integer floor-halving stays exact for arbitrarily large
            # counts, unlike the old float-based int(math.floor(x / 2.)).
            heapq.heapreplace(heap, -(largest // 2))  # O(log n)
        return eaten % MOD
# Ad-hoc smoke check (Python 2 print statement); the expected value is
# recorded on the comment line below the call.
s = Solution()
print s.nchoc(10, [ 2147483647, 2000000014, 2147483647 ])
#284628164
| [
"souad.hassanien@gmail.com"
] | souad.hassanien@gmail.com |
19a0fd4d3939243857e88c8327c49561837841ab | d05a59feee839a4af352b7ed2fd6cf10a288a3cb | /xlsxwriter/test/comparison/test_textbox29.py | 46aabfa9a927938c6129ee8a9c355269cb61109c | [
"BSD-2-Clause-Views"
] | permissive | elessarelfstone/XlsxWriter | 0d958afd593643f990373bd4d8a32bafc0966534 | bb7b7881c7a93c89d6eaac25f12dda08d58d3046 | refs/heads/master | 2020-09-24T06:17:20.840848 | 2019-11-24T23:43:01 | 2019-11-24T23:43:01 | 225,685,272 | 1 | 0 | NOASSERTION | 2019-12-03T18:09:06 | 2019-12-03T18:09:05 | null | UTF-8 | Python | false | false | 798 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Regression test: a file produced by XlsxWriter must match a reference
    file created by Excel itself.
    """
    def setUp(self):
        # Register the reference workbook this test compares against.
        self.set_filename('textbox29.xlsx')
    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with textbox(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        # 'textlink' binds the textbox text to the value of cell A1.
        worksheet.insert_textbox('E9', None, {'textlink': '=$A$1'})
        workbook.close()
        # Structural comparison against the reference file from setUp().
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
bd23ea04dc04e328739dd86a97a65296c5e7aa4e | 3d91c09bca4e68bf7a527cb40ed70ac208495b93 | /library/templatetags/get_lended.py | dc9e1d23ec96ed29f427b8fd2cf57aa40028802e | [] | no_license | Kaik-a/OCR-Projet13 | 02e9d8c9228d6d7a09013b4ab2570304c01dfc28 | ac339002279397f43316e33a869cce797b5d92b2 | refs/heads/main | 2023-02-17T09:39:11.184120 | 2021-01-11T15:50:58 | 2021-01-11T15:50:58 | 311,875,691 | 0 | 0 | null | 2021-01-11T15:50:59 | 2020-11-11T05:51:34 | CSS | UTF-8 | Python | false | false | 590 | py | """Get lended games"""
from typing import Union
from django import template
from django.core.exceptions import ObjectDoesNotExist
from library.models import LendedGame
register = template.Library()
@register.filter(name="get_lended")
def get_lended(owned_game_id) -> Union[LendedGame, bool]:
    """Template filter: the active (not yet returned) loan of an owned game.

    :param owned_game_id: primary key of the owned game
    :return: the matching ``LendedGame``, or ``False`` when none is out
    """
    try:
        return LendedGame.objects.get(owned_game=owned_game_id, returned=False)
    except ObjectDoesNotExist:
        return False
| [
"mehdi.bichari@outscale.com"
] | mehdi.bichari@outscale.com |
87a207a046ec88b78484e7b0c816fccf6e4be3bc | ea05a89f4df49323eb630960c31bfbf3eb812e48 | /events/migrations/0001_initial.py | 7bae2e4599ed9154b47d3475390f388f1749a0db | [] | no_license | psteichen/aperta-cms-lts | 3dff06fbf17e4a8c4a124c826b36f083451d613e | cf46e82cd71e7acddb900e558bc155cdd7999d9c | refs/heads/master | 2021-01-20T00:08:06.012978 | 2017-08-11T13:58:10 | 2017-08-11T13:58:10 | 89,083,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-23 16:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import events.models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the events app (Event, Invitation)."""
    initial = True
    dependencies = [
        # Event.location references locations.Location, so that app's
        # initial migration must be applied first.
        ('locations', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='Titre')),
                ('when', models.DateField(verbose_name='Date')),
                ('time', models.TimeField(verbose_name='Heure de début')),
                ('deadline', models.DateTimeField(verbose_name='Deadline')),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='locations.Location', verbose_name='Lieu')),
            ],
        ),
        migrations.CreateModel(
            name='Invitation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message', models.CharField(blank=True, max_length=5000, null=True)),
                ('attachement', models.FileField(blank=True, null=True, upload_to=events.models.rename_attach, verbose_name='Annexe(s)')),
                ('sent', models.DateTimeField(blank=True, null=True)),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='events.Event')),
            ],
        ),
    ]
| [
"pst@libre.lu"
] | pst@libre.lu |
6ccd0fd80e07bc2409e0b4c4d5708a7161fb5fa9 | 6b1b506139088aa30de9fd65cff9e3b6a3a36874 | /sofia_redux/instruments/hawc/steps/__init__.py | dac31389261c9c2edcb7bfdcbb30788994698992 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | SOFIA-USRA/sofia_redux | df2e6ad402b50eb014b574ea561734334d70f84d | 493700340cd34d5f319af6f3a562a82135bb30dd | refs/heads/main | 2023-08-17T11:11:50.559987 | 2023-08-13T19:52:37 | 2023-08-13T19:52:37 | 311,773,000 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,262 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['StepBinPixels', 'StepBgSubtract', 'StepCalibrate', 'StepCheckhead',
'StepCombine', 'StepDemodulate', 'StepDmdCut',
'StepDmdPlot', 'StepFlat', 'StepFluxjump', 'StepFocus',
'StepImgMap', 'StepIP', 'StepLabChop', 'StepLabPolPlots',
'StepMerge', 'StepMkflat', 'StepNodPolSub',
'StepNoiseFFT', 'StepNoisePlots', 'StepOpacity',
'StepPolDip', 'StepPolMap', 'StepPolVec', 'StepPrepare',
'StepRegion', 'StepRotate', 'StepScanMap',
'StepScanMapFlat', 'StepScanMapFocus', 'StepScanMapPol',
'StepScanStokes', 'StepShift',
'StepSkycal', 'StepSkydip', 'StepSplit', 'StepStdPhotCal',
'StepStokes', 'StepWcs', 'StepZeroLevel']
from sofia_redux.instruments.hawc.steps.stepbinpixels import *
from sofia_redux.instruments.hawc.steps.stepbgsubtract import *
from sofia_redux.instruments.hawc.steps.stepcalibrate import *
from sofia_redux.instruments.hawc.steps.stepcheckhead import *
from sofia_redux.instruments.hawc.steps.stepcombine import *
from sofia_redux.instruments.hawc.steps.stepdemodulate import *
from sofia_redux.instruments.hawc.steps.stepdmdcut import *
from sofia_redux.instruments.hawc.steps.stepdmdplot import *
from sofia_redux.instruments.hawc.steps.stepflat import *
from sofia_redux.instruments.hawc.steps.stepfluxjump import *
from sofia_redux.instruments.hawc.steps.stepfocus import *
from sofia_redux.instruments.hawc.steps.stepimgmap import *
from sofia_redux.instruments.hawc.steps.stepip import *
from sofia_redux.instruments.hawc.steps.steplabchop import *
from sofia_redux.instruments.hawc.steps.steplabpolplots import *
from sofia_redux.instruments.hawc.steps.stepmerge import *
from sofia_redux.instruments.hawc.steps.stepmkflat import *
from sofia_redux.instruments.hawc.steps.stepnodpolsub import *
from sofia_redux.instruments.hawc.steps.stepnoisefft import *
from sofia_redux.instruments.hawc.steps.stepnoiseplots import *
from sofia_redux.instruments.hawc.steps.stepopacity import *
from sofia_redux.instruments.hawc.steps.steppoldip import *
from sofia_redux.instruments.hawc.steps.steppolmap import *
from sofia_redux.instruments.hawc.steps.steppolvec import *
from sofia_redux.instruments.hawc.steps.stepprepare import *
from sofia_redux.instruments.hawc.steps.stepregion import *
from sofia_redux.instruments.hawc.steps.steprotate import *
from sofia_redux.instruments.hawc.steps.stepscanstokes import *
from sofia_redux.instruments.hawc.steps.stepscanmap import *
from sofia_redux.instruments.hawc.steps.stepscanmapflat import *
from sofia_redux.instruments.hawc.steps.stepscanmapfocus import *
from sofia_redux.instruments.hawc.steps.stepscanmappol import *
from sofia_redux.instruments.hawc.steps.stepshift import *
from sofia_redux.instruments.hawc.steps.stepskycal import *
from sofia_redux.instruments.hawc.steps.stepskydip import *
from sofia_redux.instruments.hawc.steps.stepsplit import *
from sofia_redux.instruments.hawc.steps.stepstdphotcal import *
from sofia_redux.instruments.hawc.steps.stepstokes import *
from sofia_redux.instruments.hawc.steps.stepwcs import *
from sofia_redux.instruments.hawc.steps.stepzerolevel import *
| [
"melanie.j.clarke@nasa.gov"
] | melanie.j.clarke@nasa.gov |
5aa75852540563db657a5c4fe15b75e585fcdfa2 | 37dd16e4e48511e5dab789c57d97ab47ccffd561 | /src/apps/domain/engagement_assignment/admin.py | b5e547f63e46b70a8c0c61b622b47c81dc3b62fd | [] | no_license | willow/scone-api | c9473a043996639024ae028bb3d7bf420eb3d75b | c786915bc0535cb0ed78726afa4ee3c0772a8c0e | refs/heads/production | 2016-09-05T18:43:22.953283 | 2014-08-18T23:16:47 | 2014-08-18T23:18:23 | 18,448,114 | 1 | 0 | null | 2014-08-08T16:40:35 | 2014-04-04T18:21:18 | Python | UTF-8 | Python | false | false | 769 | py | from django.contrib import admin
from src.apps.domain.engagement_assignment.models import AssignedProspect
class AssignedProspectAdmin(admin.ModelAdmin):
    """Read-only admin: AssignedProspect rows can be viewed in the admin
    site but never added, changed, or deleted."""
    # Disable bulk actions (e.g. "delete selected").
    actions = None
    def has_delete_permission(self, request, obj=None):
        return False
    def has_add_permission(self, request):
        return False
    # Allow viewing objects but not actually changing them
    # https://gist.github.com/aaugustin/1388243
    def has_change_permission(self, request, obj=None):
        # GET/HEAD render the (read-only) change page; any mutating verb
        # (POST etc.) is rejected outright.
        if request.method not in ('GET', 'HEAD'):
            return False
        return super().has_change_permission(request, obj)
    def get_readonly_fields(self, request, obj=None):
        # Mark every model field read-only so the form renders without inputs.
        return (self.fields or [f.name for f in self.model._meta.fields])
admin.site.register(AssignedProspect, AssignedProspectAdmin)
| [
"scoarescoare@gmail.com"
] | scoarescoare@gmail.com |
122fdbad2d4235c0448a9c191ffe41de3a7b7478 | fa064f5ef48b29dcf2e90d9e4e30199a32c5e2af | /case/theater/myparser/legacy.py | 97480ea593d418b0cf66e9aafc17bc502f89c3bf | [] | no_license | gsrr/Crawler | ed05971bf6be31f6dae32d6e82bbae9cb93a8d02 | 1e109eeaaf518e699e591fa8e72909e6f965be0c | refs/heads/master | 2020-04-07T06:40:08.492069 | 2017-06-13T03:42:04 | 2017-06-13T03:42:04 | 44,025,883 | 27 | 5 | null | null | null | null | UTF-8 | Python | false | false | 5,099 | py | # -*- coding: utf-8 -*-
import mylib
import re
import urlparse
import urllib
import parseplatform
import copy
def getContent(data, item):
    """Normalise a scraped field value.

    For "price" fields the ticket-separator spans become commas and
    ``<br />`` tags become newlines; every other field passes through
    unchanged.
    """
    if item != "price":
        return data
    cleaned = data.replace("<span class='ticket_content'></span>", ",")
    return cleaned.replace("<br />", "\n")
class Parser:
    """Scraper for a concert-listing site ("legacy").

    Python 2 code: ``print`` statements, the ``urlparse`` module,
    ``urllib.urlopen`` and integer ``/`` division are all used below.
    Results are appended to ``result/legacy.result`` as key=value blocks.
    """
    def __init__(self, paras):
        # Base URL of the listing page; relative links are joined onto it.
        self.url = paras['url']
        self.queue = []
    def extractTitle(self, data):
        # First alt="..." attribute found in the HTML fragment.
        searchObj = re.search(r'alt="(.*?)"', data , re.M|re.I|re.S)
        return searchObj.group(1)
    def extractImage(self, data):
        # First src="..." attribute found in the HTML fragment.
        searchObj = re.search(r'src="(.*?)"', data , re.M|re.I|re.S)
        return searchObj.group(1)
    def extractURL(self, data):
        # First href="..." attribute, made absolute against self.url.
        searchObj = re.search(r'href="(.*?)"', data , re.M|re.I|re.S)
        return urlparse.urljoin(self.url, searchObj.group(1))
    def download(self, url_content, url_image):
        """Save the poster image under image/<id> and return the id."""
        file_id = url_content.split("/")[-1]
        with open("image/%s"%file_id, "w") as fw:
            fr = urllib.urlopen(url_image)
            data = fr.read()
            fw.write(data)
        return file_id
    def extractPoster(self, data):
        """Collect url/title/image info for every poster anchor on the page."""
        contents = []
        items = re.findall(r'<a class="poster"(.*?)</a>', data , re.M|re.I|re.S)
        for item in items:
            content = {}
            content['url_content'] = self.extractURL(item)
            content['title'] = self.extractTitle(item)
            content['url_image'] = self.extractImage(item) #download image
            content['image_id'] = self.download(content['url_content'], content['url_image'])
            contents.append(copy.deepcopy(content))
        return contents
    def extractPrice(self, data, contents):
        """Fill price/place/start_time fields into the events in *contents*."""
        data_dic = {
                "票價" : "price",
                "場地" : "place",
                "開始" : "start_time",
        }
        items = re.findall(r'<th>(.*?)</th><td>(.*?)</td>', data , re.M|re.I|re.S)
        cnt = 0
        for item in items:
            if item[0] in data_dic.keys():
                # Python 2 integer division: three tracked fields per event,
                # so cnt/3 selects the event the field belongs to.
                content = contents[cnt/3]
                content[data_dic[item[0]]] = getContent(item[1], data_dic[item[0]])
                cnt += 1
            else:
                pass
    def extractDate(self, data, contents):
        """Fill start_date ("month/day") into each event, in page order."""
        items = re.findall(r'<div class="m">(.*?)</div>(.*?)<div class="d">(.*?)</div>(.*?)<div class="week">(.*?)</div>', data , re.M|re.I|re.S)
        cnt = 0
        for item in items:
            content = contents[cnt]
            content["start_date"] = item[0] + "/" + item[2]
            cnt += 1
    def _parse_content(self, url, content):
        """Scrape one event detail page into *content* (dict, mutated).

        The place/price/start_date/start_time flags mark that the label
        line was seen, so the next line ending a paragraph is the value.
        """
        print url
        data = mylib.myurl(url)
        data_ret = ""
        place = 0
        price = 0
        start_date = 0
        start_time = 0
        for line in data:
            if "<title>" in line:
                searchObj = re.search(r'<title>(.*?)</title>', line , re.M|re.I|re.S)
                if searchObj:
                    content['title'] = searchObj.group(1)
            if "alignnone" in line:
                searchObj = re.search(r'src="(.*?)"', line , re.M|re.I|re.S)
                if searchObj:
                    content['url_image'] = searchObj.group(1)
                    content['image_id'] = searchObj.group(1).split("/")[-1]
            if place == 1 and "</p>" in line:
                content['place'] = line.strip().rstrip("</p>")
                place = 0
            if price == 1 and '</p>' in line:
                content['price'] = line.strip().rstrip("</p>")
                price = 0
            if start_date == 1 and '</p>' in line:
                content['start_date'] = line.strip().rstrip("</p>")
                start_date = 0
            if start_time == 1 and '</p>' in line:
                content['start_time'] = line.strip().rstrip("</p>")
                start_time = 0
            if "演出場地" in line:
                place = 1
            if "演出票價" in line:
                price = 1
            if "演出日期" in line:
                start_date = 1
            if "演出開始" in line:
                start_time = 1
    def parse(self):
        """Scrape the listing page, then every linked event detail page."""
        data = mylib.myurl(self.url)
        data_ret = ""
        contents = []
        for line in data:
            if "galleries-slide-sub-title1" in line:
                concert = {}
                concert['url_content'] = self.extractURL(line)
                self._parse_content(concert['url_content'], concert)
                contents.append(copy.deepcopy(concert))
        self.write(contents)
    def write(self, data):
        """Append events to the result file as --start--/--end-- blocks."""
        with open("result/legacy.result", "a") as fw:
            for content in data:
                fw.write("--start--\n")
                for key in content.keys():
                    if key == "price":
                        # Newlines inside price values would break the
                        # line-based format, so encode them as "::".
                        fw.write(key + "=" + content[key].replace("\n", "::") + "\n")
                    else:
                        fw.write(key + "=" + content[key] + "\n")
                fw.write("--end--\n\n")
    def start(self):
        # Public entry point; simply delegates to parse().
        return self.parse()
| [
"jerrycheng1128@gmail.com"
] | jerrycheng1128@gmail.com |
b0e8975a3841436d1056fbc76ca39921dc2e3f5b | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/token/Schema+Instance/NISTXML-SV-IV-atomic-token-enumeration-3-2.py | 617a90346e1a437b02be47eb86f0aa348ed0dd9c | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 511 | py | from output.models.nist_data.atomic.token.schema_instance.nistschema_sv_iv_atomic_token_enumeration_3_xsd.nistschema_sv_iv_atomic_token_enumeration_3 import NistschemaSvIvAtomicTokenEnumeration3
from output.models.nist_data.atomic.token.schema_instance.nistschema_sv_iv_atomic_token_enumeration_3_xsd.nistschema_sv_iv_atomic_token_enumeration_3 import NistschemaSvIvAtomicTokenEnumeration3Type
obj = NistschemaSvIvAtomicTokenEnumeration3(
value=NistschemaSvIvAtomicTokenEnumeration3Type.STANDARDIZATION
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
d61c6018bc90c2c057e06d9f8d891a4a72b7b642 | 77f63e447ef93bd77ce4315b6d4220da86abffdf | /setup.py | cf8f789dcd9fdad981b8de82177b1b99dc6dbd2a | [
"WTFPL"
] | permissive | wsxxhx/TorchSUL | 8d1625989b5f5ef5aeb879e01019ddf850848961 | 46ee6aab4367d8a02ddb6de66d24455dbfa465c4 | refs/heads/master | 2023-05-25T16:52:10.321801 | 2021-06-12T09:34:56 | 2021-06-12T09:34:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup_args = dict(
name='TorchSUL',
version='0.1.26',
description='Simple but useful layers for Pytorch',
packages=find_packages(),
long_description=long_description,
long_description_content_type="text/markdown",
author='Cheng Yu',
author_email='chengyu996@gmail.com',
url='https://github.com/ddddwee1/TorchSUL',
install_requires = [
'tqdm',
'progressbar2',
'opencv-python',
'matplotlib',
]
)
install_requires = []
if __name__ == '__main__':
setup(**setup_args)
| [
"cy960823@outlook.com"
] | cy960823@outlook.com |
f9018da09e837ffe4443e04232a0b7cf548b49d9 | 472905e7a5f26465af4eee0fcfaa592de52eed17 | /server/apps/memories/migrations/0002_auto_20191201_1010.py | 824245ab101e92788bc10412075f16655229495e | [] | no_license | backpropogation/memories | 7c72bfeca8a4ab07a2c19960c5af91ed4da24304 | 0da75bcffccbe5d3f4e2d5b30ee3f224f70aa81b | refs/heads/master | 2022-12-24T08:27:24.838744 | 2019-12-01T16:14:04 | 2019-12-01T16:14:04 | 225,188,993 | 0 | 0 | null | 2019-12-01T16:06:24 | 2019-12-01T16:03:37 | JavaScript | UTF-8 | Python | false | false | 756 | py | # Generated by Django 2.2.1 on 2019-12-01 10:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('memories', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='memory',
options={'ordering': ('-posted_at',)},
),
migrations.AlterField(
model_name='memory',
name='latitude',
field=models.DecimalField(decimal_places=20, max_digits=22, verbose_name='Latitude'),
),
migrations.AlterField(
model_name='memory',
name='longitude',
field=models.DecimalField(decimal_places=20, max_digits=23, verbose_name='Longitude'),
),
]
| [
"jack.moriarty@mail.ru"
] | jack.moriarty@mail.ru |
64ba6ec501d975d541fea1dd55faa1b766c24658 | 6444622ad4a150993955a0c8fe260bae1af7f8ce | /djangoenv/bin/django-admin | b9210326fb809c5ec6c08d9923a99c16f9a46121 | [] | no_license | jeremyrich/Lesson_RestAPI_jeremy | ca965ef017c53f919c0bf97a4a23841818e246f9 | a44263e45b1cc1ba812059f6984c0f5be25cd234 | refs/heads/master | 2020-04-25T23:13:47.237188 | 2019-03-22T09:26:58 | 2019-03-22T09:26:58 | 173,138,073 | 0 | 0 | null | 2019-03-22T09:26:59 | 2019-02-28T15:34:19 | Python | UTF-8 | Python | false | false | 349 | #!/home/mymy/Desktop/Python_agility/cours/Hugo/Lessons_RestAPI/Lesson_RestAPI/djangoenv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix from argv[0] so the
    # command name looks clean, then hand off to Django's CLI dispatcher.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"jeremyrich@free.fr"
] | jeremyrich@free.fr | |
2401aaa2a42cd0e7892df3be26c15c536640cfef | ccb81eef3cd4f5562cab89b51695756ab8dbc736 | /message_ler_17076/wsgi.py | f1c921791698157a52064050940c5b21f2e0dd40 | [] | no_license | crowdbotics-apps/message-ler-17076 | 4e847a2ccd333b77414bf9b913e7705c203accbb | 135b68325e04caf669fd9fe281244c460e71068c | refs/heads/master | 2023-05-16T11:16:10.937234 | 2020-05-16T19:12:53 | 2020-05-16T19:12:53 | 264,508,470 | 0 | 0 | null | 2021-06-10T11:01:47 | 2020-05-16T19:11:07 | Python | UTF-8 | Python | false | false | 411 | py | """
WSGI config for message_ler_17076 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings (unless already configured)
# before building the module-level WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "message_ler_17076.settings")
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
24ce83491fbb7e1866d1b8b4a18150fc82c28ee5 | 79479634cd8da72fc912b11ec43e237726f7c5e5 | /scripts/subreddit_submissions.py | 5d74fcf29545b78a2ba70950c7140671f3e4f73d | [
"MIT"
] | permissive | PhantomInsights/subreddit-analyzer | 594235169f58d5b2c9de886625464ced5be02790 | ba5e6250797515d664d6cfa6df011f8cec1b2729 | refs/heads/master | 2020-11-25T10:46:13.771188 | 2020-04-02T12:50:12 | 2020-04-02T12:50:12 | 228,625,357 | 513 | 47 | MIT | 2019-12-24T13:35:32 | 2019-12-17T13:42:42 | Python | UTF-8 | Python | false | false | 2,915 | py | """
This script uses the Pushshift API to download posts from the specified subreddits.
By default it downloads 10,000 posts starting from the newest one.
"""
import csv
import time
from datetime import datetime
import requests
import tldextract
SUBREDDITS = ["mexico"]
HEADERS = {"User-Agent": "Submissions Downloader v0.2"}
SUBMISSIONS_LIST = list()
MAX_SUBMISSIONS = 10000
def init():
    """Iterate over all the subreddits and write one CSV of submissions each.

    For every subreddit a ``<name>-submissions.csv`` file is created with a
    header row; ``download_submissions`` fills the module-level
    SUBMISSIONS_LIST, whose rows are then written out and cleared.
    """
    for subreddit in SUBREDDITS:
        # Context manager guarantees the CSV is flushed and closed even if
        # the download raises; the original leaked the open file handle.
        with open("./{}-submissions.csv".format(subreddit),
                  "w", newline="", encoding="utf-8") as csv_file:
            writer = csv.writer(csv_file)
            # Header row first.
            writer.writerow(["datetime", "author", "title", "url", "domain"])
            print("Downloading:", subreddit)
            download_submissions(subreddit=subreddit)
            writer.writerows(SUBMISSIONS_LIST)
        SUBMISSIONS_LIST.clear()
def download_submissions(subreddit, latest_timestamp=None):
    """Recursively download submissions from Pushshift, 500 at a time.

    Appends rows to the module-level SUBMISSIONS_LIST until the API runs
    out of results or MAX_SUBMISSIONS is reached.

    Parameters
    ----------
    subreddit : str
        The desired subreddit.
    latest_timestamp : int
        Timestamp of the oldest submission fetched so far; used as the
        'before' cursor on recursive calls (None on the first call).
    """
    base_url = "https://api.pushshift.io/reddit/submission/search/"
    params = {"subreddit": subreddit, "sort": "desc",
              "sort_type": "created_utc", "size": 500}
    # After the first call of this function we will use the 'before' parameter.
    if latest_timestamp != None:
        params["before"] = latest_timestamp
    with requests.get(base_url, params=params, headers=HEADERS) as response:
        json_data = response.json()
        total_submissions = len(json_data["data"])
        latest_timestamp = 0
        print("Downloading: {} submissions".format(total_submissions))
        for item in json_data["data"]:
            # We will only take 3 properties, the timestamp, author and url.
            latest_timestamp = item["created_utc"]
            iso_date = datetime.fromtimestamp(latest_timestamp)
            # Reduce the full URL to its registered domain (e.g. example.com).
            tld = tldextract.extract(item["url"])
            domain = tld.domain + "." + tld.suffix
            if item["is_self"] == True:
                domain = "self-post"
            # Normalise URL shorteners to their canonical domains.
            if domain == "youtu.be":
                domain = "youtube.com"
            if domain == "redd.it":
                domain = "reddit.com"
            SUBMISSIONS_LIST.append(
                [iso_date, item["author"], item["title"], item["url"], domain])
            if len(SUBMISSIONS_LIST) >= MAX_SUBMISSIONS:
                break
        # Fewer than a full page means the subreddit has no older posts.
        if total_submissions < 500:
            print("No more results.")
        elif len(SUBMISSIONS_LIST) >= MAX_SUBMISSIONS:
            print("Download complete.")
        else:
            # Be polite to the API, then recurse for the next (older) page.
            time.sleep(1.2)
            download_submissions(subreddit, latest_timestamp)
if __name__ == "__main__":
init()
| [
"phantom@phantom.im"
] | phantom@phantom.im |
1e189ea6efb35bf1d8f0ab74b3f558a83fc5ab98 | 35b460a5e72e3cb40681861c38dc6d5df1ae9b92 | /CodeFights/Arcade/Intro/throughTheFog/circleOfNumbers.py | aef33d190af0cb87515c13db64c6284b45fdb0e4 | [] | no_license | robgoyal/CodingChallenges | 9c5f3457a213cf54193a78058f74fcf085ef25bc | 0aa99d1aa7b566a754471501945de26644558d7c | refs/heads/master | 2021-06-23T09:09:17.085873 | 2019-03-04T04:04:59 | 2019-03-04T04:04:59 | 94,391,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | # Name: circleOfNumbers.py
# Author: Robin Goyal
# Last-Modified: July 13, 2017
# Purpose: Given a circular radius n and an input number,
# find the number which is opposite the input number
def circleOfNumbers(n, firstNumber):
    """Return the number diametrically opposite *firstNumber* on a circle
    of the integers 0..n-1.

    Uses floor division (``n // 2``) so the result is an int; the previous
    ``n / 2`` is true division under Python 3 and returned a float.
    """
    return (firstNumber + n // 2) % n
"goyal.rob@gmail.com"
] | goyal.rob@gmail.com |
2e6d663e1f9f0847d490f1af6ae277c83881bfaa | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/twentemilieu/calendar.py | f4d1e51b171eedd978a3de9419f2265296cd7c7b | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 3,653 | py | """Support for Twente Milieu Calendar."""
from __future__ import annotations
from datetime import date, datetime, timedelta
from twentemilieu import WasteType
from homeassistant.components.calendar import CalendarEntity, CalendarEvent
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
import homeassistant.util.dt as dt_util
from .const import DOMAIN, WASTE_TYPE_TO_DESCRIPTION
from .entity import TwenteMilieuEntity
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Twente Milieu calendar based on a config entry."""
    # NOTE(review): assumes the integration stored its update coordinator
    # in hass.data[DOMAIN] keyed by entry.data[CONF_ID] during entry
    # setup — confirm in the component's __init__.
    coordinator = hass.data[DOMAIN][entry.data[CONF_ID]]
    async_add_entities([TwenteMilieuCalendar(coordinator, entry)])
class TwenteMilieuCalendar(TwenteMilieuEntity, CalendarEntity):
    """Defines a Twente Milieu calendar.

    Exposes the coordinator's waste-pickup schedule as calendar events;
    every pickup becomes a one-day event.
    """
    _attr_has_entity_name = True
    _attr_icon = "mdi:delete-empty"
    # No entity-specific name: the device name alone identifies it.
    _attr_name = None
    def __init__(
        self,
        coordinator: DataUpdateCoordinator[dict[WasteType, list[date]]],
        entry: ConfigEntry,
    ) -> None:
        """Initialize the Twente Milieu entity."""
        super().__init__(coordinator, entry)
        self._attr_unique_id = str(entry.data[CONF_ID])
        # Cached "next pickup" event, refreshed on coordinator updates.
        self._event: CalendarEvent | None = None
    @property
    def event(self) -> CalendarEvent | None:
        """Return the next upcoming event."""
        return self._event
    async def async_get_events(
        self, hass: HomeAssistant, start_date: datetime, end_date: datetime
    ) -> list[CalendarEvent]:
        """Return calendar events within a datetime range."""
        events: list[CalendarEvent] = []
        for waste_type, waste_dates in self.coordinator.data.items():
            # One all-day event per pickup date inside the requested window.
            events.extend(
                CalendarEvent(
                    summary=WASTE_TYPE_TO_DESCRIPTION[waste_type],
                    start=waste_date,
                    end=waste_date + timedelta(days=1),
                )
                for waste_date in waste_dates
                if start_date.date() <= waste_date <= end_date.date()
            )
        return events
    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator.

        Recomputes the cached next-pickup event: the earliest first date
        across waste types that is today or later.
        """
        next_waste_pickup_type = None
        next_waste_pickup_date = None
        for waste_type, waste_dates in self.coordinator.data.items():
            # NOTE(review): only waste_dates[0] is considered per type,
            # which assumes each list is sorted ascending — confirm upstream.
            if (
                waste_dates
                and (
                    next_waste_pickup_date is None
                    or waste_dates[0]  # type: ignore[unreachable]
                    < next_waste_pickup_date
                )
                and waste_dates[0] >= dt_util.now().date()
            ):
                next_waste_pickup_date = waste_dates[0]
                next_waste_pickup_type = waste_type
        self._event = None
        if next_waste_pickup_date is not None and next_waste_pickup_type is not None:
            self._event = CalendarEvent(
                summary=WASTE_TYPE_TO_DESCRIPTION[next_waste_pickup_type],
                start=next_waste_pickup_date,
                end=next_waste_pickup_date + timedelta(days=1),
            )
        super()._handle_coordinator_update()
    async def async_added_to_hass(self) -> None:
        """When entity is added to hass."""
        await super().async_added_to_hass()
        # Seed the cached event from data already held by the coordinator.
        self._handle_coordinator_update()
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
0750081985acee30b91acb274a9231071fbedfeb | d531f502304f4f314a2e5d2e28a98d184b143e42 | /elastic/documents.py | 498ceec138557a0e21834f5db9bfdec2efd61514 | [] | no_license | mikohan/djangoblogtest | 6a0d4704fb51a8ca935ea329cb007b7e48fecb54 | 3af986cd8be3d83ad5b01d2583464e243309dd50 | refs/heads/master | 2020-06-13T15:39:13.686997 | 2019-07-02T05:25:06 | 2019-07-02T05:25:06 | 194,696,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from django_elasticsearch_dsl import DocType, Index
from elastic.models import Post
# Elasticsearch index that will hold the blog posts.
post = Index('posts')


@post.doc_type
class PostDocument(DocType):
    """Elasticsearch document mapping for the Django ``Post`` model."""

    class Meta:
        # Model and model fields that are indexed into Elasticsearch.
        model = Post
        fields = [
            'title',
            'content',
            'timestamp'
        ]
"angara99@gmail.com"
] | angara99@gmail.com |
44338e41bb946806695a6fb18d70a1d6fa64fd0e | 5e989188eb0cfde46f57e033679bd7817eae6620 | /liteeth/phy/common.py | a290dfb1bd84798bfb3c5ab3213b22d8bb6a7865 | [
"BSD-2-Clause"
] | permissive | telantan/liteeth | 1f85b086a7740013f4adfcecc92644fd147085e3 | 73bd27b506211f12f8c515ad93a3cc65a3624dc3 | refs/heads/master | 2020-12-13T16:37:47.229699 | 2020-01-16T14:29:49 | 2020-01-16T14:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # This file is Copyright (c) 2015-2018 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
from liteeth.common import *
from migen.genlib.cdc import MultiReg
from migen.fhdl.specials import Tristate
class LiteEthPHYHWReset(Module):
    """Assert ``self.reset`` for the first 256 sys_clk cycles after startup."""
    def __init__(self):
        self.reset = Signal()

        # # #

        counter = Signal(max=512)
        counter_done = Signal()
        counter_ce = Signal()
        # Count up once per cycle while the counter is enabled.
        self.sync += If(counter_ce, counter.eq(counter + 1))
        self.comb += [
            counter_done.eq(counter == 256),
            # Stop counting (and deassert reset) once 256 is reached.
            counter_ce.eq(~counter_done),
            self.reset.eq(~counter_done)
        ]
class LiteEthPHYMDIO(Module, AutoCSR):
    """CSR-driven bit-banged MDIO interface to an Ethernet PHY.

    The host drives MDC/MDIO through the ``w`` CSR bits
    (bit0 = mdc, bit1 = output enable, bit2 = data out) and reads the
    sampled MDIO line back through the ``r`` CSR.
    """
    def __init__(self, pads):
        self._w = CSRStorage(3, name="w")
        self._r = CSRStatus(1, name="r")

        # # #

        data_w = Signal()
        data_oe = Signal()
        data_r = Signal()
        self.comb += [
            pads.mdc.eq(self._w.storage[0]),
            data_oe.eq(self._w.storage[1]),
            data_w.eq(self._w.storage[2])
        ]
        self.specials += [
            # Synchronize the sampled MDIO input into the CSR clock domain.
            MultiReg(data_r, self._r.status[0]),
            # MDIO is bidirectional: drive it only when data_oe is set.
            Tristate(pads.mdio, data_w, data_oe, data_r)
        ]
| [
"florent@enjoy-digital.fr"
] | florent@enjoy-digital.fr |
521703d00dde8fdf3755df816b625447ecc002e5 | 4a0c047f73458d089dc62bc2be7c3bd098a08ee2 | /data_structor/datetime_format.py | b7c87e2848cb10ed5974e929f4beb7d26f13f2f4 | [] | no_license | sunghyungi/pandas_study | b53e53d88abe733b292c06e2658e2fa21428ffca | b861724995914a4a4644c8b08b3b38070d5abc51 | refs/heads/master | 2020-11-28T02:05:22.565760 | 2020-01-08T03:09:37 | 2020-01-08T03:09:37 | 229,675,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | import pandas as pd
# Demo script: parse the 'Date' column of stock-data.csv into pandas
# datetimes and derive year/month/day and Period columns from it.
df = pd.read_csv('stock-data.csv')

# Console display options (wide East-Asian characters, no truncation).
pd.set_option('display.max_columns', 15)
pd.set_option('display.max_colwidth', 20)
pd.set_option('display.unicode.east_asian_width', True)
pd.set_option('display.width', 600)

print("# 문자열인 날짜 데이터를 판다스 Timestamp로 변환")
# Convert the string dates to pandas Timestamps.
df['new_Date'] = pd.to_datetime(df['Date'])
print(df, '\n')

print("# dt속성을 이용하여 new_Date 열의 년월일 정보를 년, 월, 일로 구분")
# Split the timestamp into separate year/month/day columns via the
# .dt accessor.
df['Year'] = df['new_Date'].dt.year
df['Month'] = df['new_Date'].dt.month
df['Day'] = df['new_Date'].dt.day
print(df, '\n')

print("# Timestamp를 Period로 변환하여 년월일 표기 변경하기")
# Convert the Timestamps to coarser Periods: 'A' = annual, 'M' = monthly.
df['Date_yr'] = df['new_Date'].dt.to_period(freq='A')
df['Date_m'] = df['new_Date'].dt.to_period(freq='M')
print(df, '\n')

print("# 원하는 열을 새로운 행 인덱스로 지정")
# Use the monthly period column as the row index.
df.set_index('Date_m', inplace=True)
print(df)
"tjdgusrlek@gmail.com"
] | tjdgusrlek@gmail.com |
63b51da8acb55197f2b5cf0b3f435534ad187add | bf21cd0ef7a94fa106ccd9f91a4bbfdcda7f94ed | /Deep-Learning/scratch/chapter02/ex01.py | 2c5dc92731ed2fa6ccaecca539c46eeb6749499f | [] | no_license | juneglee/Deep_Learning | fdf8cae1b962aaa0ce557cb53f78a22b6d5ae1e8 | 17a448cf6a7c5b61b967dd78af3d328d63378205 | refs/heads/master | 2023-07-15T03:02:55.739619 | 2021-08-19T14:04:55 | 2021-08-19T14:04:55 | 273,253,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | # 시그모이드 함수 구현하기
import numpy as np
import matplotlib.pylab as plt
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x); works elementwise on arrays."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
# Plot the sigmoid over [-5, 5) with a small vertical margin around the
# (0, 1) output range.
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)
plt.show()

# sigmoid = an S-shaped curve
# Both the step function and the sigmoid are non-linear functions.
| [
"klcpop1@gmail.com"
] | klcpop1@gmail.com |
555f97472640862d217d8dc672bc2c246ae663fb | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_158/624.py | 2a3abc64d2cdf70a551ac5e03e6ac90bbaed9216 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | #!/usr/bin/python
import requests, logging, string, sys
def createOutput(result):
    """Write one "Case #i: answer" line per result to the file named by argv[2]."""
    with open(sys.argv[2], "w") as out:
        for case_number, answer in enumerate(result, start=1):
            out.write("Case #" + str(case_number) + ": " + answer + "\n")
    return
def processResults(X, R, C):
    """Decide the winner of one omino game.

    X is the piece size, R x C the board. Returns "RICHARD" when Richard
    (the piece chooser) wins, "GABRIEL" when Gabriel can always tile.
    """
    # A board whose area is not a multiple of X can never be tiled.
    if (R * C) % X != 0:
        return "RICHARD"
    # Mono- and dominoes always tile a divisible board.
    if X in (1, 2):
        return "GABRIEL"
    if X == 3:
        # A 1-wide strip cannot host every triomino shape.
        return "RICHARD" if R == 1 or C == 1 else "GABRIEL"
    if X == 4:
        # Only these (near-)square small boards work for all tetrominoes.
        return "GABRIEL" if (R, C) in ((4, 4), (4, 3), (3, 4)) else "RICHARD"
def processInput(inputlines):
    """Solve every test case line ("X R C") and return one verdict per line."""
    verdicts = []
    for line in inputlines:
        tokens = line.split(' ')
        X, R, C = int(tokens[0]), int(tokens[1]), int(tokens[2])
        verdicts.append(processResults(X, R, C))
    return verdicts
def readInput():
    """Read the file named by argv[1]: a case count, then one line per case."""
    with open(sys.argv[1]) as source:
        testcases = int(source.readline().strip())
        inputlines = [source.readline().strip() for _ in range(testcases)]
    return inputlines
if __name__ == '__main__':
    # Pipeline: read the cases from argv[1], solve each one, and write
    # "Case #i: ..." lines to argv[2].
    inputlines = readInput()
    result = processInput(inputlines)
    createOutput(result)
    sys.exit()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
cfd620bb6fbedecd779cce1cc00f2d22eddeb425 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Generators/MadGraphModels/python/models/HeavyHiggsTHDM/__init__.py | 948b99c911ee93b63f7295fe1003ccaa5fc58319 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py |
import particles
import couplings
import lorentz
import parameters
import vertices
import coupling_orders
import write_param_card
import propagators
# Fix: function_library is referenced below (all_functions) but was never
# imported, which made importing this model package fail with a NameError.
import function_library

# Re-export the standard UFO model collections.
all_particles = particles.all_particles
all_vertices = vertices.all_vertices
all_couplings = couplings.all_couplings
all_lorentz = lorentz.all_lorentz
all_parameters = parameters.all_parameters
all_orders = coupling_orders.all_orders
all_functions = function_library.all_functions
all_propagators = propagators.all_propagators

# Optional model components: only exported when the module is present.
try:
    import decays
except ImportError:
    pass
else:
    all_decays = decays.all_decays

try:
    import form_factors
except ImportError:
    pass
else:
    all_form_factors = form_factors.all_form_factors

try:
    import CT_vertices
except ImportError:
    pass
else:
    all_CTvertices = CT_vertices.all_CTvertices

# Supported gauges (0 = unitary).
gauge = [0]

__author__ = "N. Christensen, C. Duhr, B. Fuks"
__date__ = "21. 11. 2012"
__version__= "1.4.5"
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
d36ac047086b61bb183185f54828352106cbdb9e | c7a94e7b1956c79f3c390508e60902a6bb56f3c5 | /xlsxwriter/core.py | 905736f039167206a9cfb1549c151f8e084c2bb7 | [
"BSD-2-Clause"
] | permissive | alexander-beedie/XlsxWriter | 635b68d98683efb8404d58f5d896f8e6d433e379 | 03f76666df9ce5ac0ab6bb8ff866d424dc8fea58 | refs/heads/main | 2023-05-27T15:33:36.911705 | 2023-05-04T00:00:04 | 2023-05-04T00:00:04 | 144,862,072 | 0 | 0 | null | 2018-08-15T14:15:51 | 2018-08-15T14:15:50 | null | UTF-8 | Python | false | false | 5,656 | py | ###############################################################################
#
# Core - A class for writing the Excel XLSX Worksheet file.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2023, John McNamara, jmcnamara@cpan.org
#
# Standard packages.
from datetime import datetime
# Package imports.
from . import xmlwriter
class Core(xmlwriter.XMLwriter):
    """
    A class for writing the Excel XLSX Core file.

    The core file (docProps/core.xml) carries the workbook's document
    properties (title, author, creation date, ...).
    """

    ###########################################################################
    #
    # Public API.
    #
    ###########################################################################

    def __init__(self):
        """
        Constructor.
        """

        super(Core, self).__init__()

        # Document properties dict; populated via _set_properties().
        self.properties = {}

    ###########################################################################
    #
    # Private API.
    #
    ###########################################################################

    def _assemble_xml_file(self):
        # Assemble and write the XML file.

        # Write the XML declaration.
        self._xml_declaration()

        # Write each property element. Elements whose property was not
        # set are skipped by the individual writer methods.
        self._write_cp_core_properties()
        self._write_dc_title()
        self._write_dc_subject()
        self._write_dc_creator()
        self._write_cp_keywords()
        self._write_dc_description()
        self._write_cp_last_modified_by()
        self._write_dcterms_created()
        self._write_dcterms_modified()
        self._write_cp_category()
        self._write_cp_content_status()

        self._xml_end_tag("cp:coreProperties")

        # Close the file.
        self._xml_close()

    def _set_properties(self, properties):
        # Set the document properties.
        self.properties = properties

    def _datetime_to_iso8601_date(self, date):
        # Convert to a ISO 8601 style "2010-01-01T00:00:00Z" date.
        # A falsy date (None) defaults to the current UTC time.
        if not date:
            date = datetime.utcnow()

        return date.strftime("%Y-%m-%dT%H:%M:%SZ")

    ###########################################################################
    #
    # XML methods.
    #
    ###########################################################################

    def _write_cp_core_properties(self):
        # Write the <cp:coreProperties> element with its namespace
        # declarations.
        xmlns_cp = (
            "http://schemas.openxmlformats.org/package/2006/"
            + "metadata/core-properties"
        )
        xmlns_dc = "http://purl.org/dc/elements/1.1/"
        xmlns_dcterms = "http://purl.org/dc/terms/"
        xmlns_dcmitype = "http://purl.org/dc/dcmitype/"
        xmlns_xsi = "http://www.w3.org/2001/XMLSchema-instance"

        attributes = [
            ("xmlns:cp", xmlns_cp),
            ("xmlns:dc", xmlns_dc),
            ("xmlns:dcterms", xmlns_dcterms),
            ("xmlns:dcmitype", xmlns_dcmitype),
            ("xmlns:xsi", xmlns_xsi),
        ]

        self._xml_start_tag("cp:coreProperties", attributes)

    def _write_dc_creator(self):
        # Write the <dc:creator> element.
        data = self.properties.get("author", "")

        self._xml_data_element("dc:creator", data)

    def _write_cp_last_modified_by(self):
        # Write the <cp:lastModifiedBy> element. Uses the same "author"
        # property as <dc:creator>.
        data = self.properties.get("author", "")

        self._xml_data_element("cp:lastModifiedBy", data)

    def _write_dcterms_created(self):
        # Write the <dcterms:created> element.
        date = self.properties.get("created", datetime.utcnow())
        xsi_type = "dcterms:W3CDTF"

        date = self._datetime_to_iso8601_date(date)

        attributes = [
            (
                "xsi:type",
                xsi_type,
            )
        ]

        self._xml_data_element("dcterms:created", date, attributes)

    def _write_dcterms_modified(self):
        # Write the <dcterms:modified> element.
        # NOTE: the "created" property is deliberately reused here, so the
        # modified timestamp always equals the created timestamp.
        date = self.properties.get("created", datetime.utcnow())
        xsi_type = "dcterms:W3CDTF"

        date = self._datetime_to_iso8601_date(date)

        attributes = [
            (
                "xsi:type",
                xsi_type,
            )
        ]

        self._xml_data_element("dcterms:modified", date, attributes)

    def _write_dc_title(self):
        # Write the <dc:title> element. Skipped when not set.
        if "title" in self.properties:
            data = self.properties["title"]
        else:
            return

        self._xml_data_element("dc:title", data)

    def _write_dc_subject(self):
        # Write the <dc:subject> element. Skipped when not set.
        if "subject" in self.properties:
            data = self.properties["subject"]
        else:
            return

        self._xml_data_element("dc:subject", data)

    def _write_cp_keywords(self):
        # Write the <cp:keywords> element. Skipped when not set.
        if "keywords" in self.properties:
            data = self.properties["keywords"]
        else:
            return

        self._xml_data_element("cp:keywords", data)

    def _write_dc_description(self):
        # Write the <dc:description> element. Skipped when not set.
        if "comments" in self.properties:
            data = self.properties["comments"]
        else:
            return

        self._xml_data_element("dc:description", data)

    def _write_cp_category(self):
        # Write the <cp:category> element. Skipped when not set.
        if "category" in self.properties:
            data = self.properties["category"]
        else:
            return

        self._xml_data_element("cp:category", data)

    def _write_cp_content_status(self):
        # Write the <cp:contentStatus> element. Skipped when not set.
        if "status" in self.properties:
            data = self.properties["status"]
        else:
            return

        self._xml_data_element("cp:contentStatus", data)
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
c7f48720bb0d186381903465c450342a3c0e979a | 2b82b45edf199488e45cef97571e57dff4a3e824 | /programs/spectralnorm/spectralnorm-numba-2.py | e373f4c513a9114fd405c25ea2883f5c37f7a01e | [
"BSD-3-Clause"
] | permissive | abilian/python-benchmarks | cf8b82d97c0836c65ff00337b649a53bc9af965e | 37a519a2ee835cf53ca0bb78e7e7c83da69d664e | refs/heads/main | 2023-08-05T18:50:53.490042 | 2021-10-01T10:49:33 | 2021-10-01T10:49:33 | 321,367,812 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | # The Computer Language Benchmarks Game
# http://benchmarksgame.alioth.debian.org/
#
# Contributed by Sebastien Loisel
# Fixed by Isaac Gouy
# Sped up by Josh Goldfoot
# Dirtily sped up by Simon Descarpentries
# Sped up with numpy by Kittipong Piyawanno
# 2to3
from numba import jit
from sys import argv
from numpy import *
@jit
def spectralnorm(n):
    # Estimate the spectral norm of the n x n matrix
    # A[i, j] = 1 / ((i + j)(i + j + 1)/2 + i + 1) by repeated
    # multiplication with A^T A (power-iteration style), then print the
    # Rayleigh-quotient estimate sqrt((u.v) / (v.v)) to 9 decimals.
    u = matrix(ones(n))
    j = arange(n)
    eval_func = lambda i: 1.0 / ((i + j) * (i + j + 1) / 2 + i + 1)
    M = matrix([eval_func(i) for i in arange(n)])
    MT = M.T
    for i in range(10):
        v = (u * MT) * M
        u = (v * MT) * M
    print("%0.9f" % (sum(u * v.T) / sum(v * v.T)) ** 0.5)


# NOTE(review): numba's @jit on numpy.matrix arithmetic presumably falls
# back to object mode -- confirm it actually accelerates anything.
spectralnorm(int(argv[1]))
| [
"sf@fermigier.com"
] | sf@fermigier.com |
7d21255d581e353aca38239ac109c88f33e37acd | 13a5a2ab12a65d65a5bbefce5253c21c6bb8e780 | /dnainfo/crimemaps/migrations/0024_nycschoolswatertesting.py | 8679bb07a5f901d7e3c0d9f930f5754ade2f377c | [] | no_license | NiJeLorg/DNAinfo-CrimeMaps | 535b62205fe1eb106d0f610d40f2f2a35e60a09e | 63f3f01b83308294a82565f2dc8ef6f3fbcdb721 | refs/heads/master | 2021-01-23T19:28:12.642479 | 2017-05-11T06:04:08 | 2017-05-11T06:04:08 | 34,847,724 | 2 | 0 | null | 2016-11-25T15:56:14 | 2015-04-30T10:02:41 | JavaScript | UTF-8 | Python | false | false | 1,689 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the NYCschoolsWaterTesting model (school water-testing data).

    Auto-generated Django migration; all data columns are optional
    CharFields plus an auto-now ``created`` timestamp.
    """

    dependencies = [
        ('crimemaps', '0023_nyctrainsitstand'),
    ]

    operations = [
        migrations.CreateModel(
            name='NYCschoolsWaterTesting',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(auto_now=True)),
                ('lc', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('bc', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('ln', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('bn', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('add', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('cit', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('stc', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('zip', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('wtp', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('er', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('dohm', models.CharField(default=b'', max_length=255, null=True, blank=True)),
                ('note', models.CharField(default=b'', max_length=255, null=True, blank=True)),
            ],
        ),
    ]
| [
"jd@nijel.org"
] | jd@nijel.org |
5559ceb97c6a0b3e8d421323d1656c568b83aa72 | 912c4445e7041869d1c8535a493b78d7ee35424b | /status/tests.py | 034f0f6f61543636b32a1de55226078cabb8a2f1 | [] | no_license | maltezc/Udemy-DjangoRestAPI | 3f243ec97ea5e8e9d6ddc2005986b6a05aa11097 | de6f885cf0cddaf22fb6fd72d18fc805b9ce48d2 | refs/heads/master | 2022-12-14T06:04:43.011691 | 2018-08-05T01:10:17 | 2018-08-05T01:10:17 | 140,590,753 | 0 | 0 | null | 2022-11-22T02:48:04 | 2018-07-11T14:56:08 | Python | UTF-8 | Python | false | false | 602 | py |
from django.test import TestCase
from django.contrib.auth import get_user_model
from status.models import Status
# Resolve the active user model once at import time.
User = get_user_model()


class StatusTestCase(TestCase):
    """Tests for creating Status objects tied to a user."""

    def setUp(self):
        # Create a known user for each test (password set via the proper
        # hashing API rather than the create() kwarg).
        user = User.objects.create(username='cfe', email='hello@cfe.com')
        user.set_password("yeahhhcfe")
        user.save()

    def test_creating_status(self):
        # A new Status should get pk 1 in the fresh test DB and be the
        # only row in the table.
        user = User.objects.get(username='cfe')
        obj = Status.objects.create(user=user, content='Some cool new content')
        self.assertEqual(obj.id, 1)
        qs = Status.objects.all()
        self.assertEqual(qs.count(), 1)
"cflux.maltez@live.com"
] | cflux.maltez@live.com |
564a8dc4667af1a3fb616136ef6739ca876695e8 | c9c94fcc33b25ebef73ce7c117ea20721a504701 | /tests/spatial_operator/test_rectangle_knn.py | 1f9a5b64d1da90b85cfa670891069534998dfa1b | [
"Apache-2.0"
] | permissive | Imbruced/geo_pyspark | 46468cc95658fa156144246a45df32116d7ff20e | 26da16d48168789c5f2bb75b5fdec1f515bf9cb1 | refs/heads/master | 2022-12-16T18:56:54.675038 | 2020-02-24T20:32:38 | 2020-02-24T20:32:38 | 204,563,687 | 8 | 3 | Apache-2.0 | 2022-12-08T03:32:18 | 2019-08-26T21:21:00 | Python | UTF-8 | Python | false | false | 2,730 | py | import os
import pytest
from shapely.geometry import Point
from geo_pyspark.core.SpatialRDD import RectangleRDD
from geo_pyspark.core.enums import IndexType, FileDataSplitter
from geo_pyspark.core.geom_types import Envelope
from geo_pyspark.core.spatialOperator import KNNQuery
from tests.test_base import TestBase
from tests.tools import tests_path, distance_sorting_functions
# Test fixtures and parameters shared by the KNN tests below.
inputLocation = os.path.join(tests_path, "resources/zcta510-small.csv")
queryWindowSet = os.path.join(tests_path, "resources/zcta510-small.csv")
# offset passed to RectangleRDD -- presumably the geometry start column
# in the CSV; verify against the RectangleRDD constructor docs.
offset = 0
splitter = FileDataSplitter.CSV
gridType = "rtree"
indexType = "rtree"
numPartitions = 11
distance = 0.001
queryPolygonSet = os.path.join(tests_path, "resources/primaryroads-polygon.csv")
inputCount = 3000
inputBoundary = Envelope(-171.090042, 145.830505, -14.373765, 49.00127)
matchCount = 17599
matchWithOriginalDuplicatesCount = 17738
class TestRectangleKNN(TestBase):
    """K-nearest-neighbour query tests against a RectangleRDD fixture."""

    query_envelope = Envelope(-90.01, -80.01, 30.01, 40.01)
    loop_times = 5
    query_point = Point(-84.01, 34.01)
    top_k = 100

    def test_spatial_knn_query(self):
        # KNN without a spatial index; just checks the call succeeds and
        # results carry their user data.
        rectangle_rdd = RectangleRDD(self.sc, inputLocation, offset, splitter, True)

        for i in range(self.loop_times):
            result = KNNQuery.SpatialKnnQuery(rectangle_rdd, self.query_point, self.top_k, False)
            assert result.__len__() > -1
            assert result[0].getUserData() is not None

    def test_spatial_knn_query_using_index(self):
        # Same smoke test but with an R-tree index built first.
        rectangle_rdd = RectangleRDD(self.sc, inputLocation, offset, splitter, True)
        rectangle_rdd.buildIndex(IndexType.RTREE, False)

        for i in range(self.loop_times):
            result = KNNQuery.SpatialKnnQuery(rectangle_rdd, self.query_point, self.top_k, False)
            assert result.__len__() > -1
            assert result[0].getUserData() is not None

    def test_spatial_knn_query_correctness(self):
        # Indexed and non-indexed queries must return geometrically
        # identical top-k sets (compared after distance-sorting both).
        rectangle_rdd = RectangleRDD(self.sc, inputLocation, offset, splitter, True)

        result_no_index = KNNQuery.SpatialKnnQuery(rectangle_rdd, self.query_point, self.top_k, False)
        rectangle_rdd.buildIndex(IndexType.RTREE, False)
        result_with_index = KNNQuery.SpatialKnnQuery(rectangle_rdd, self.query_point, self.top_k, True)

        sorted_result_no_index = sorted(result_no_index, key=lambda geo_data: distance_sorting_functions(
            geo_data, self.query_point))

        sorted_result_with_index = sorted(result_with_index, key=lambda geo_data: distance_sorting_functions(
            geo_data, self.query_point))

        difference = 0
        for x in range(self.top_k):
            difference += sorted_result_no_index[x].geom.distance(sorted_result_with_index[x].geom)

        assert difference == 0
| [
"pawel93kocinski@gmail.com"
] | pawel93kocinski@gmail.com |
0cc4a2fbc3d553407bee5160d9a38847be8d9dd1 | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L3C/3C-7G_MD_NVT_rerun/set_7.py | 0f46d20e708063854de9926a0271f971f5bca6db | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
# Job-preparation script for a thermodynamic-integration run: for every
# lambda window, copy the production-input and PBS templates into the
# window's directory and substitute the lambda value for "XXX".
dir = '/mnt/scratch/songlin3/run/bace/L3C/MD_NVT_rerun/ti_one-step/3C_7G/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_7.in'
temp_pbs = filesdir + 'temp_7.pbs'

# One lambda value per window; each window lives in a directory named
# with the "%6.5f" formatting of its value.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]

for j in lambd:
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'

    # prodin: copy the template and patch in the lambda value.
    prodin = workdir + "%6.5f_prod_7.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))

    # PBS: same substitution for the batch script.
    pbs = workdir + "%6.5f_7.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))

    # submit pbs (left disabled on purpose)
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
e989e386a134506b91ae97587f771f0c11f17115 | 3fa8eead6e001c4d5a6dc5b1fd4c7b01d7693292 | /ros_final_exam/src/path_exam/src/drone_takeoff.py | 72614d2c3d72572785aac0f590f96c7dbe3cb835 | [] | no_license | MarzanShuvo/Ros_from_the_construct | 09261902841cdd832672658947790ec5fbba4cd3 | 4798234284d9d0bab3751e9d8ac2df95ae34a5bf | refs/heads/master | 2023-08-24T17:28:09.182113 | 2021-10-23T07:57:02 | 2021-10-23T07:57:02 | 339,105,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | #! /usr/bin/env python
import rospy
from std_msgs.msg import Empty
pub = rospy.Publisher('/drone/takeoff', Empty, queue_size=1)
rospy.init_node('taking_off', anonymous=True)
i=0
takeoff_msg = Empty()
while not (i==3):
rospy.loginfo("Taking off....... ")
pub.publish(takeoff_msg)
rospy.sleep(1)
i +=1
| [
"marzanalam3@gmail.com"
] | marzanalam3@gmail.com |
4fc4e6b6c216cafeb53c0703782cfe3a9f1fdd53 | 730f89724aca038c15191f01d48e995cb94648bc | /entrances/migrations/0009_auto_20141110_1309.py | 17928096718feb7f3db7ab280502d06f3d51621d | [] | no_license | Happyandhappy/django_email | 14bc3f63376f2568754292708ec8ca7f2e2cf195 | ea858c9fac79112542551b7ba6e899e348f24de3 | refs/heads/master | 2020-03-22T14:22:08.431334 | 2018-07-21T13:41:23 | 2018-07-21T13:41:23 | 140,174,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add Apartment.apartment_integer and relabel Apartment.apartment.

    Auto-generated Django migration: adds a hidden (non-editable) integer
    companion field and changes the verbose name of the char field.
    """

    dependencies = [
        ('entrances', '0008_auto_20141110_1307'),
    ]

    operations = [
        migrations.AddField(
            model_name='apartment',
            name='apartment_integer',
            field=models.IntegerField(null=True, editable=False, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='apartment',
            name='apartment',
            field=models.CharField(max_length=255, verbose_name='Apartment'),
            preserve_default=True,
        ),
    ]
| [
"greyfrapp@gmail.com"
] | greyfrapp@gmail.com |
4ee9e4930ad0c277ac82dc545653ba3ef880b7e6 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/sklearn/metrics/_scorer.py | 4bc04f4d204ed29c114a974229d0a44f0ba6f1b8 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:8f37b7f3b8da7123e4a98b2e1da0d724580dcc50f279d05bd2498a751b565c7c
size 29542
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
5078acc63f2d24dd99dc18787c02cd2665fb8670 | 48894ae68f0234e263d325470178d67ab313c73e | /sa/profiles/Alentis/NetPing/highlight.py | 60164c4a96843307785a0cc7612ad8968ecfa497 | [
"BSD-3-Clause"
] | permissive | DreamerDDL/noc | 7f949f55bb2c02c15ac2cc46bc62d957aee43a86 | 2ab0ab7718bb7116da2c3953efd466757e11d9ce | refs/heads/master | 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null | UTF-8 | Python | false | false | 1,558 | py | # -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## Alentis.NetPing highlight lexers
##----------------------------------------------------------------------
## Copyright (C) 2007-2014 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import *
class ConfigLexer(RegexLexer):
    """Pygments lexer for highlighting device configuration text.

    NOTE(review): despite the Alentis.NetPing name, the token rules
    (interface/dial-peer/vlan, NSAP, dotted MAC) look copied from a
    Cisco-IOS-style config lexer -- confirm they match NetPing output.
    """
    name = "Alentis.NetPing"
    tokens = {
        "root": [
            (r"^!.*", Comment),
            (r"(description)(.*?)$", bygroups(Keyword, Comment)),
            (r"(password|shared-secret|secret)(\s+[57]\s+)(\S+)",
             bygroups(Keyword, Number, String.Double)),
            (r"(ca trustpoint\s+)(\S+)", bygroups(Keyword, String.Double)),
            (r"^(interface|controller|router \S+|voice translation-\S+|voice-port)(.*?)$", bygroups(Keyword, Name.Attribute)),
            (r"^(dial-peer\s+\S+\s+)(\S+)(.*?)$",
             bygroups(Keyword, Name.Attribute, Keyword)),
            (r"^(vlan\s+)(\d+)$", bygroups(Keyword, Name.Attribute)),
            (r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(/\d{1,2})?",
             Number),  # IPv4 Address/Prefix
            (r"49\.\d{4}\.\d{4}\.\d{4}\.\d{4}\.\d{2}", Number),  # NSAP
            (r"(\s+[0-9a-f]{4}\.[0-9a-f]{4}\.[0-9a-f]{4}\s+)",
             Number),  # MAC Address
            (r"^(?:no\s+)?\S+", Keyword),
            (r"\s+\d+\s+\d*|,\d+|-\d+", Number),
            (r".", Text)
        ]
    }
| [
"dmitryluhtionov@gmail.com"
] | dmitryluhtionov@gmail.com |
eadd7513a637a4fbfe75dbf5f146bd0eb9c4b2a3 | e768a26a03283628ceccf98a021e9441101aae0c | /lstail/util/timestamp.py | 937ec3dd3a7938b64cdc4058ff09eb367a7a4a9f | [
"MIT"
] | permissive | eht16/lstail | d8a4ecadf41b71c72bcc54ab59ce7229f7060d00 | 8fb61e9d07b05b27e3d45e988afe0c198010248d | refs/heads/master | 2023-01-24T02:11:54.864001 | 2021-06-24T20:09:07 | 2021-06-24T20:09:07 | 231,070,462 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | py | # -*- coding: utf-8 -*-
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
from datetime import datetime, timedelta
from lstail.constants import ELASTICSEARCH_TIMESTAMP_FORMATS
from lstail.error import InvalidTimeRangeFormatError, InvalidTimestampFormatError
# ----------------------------------------------------------------------
def parse_and_convert_time_range_to_start_date_time(time_range):
    """Turn a relative range ("60", "5m", "12h", "7d") into a start datetime.

    A plain integer is taken as seconds; the suffixes m/h/d select
    minutes, hours and days. Raises InvalidTimeRangeFormatError for any
    other shape or for negative values.
    """
    error_message = 'Invalid time range specified: {}. ' \
        'Valid examples are: 60, 5m, 12h, 7d'.format(time_range)

    try:
        # Bare integer: interpret directly as seconds.
        seconds = value = int(time_range)
    except TypeError as exc_type:
        raise InvalidTimeRangeFormatError(error_message) from exc_type
    except ValueError as exc_value:
        # Not a bare integer: expect "<number><unit>".
        try:
            unit = time_range[-1]
            value = int(time_range[:-1])
        except (ValueError, IndexError):
            raise InvalidTimeRangeFormatError(error_message) from exc_value

        seconds_per_unit = {'d': 86400, 'h': 3600, 'm': 60}
        if unit not in seconds_per_unit:
            raise InvalidTimeRangeFormatError(error_message) from exc_value
        seconds = value * seconds_per_unit[unit]

    if value < 0:
        raise InvalidTimeRangeFormatError(error_message)

    return datetime.now() - timedelta(seconds=seconds)
# ----------------------------------------------------------------------
def parse_timestamp_from_elasticsearch(timestamp):
    """Parse a timestamp string, trying every known Elasticsearch format."""
    for candidate_format in ELASTICSEARCH_TIMESTAMP_FORMATS:
        try:
            return datetime.strptime(timestamp, candidate_format)
        except ValueError:
            pass  # wrong format, try the next one

    # No supported format matched the input string.
    raise InvalidTimestampFormatError(timestamp)
| [
"enrico.troeger@uvena.de"
] | enrico.troeger@uvena.de |
21b40d08fed1635c5fe1d8ce52d30d0da90e50af | 786de89be635eb21295070a6a3452f3a7fe6712c | /pyana_examples/tags/V00-00-22/src/myana_epics.py | 43a50ac4ee210d95837b5b08f3fe499855f1f31f | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,275 | py | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# Pyana user analysis module myana_epics...
#
#------------------------------------------------------------------------
"""User analysis module for pyana framework.
This software was developed for the LCLS project. If you use all or
part of it, please give an appropriate acknowledgment.
@see RelatedModule
@version $Id$
@author Andrei Salnikov
"""
#------------------------------
# Module's version from SVN --
#------------------------------
__version__ = "$Revision$"
# $Source$
#--------------------------------
# Imports of standard modules --
#--------------------------------
import sys
import logging
#-----------------------------
# Imports for other modules --
#-----------------------------
#----------------------------------
# Local non-exported definitions --
#----------------------------------
# local definitions usually start with _
#---------------------
# Class definition --
#---------------------
class myana_epics (object) :
    """Example analysis module which accesses EPICS data. """

    #----------------
    #  Constructor --
    #----------------
    def __init__ ( self, pv = "BEAM:LCLS:ELEC:Q") :
        """Class constructor. The parameters to the constructor are passed
           from pyana configuration file. If parameters do not have default
           values here then the must be defined in pyana.cfg. All parameters
           are passed as strings, convert to correct type before use.

           @param pv   Name of the EPICS PV to dump
        """
        # Remember the PV name; looked up in the EPICS store on every event.
        self.m_pv = pv

    #-------------------
    #  Public methods --
    #-------------------

    def beginjob( self, evt, env ) :
        # Preferred way to log information is via logging package
        logging.info( "myana_epics.beginjob() called" )

        # Use environment object to access EPICS data
        pv = env.epicsStore().value(self.m_pv)
        if not pv:
            logging.warning('EPICS PV %s does not exist', self.m_pv)
        else:
            # Returned value should be of the type epics.EpicsPvCtrl.
            # The code here demonstrates few members accessible for that type.
            # For full list of members see Pyana Ref. Manual.
            print "PV %s: id=%d type=%d size=%d status=%s severity=%s values=%s" % \
                (self.m_pv, pv.iPvId, pv.iDbrType, pv.iNumElements,
                 pv.status, pv.severity, pv.values)

    def event( self, evt, env ) :
        # Use environment object to access EPICS data
        pv = env.epicsStore().value(self.m_pv)
        if not pv:
            logging.warning('EPICS PV %s does not exist', self.m_pv)
        else:
            # Returned value should be of the type epics.EpicsPvTime.
            # The code here demonstrates few members accessible for that type.
            # For full list of members see Pyana Ref. Manual.
            print "PV %s: id=%d type=%d size=%d status=%s severity=%s values=%s stamp=%s" % \
                (self.m_pv, pv.iPvId, pv.iDbrType, pv.iNumElements,
                 pv.status, pv.severity, pv.values, pv.stamp)

    def endjob( self, env ) :
        # Nothing to clean up for this example module.
        pass
| [
"salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 |
2493c1cc3a4b0fe3b2854c9e23fc45bfface1968 | d50f50f455a2f96e7fbd9fb76fcdcdd71b8cc27c | /Day-23/Day23_Shahazada(ST).py | c48889980ab7f74e9d0a9c64863664893954a65a | [] | no_license | Rushi21-kesh/30DayOfPython | 9b2cc734c553b81d98593031a334b9a556640656 | d9741081716c3cf67823e2acf37f015b5906b913 | refs/heads/main | 2023-06-29T13:18:09.635799 | 2021-07-30T13:33:04 | 2021-07-30T13:33:04 | 384,316,331 | 1 | 0 | null | 2021-07-09T04:01:24 | 2021-07-09T04:01:23 | null | UTF-8 | Python | false | false | 695 | py | '''This program rotate list cyclically by user choice'''
if __name__ == '__main__':
n=int(input("Enter the size of list:- "))
print("Enter element of list")
elementList=[]
for i in range(n):
ele=int(input())
elementList.append(ele)
rotatedlist=[]
rotateBy=int(input("By how many element want to rotate:- "))
for i in range(n-1,n-rotateBy-1,-1):
rotatedlist.append(elementList[i])
for i in range(n-rotateBy):
rotatedlist.append(elementList[i])
print()
print("Rotated cyclically list element are :-",end=" ")
for i in range(n):
print(rotatedlist[i],end=" ")
| [
"noreply@github.com"
] | Rushi21-kesh.noreply@github.com |
eca0c55b107bd3d4779cf6d82077c32e6d204a7c | d7d524d1c0ba1cf62cdbc2f9bf5b9c66fa56726b | /47high.py | 7e7773c0544de60d34e249dd843254966da9d18b | [] | no_license | ramyasutraye/pythonproject | d997ca5ada024e211b6bf087d0d56684daf9df8b | 38975a99eb3ee1ad9e79a9efd538cc992d249fc3 | refs/heads/master | 2020-04-23T19:30:10.128774 | 2018-05-25T06:18:53 | 2018-05-25T06:18:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | n=int(input("enter the number:"))
a = []
for _ in range(0, n):
    # Collect n integers from the user, one per prompt.
    a.append(int(input("Enter number:")))
# min()/max() scan the list directly — the original's a.sort() was dead
# work (O(n log n)) since min/max do not need sorted input.
print(min(a), max(a))
| [
"noreply@github.com"
] | ramyasutraye.noreply@github.com |
e0a183e93aadcfada6ef5a4998601ae0e7797837 | 6f9a5717fed38b0a79c399f7e5da55c6a461de6d | /Baekjoon/CardPurchase.py | 9a394075fa2e93607643af50ef1def9dbd576b48 | [] | no_license | Alfred-Walker/pythonps | d4d3b0f7fe93c138d02651e05ca5165825676a5e | 81ef8c712c36aa83d1c53aa50886eb845378d035 | refs/heads/master | 2022-04-16T21:34:39.316565 | 2020-04-10T07:50:46 | 2020-04-10T07:50:46 | 254,570,527 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,845 | py | # 요즘 민규네 동네에서는 스타트링크에서 만든 PS카드를 모으는 것이 유행이다.
#
# PS카드는 PS(Problem Solving)분야에서 유명한 사람들의 아이디와 얼굴이 적혀있는 카드이다.
# 각각의 카드에는 등급을 나타내는 색이 칠해져 있고, 다음과 같이 8가지가 있다.
#
# 전설카드
# 레드카드
# 오렌지카드
# 퍼플카드
# 블루카드
# 청록카드
# 그린카드
# 그레이카드
# 카드는 카드팩의 형태로만 구매할 수 있고, 카드팩의 종류는 카드
# 1개가 포함된 카드팩, 카드 2개가 포함된 카드팩, ... 카드 N개가 포함된 카드팩
# 과 같이 총 N가지가 존재한다.
#
# 민규는 카드의 개수가 적은 팩이더라도 가격이 비싸면 높은 등급의 카드가 많이 들어있을 것이라는 미신을 믿고 있다.
# 따라서, 민규는 돈을 최대한 많이 지불해서 카드 N개 구매하려고 한다. 카드가 i개 포함된 카드팩의 가격은 Pi원이다.
#
# 예를 들어, 카드팩이 총 4가지 종류가 있고, P1 = 1, P2 = 5, P3 = 6, P4 = 7인 경우에
# 민규가 카드 4개를 갖기 위해 지불해야 하는 금액의 최댓값은 10원이다. 2개 들어있는 카드팩을 2번 사면 된다.
#
# P1 = 5, P2 = 2, P3 = 8, P4 = 10인 경우에는
# 카드가 1개 들어있는 카드팩을 4번 사면 20원이고, 이 경우가 민규가 지불해야 하는 금액의 최댓값이다.
#
# 마지막으로, P1 = 3, P2 = 5, P3 = 15, P4 = 16인 경우에는
# 3개 들어있는 카드팩과 1개 들어있는 카드팩을 구매해 18원을 지불하는 것이 최댓값이다.
#
# 카드 팩의 가격이 주어졌을 때, N개의 카드를 구매하기 위해 민규가 지불해야 하는 금액의 최댓값을 구하는 프로그램을 작성하시오.
# N개보다 많은 개수의 카드를 산 다음, 나머지 카드를 버려서 N개를 만드는 것은 불가능하다.
# 즉, 구매한 카드팩에 포함되어 있는 카드 개수의 합은 N과 같아야 한다.
#
# 입력
# 첫째 줄에 민규가 구매하려고 하는 카드의 개수 N이 주어진다. (1 ≤ N ≤ 1,000)
# 둘째 줄에는 Pi가 P1부터 PN까지 순서대로 주어진다. (1 ≤ Pi ≤ 10,000)
#
# 출력
# 첫째 줄에 민규가 카드 N개를 갖기 위해 지불해야 하는 금액의 최댓값을 출력한다.
import sys
def max_payment(card_count, pack_prices):
    """Return the maximum amount payable to buy exactly *card_count* cards.

    pack_prices is 1-indexed (index 0 unused): pack_prices[j] is the price
    of the pack containing j cards.  Classic unbounded-knapsack DP where
    dp[i] is the best total price for exactly i cards.
    """
    dp = [0] * (card_count + 1)
    for i in range(1, card_count + 1):
        # Try ending the purchase with a pack of j cards.
        dp[i] = max(dp[i - j] + pack_prices[j] for j in range(1, i + 1))
    return dp[card_count]


if __name__ == '__main__':
    # Guarded so importing this module does not consume stdin.
    N = int(sys.stdin.readline().rstrip())  # number of cards to buy
    P = [0] + list(map(int, sys.stdin.readline().rstrip().split()))  # pad for 1-indexing
    print(max_payment(N, P))
| [
"studio.alfred.walker@gmail.com"
] | studio.alfred.walker@gmail.com |
97952047d650f3e84518e6583fe08909eb2da9a6 | 9ac19e6733e1f91bb9cb0fe47967491a5e856040 | /test/test_revoke.py | 8c0a82737e93f456dc810b7cc587fccd0bb78a27 | [
"MIT"
] | permissive | DS4SD/project-mognet | 7898b41046a31b82052b1424e6910cb65b14e5c5 | 9e415e88404da0a0eab3b379d6cd7b7d15ca71a6 | refs/heads/main | 2023-05-23T22:02:10.406590 | 2022-07-13T11:53:28 | 2022-07-13T11:53:28 | 474,094,219 | 5 | 1 | MIT | 2022-07-13T11:53:29 | 2022-03-25T16:55:53 | Python | UTF-8 | Python | false | false | 1,402 | py | import asyncio
import uuid
from mognet.model.result import ResultFailed
from mognet.model.result_state import ResultState
import pytest
from mognet import App, Request, Context, task
@pytest.mark.asyncio
async def test_revoke(test_app: App):
    """A running task, once revoked, ends up in the REVOKED state and its
    result raises ResultFailed when awaited."""
    request = Request(name="test.sleep", args=(10,))
    await test_app.submit(request)
    # Give the task a moment to start before revoking it.
    await asyncio.sleep(2)
    await test_app.revoke(request.id)
    result = await test_app.result_backend.get(request.id)
    assert result is not None
    assert result.state == ResultState.REVOKED
    assert result.revoked
    with pytest.raises(ResultFailed):
        await result
@task(name="test.recurses_after_wait")
async def recurses_after_wait(context: Context, child_id: uuid.UUID):
    """Sleep, then submit a child request with the given id.

    The submit lives in a ``finally`` block, so the child is sent even when
    the sleep is interrupted (e.g. when this task is revoked) — presumably
    what the cascade-revocation test relies on.
    """
    req = Request(name="test.add", id=child_id, args=(1, 2))
    try:
        await asyncio.sleep(5)
    finally:
        # Runs on normal completion AND on cancellation/revocation.
        await context.submit(req)
@pytest.mark.asyncio
async def test_revokes_children_if_parent_revoked(test_app: App):
    """Revoking a parent task must cascade to children it submits later."""
    child_id = uuid.uuid4()
    parent_request = Request(name="test.recurses_after_wait", args=(child_id,))
    await test_app.submit(parent_request)
    # Let the parent start, revoke it, then let the revocation propagate.
    await asyncio.sleep(1)
    await test_app.revoke(parent_request.id)
    await asyncio.sleep(1)
    child_result = await test_app.result_backend.get(child_id)
    assert child_result is not None
    assert child_result.state == ResultState.REVOKED
    assert child_result.revoked
    with pytest.raises(ResultFailed):
        await child_result
| [
"dol@zurich.ibm.com"
] | dol@zurich.ibm.com |
9c26d10ade54eaa70dce931bd5513bb7e4b1f601 | 1b5802806cdf2c3b6f57a7b826c3e064aac51d98 | /tensorrt-basic-1.10-3rd-plugin/TensorRT-main/demo/HuggingFace/NNDF/general_utils.py | 64717b1eb53c28be3c1809bc124766cc218189cd | [
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"ISC",
"BSD-2-Clause"
] | permissive | jinmin527/learning-cuda-trt | def70b3b1b23b421ab7844237ce39ca1f176b297 | 81438d602344c977ef3cab71bd04995c1834e51c | refs/heads/main | 2023-05-23T08:56:09.205628 | 2022-07-24T02:48:24 | 2022-07-24T02:48:24 | 517,213,903 | 36 | 18 | null | 2022-07-24T03:05:05 | 2022-07-24T03:05:05 | null | UTF-8 | Python | false | false | 6,519 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common utils used by demo folder."""
import os
import shutil
import timeit
from shutil import rmtree
from typing import Callable, Union, List
from collections import defaultdict
from statistics import mean, median
from glob import glob
# NNDF
from NNDF.networks import NNConfig, NetworkResult, NetworkMetadata
from NNDF.logger import G_LOGGER
# Used for HuggingFace setting random seed
RANDOM_SEED = 42
# Networks #
def register_network_folders(
    root_dir: str, config_file_str: str = "*Config.py"
) -> List[str]:
    """Return the names of the network folders directly under *root_dir*.

    A folder qualifies when it contains a file matching *config_file_str*
    (default: any ``*Config.py``).
    """
    pattern = os.path.join(root_dir, "*", config_file_str)
    # basename(dirname(path)) is the folder holding the config file.
    return [os.path.basename(os.path.dirname(cfg)) for cfg in glob(pattern)]
def process_results(category: List[str], results: List[NetworkResult], nconfig: NNConfig):
    """Aggregate per-script results into a (headers, rows) table.

    Each row holds the script name, its accuracy, and the average runtime of
    every network segment (plus the full network when it is not already a
    segment).
    """
    general_stats = ["script", "accuracy"]
    segment_names = list(nconfig.NETWORK_SEGMENTS)
    if nconfig.NETWORK_FULL_NAME not in nconfig.NETWORK_SEGMENTS:
        segment_names.append(nconfig.NETWORK_FULL_NAME)

    rows = []
    for script_name, result in zip(category, results):
        # Group every observed runtime by segment name.
        runtimes_by_segment = defaultdict(list)
        for network_result in result.network_results:
            for runtime in network_result.median_runtime:
                runtimes_by_segment[runtime.name].append(runtime.runtime)
        # Average each segment's runtimes across all runs of this script.
        segment_averages = {name: mean(times)
                            for name, times in runtimes_by_segment.items()}
        rows.append([script_name, result.accuracy]
                    + [segment_averages[name] for name in segment_names])

    headers = general_stats + [name + " (sec)" for name in segment_names]
    return headers, rows
def process_per_result_entries(script_category: List[str], results: List[NetworkResult], max_output_char:int = 30):
    """Build per-entry tabulation data, grouped by the hash of each input.

    Returns (headers, {hash(input): [row, ...]}) where long input/output
    strings are clipped to roughly *max_output_char* characters.
    """
    def _clip(text):
        # Keep the original slice expressions so odd max_output_char values
        # clip exactly as before.
        if len(text) <= max_output_char:
            return text
        return text[0:max_output_char // 2] + " ... " + text[-max_output_char // 2:]

    headers = ["script", "network_part", "accuracy", "runtime", "input", "output"]
    rows_by_input = defaultdict(list)
    for script_name, result in zip(script_category, results):
        for network_result in result.network_results:
            for runtime in network_result.median_runtime:
                rows_by_input[hash(network_result.input)].append([
                    script_name,
                    runtime.name,
                    result.accuracy,
                    runtime.runtime,
                    _clip(network_result.input),
                    _clip(network_result.semantic_output),
                ])
    return headers, dict(rows_by_input)
# IO #
def confirm_folder_delete(
    fpath: str, prompt: str = "Confirm you want to delete entire folder?"
) -> None:
    """
    Interactively delete *fpath* (recursively) after user confirmation.

    Args:
        fpath (str): Path to folder.
        prompt (str): Prompt to display.

    Returns:
        None
    """
    answer = input(prompt + " {} [Y/n] ".format(fpath))
    if answer == "Y":
        rmtree(fpath)
        return
    G_LOGGER.info("Skipping file removal.")
def remove_if_empty(
    fpath: str,
    success_msg: str = "Folder successfully removed.",
    error_msg: str = "Folder cannot be removed, there are files.",
) -> None:
    """
    Remove the folder at *fpath* only when it is empty, logging the outcome.

    Args:
        fpath: Location of the folder.
        success_msg: Message logged when the folder was removed.
        error_msg: Message logged when the folder still contains files.

    Returns:
        None
    """
    is_empty = not os.listdir(fpath)
    if is_empty:
        os.rmdir(fpath)
    message = (success_msg if is_empty else error_msg) + " {}".format(fpath)
    G_LOGGER.info(message)
def measure_python_inference_code(
    stmt: Union[Callable, str], warmup: int = 3, number: int = 10, iterations: int = 10
) -> None:
    """
    Measure the median per-call time of Pythonic inference code.

    *stmt* should be the actual model inference (e.g. torch forward()); see
    the timeit module for how stmt is interpreted.

    Args:
        stmt (Union[Callable, str]): Callable or string to time.
        warmup (int): Number of discarded warmup measurement cycles.
        number (int): Calls per measurement cycle.
        iterations (int): Number of measurement cycles.
    """
    G_LOGGER.debug(
        "Measuring inference call with warmup: {} and number: {} and iterations {}".format(
            warmup, number, iterations
        )
    )
    # Warmup runs are measured but only logged, never returned.
    warmup_times = timeit.repeat(stmt, number=number, repeat=warmup)
    G_LOGGER.debug("Warmup times: {}".format(warmup_times))
    timings = timeit.repeat(stmt, number=number, repeat=iterations)
    # Median cycle time divided by calls-per-cycle -> per-call time.
    return median(timings) / number
class NNFolderWorkspace:
    """
    Tracks a per-network, per-variant workspace folder and cleans it up.

    The workspace is split by model variant because ONNX model conversion
    can corrupt a folder.
    """

    def __init__(
        self, network_name: str, metadata: NetworkMetadata, working_directory: str
    ):
        self.rootdir = working_directory
        self.metadata = metadata
        self.network_name = network_name
        # <working_directory>/<network_name>/<variant>, created eagerly.
        self.dpath = os.path.join(working_directory, network_name, metadata.variant)
        os.makedirs(self.dpath, exist_ok=True)

    def get_path(self) -> str:
        """Return the absolute-ish workspace folder path."""
        return self.dpath

    def cleanup(self, force_remove: bool = False) -> None:
        """Remove the workspace; forcibly when *force_remove*, else only if empty."""
        workspace_path = self.get_path()
        if force_remove:
            return shutil.rmtree(workspace_path)
        remove_if_empty(
            workspace_path,
            success_msg="Sucessfully removed workspace.",
            error_msg="Unable to remove workspace.",
        )
| [
"dujw@deepblueai.com"
] | dujw@deepblueai.com |
056c67711ce448d1738be23298e3ca357a3e5980 | 6800da49fb74cbc0079d3106762122ea102562be | /channel_manager.py | a7711d6079828cd8d5634f7a70a045f8e2856764 | [] | no_license | legoktm/adminbots | 8f9e03eb2002addf0e0589d627202cd977bafd7e | 0b0a913c8b1ad3d92b77d6352660a05af54f5e06 | refs/heads/master | 2016-09-05T19:17:30.683600 | 2013-11-21T01:51:20 | 2013-11-21T01:51:20 | 10,352,998 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | # -*- coding: utf-8 -*-
# (C) Legoktm, 2013
# Licensed under the MIT License
# Assists with joining/parting channels
import os
import yaml
from mtirc import hooks
# Per-user channel configuration file: {server: [channel, ...]} in YAML.
filename = os.path.expanduser('~/channels.yml')
def get_channel_list():
    """Parse ~/channels.yml into a {server: [channels]} mapping."""
    with open(filename) as config_file:
        contents = config_file.read()
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; yaml.safe_load would be preferable here.
    return yaml.load(contents)
def on_connect(**kw):
    """When a server connects, join every channel configured for it."""
    # .get with an empty default: servers absent from the config are skipped.
    for channel in get_channel_list().get(kw['server'], ()):
        kw['bot'].servers[kw['server']].join(channel)
def on_msg(**kw):
    """On "!reload channels", re-read the config and (re-)join channels on
    every server the bot is currently connected to."""
    if kw['text'] != '!reload channels':
        return
    data = get_channel_list()
    for server, channels in data.items():
        if server not in kw['bot'].servers:
            continue
        for channel in channels:
            kw['bot'].servers[server].join(channel)
# Register the handlers with mtirc: join configured channels on connect,
# and re-join on the "!reload channels" command.
hooks.add_hook('connected', 'channel_joiner', on_connect)
hooks.add_hook('on_msg', 'channel_reloader', on_msg)
| [
"legoktm@gmail.com"
] | legoktm@gmail.com |
da01bc57ce96ce7a637d80966352e5dd5539954c | b2ff7365dda9fa9290c2eae04988e3bda9cae23a | /13_top_k/8.py | 74578e1adccfe804dc3394e7b47b49305637cb5f | [] | no_license | terrifyzhao/educative3 | cd6ccdb0fc4b9ba7f5058fe2e3d2707f022d8b16 | 5c7db9ef6cf58ca5e68bb5aec8ed95af1d5c0f47 | refs/heads/master | 2022-12-26T23:53:30.645339 | 2020-10-10T00:48:22 | 2020-10-10T00:48:22 | 298,991,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | from heapq import *
def find_closest_elements(arr, K, X):
    """Return the K values of *arr* closest to X.

    Results come out ordered by increasing distance from X, with ties broken
    by the element's original index (the heap compares [distance, index]).
    """
    distance_heap = []
    for index, value in enumerate(arr):
        heappush(distance_heap, [abs(value - X), index])
    closest = []
    for _ in range(K):
        _, index = heappop(distance_heap)
        closest.append(arr[index])
    return closest
def main():
    """Demo runs mirroring the textbook examples."""
    for nums, k, x in (
        ([5, 6, 7, 8, 9], 3, 7),
        ([2, 4, 5, 6, 9], 3, 6),
        ([2, 4, 5, 6, 9], 3, 10),
    ):
        print("'K' closest numbers to 'X' are: " + str(find_closest_elements(nums, k, x)))


main()
| [
"zjiuzhou@gmail.com"
] | zjiuzhou@gmail.com |
aaacb65e368ef4189378c6a8e678963699b64664 | 8b8351c8d0431a95d2e1ad88a1ef42470ff6f66c | /python/exceptions_hierarchy.py | d02f55310a599e914c6972c208f5bba08b75fd07 | [] | no_license | miso-belica/playground | 2d771197cca8d8a0031466c97317bfa38bb2faff | 6c68f648301801785db8b3b26cb3f31b782389ec | refs/heads/main | 2022-11-29T09:17:24.661966 | 2022-11-23T22:19:50 | 2022-11-24T07:20:13 | 30,124,960 | 14 | 8 | null | 2022-11-24T07:20:14 | 2015-01-31T20:18:53 | Python | UTF-8 | Python | false | false | 386 | py | # -*- coding: utf-8 -*-
import sys
import time
if __name__ == "__main__":
    try:
        # time.sleep(5)
        # sys.exit(1)
        raise ValueError("value")
    except Exception as e:
        # ValueError subclasses Exception, so it is caught here.
        print("Catched ValueError!", e)
    try:
        sys.exit(1)
    except Exception:
        # Never taken: sys.exit() raises SystemExit, which derives from
        # BaseException, NOT Exception.
        print("Catched exit by Exception.")
    except:
        # A bare except catches BaseException, so SystemExit lands here —
        # the point this demo script illustrates.
        print("Catched exit by empty except")
| [
"miso.belica@gmail.com"
] | miso.belica@gmail.com |
40bb5c70b623f899a77f7317d99cbb2312d64a19 | 086ece6952b56602c20709bfa219037b0375ab0c | /ENGLISH_DETECTION.py | 085150b7a9dbb3fd571fb2f0c15a3e22448c73fd | [] | no_license | pseudo11235813/Random-Python-Ciphers | 71aca8561b003ab8818e4d934288aef8d2779e9c | 48f04b2e6a32ea67a2bc88d0bb283a51fd5150e5 | refs/heads/master | 2022-12-20T03:53:04.643760 | 2020-09-21T19:16:41 | 2020-09-21T19:16:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,508 | py | #Self-dev English Detector module
#you need to download an English dictionary Text File to use this module
#go to google or any search engine and download an English Dictionary File
#a one you can use :
#import eng
#eng.isEnglish(message) ; will return false or true.
#eng.DictLoader() ; will ask you for a the English dictionary file name then returns a dictionary that contains all the words in that dictionary file.
#eng.countWords() ; will return the percentage of English words in the entered text .
#eng.englishWords() ; will return the english words in the entered text in a list.
Englishletters = "abcdefghijklmnopqrstuvwxyz"
# Letters plus the whitespace characters that separate words.
fullEnglishLetters = Englishletters + " \n\t"


def DictLoader():
    """Interactively load an English dictionary text file.

    Prompts for a file name (one word per line) and returns a dict mapping
    each word to None, so membership tests are O(1).
    """
    dictFileName = input("enter the dictionary file name (make sure to specify the whole path or copy the dictionary file into the python32 folder and just type the File's name) : ")
    englishWords = {}
    # 'with' guarantees the file is closed even if reading fails.
    with open(dictFileName) as dictFile:
        for word in dictFile.read().split('\n'):
            englishWords[word] = None
    return englishWords


def countWords(text, wordsDict=None):
    """Return the percentage (0-100) of words in *text* found in the dictionary.

    Non-letter characters are stripped first and the remainder is split on
    whitespace.  *wordsDict* may be any mapping keyed by word; when omitted,
    DictLoader() is called to load one interactively (the original behaviour).

    Returns the int 0 when *text* contains no letters or no words (the
    original raised ZeroDivisionError on whitespace-only input).
    """
    letters = [char for char in text if char in fullEnglishLetters]
    if not letters:
        return 0
    words = ''.join(letters).split()
    if not words:
        # Bug fix: whitespace-only text used to raise ZeroDivisionError.
        return 0
    if wordsDict is None:
        wordsDict = DictLoader()
    matches = sum(1 for word in words if word in wordsDict)
    return (matches / len(words)) * 100
def isEnglish(text, percantage=35, letterPercentage=5):
    """Heuristic English check: enough dictionary words AND enough letters.

    (The parameter name 'percantage' is kept as-is for keyword callers.)
    """
    # countWords() runs first so the interactive dictionary prompt happens
    # before anything else, exactly as in the original.
    wordsMatch = countWords(text) >= percantage
    letter_chars = [char for char in text if char in fullEnglishLetters]
    letters_only = ''.join(letter_chars)
    lettersPercentage = len(letters_only) / len(text) * 100
    lettersMatch = lettersPercentage >= letterPercentage
    return wordsMatch and lettersMatch
return wordsMatch and lettersMatch
def englishWords(text, wordsDict=None):
    """Return the list of dictionary words that occur in *text*.

    Non-letter characters are stripped before splitting on whitespace.
    *wordsDict*: optional mapping keyed by word; when omitted, one is loaded
    interactively via DictLoader() (the original behaviour).

    Note: returns the int 0 (not []) when *text* has no letter characters —
    kept for backward compatibility with the original implementation.
    """
    letters = [char for char in text if char in fullEnglishLetters]
    if not letters:
        return 0
    if wordsDict is None:
        wordsDict = DictLoader()
    # (The original's unused 'counter' variable has been removed.)
    return [word for word in ''.join(letters).split() if word in wordsDict]
| [
"="
] | = |
42db80f31e7be0f63eda0be8f66f974d95ed6f61 | dbd23b5c9ead096ea1b4c4ddd2acba3f6b4eb0db | /testing/test_delete_job_when_finished.py | 62808bad32277674cc8859516c778e703e7e1ef6 | [] | no_license | NGTS/real-time-transmission | b067b9572d02ae99c7cbd6c569c4ac36eb14bc25 | f70901dbc9ae59515e7786d5d3a5978c46adc312 | refs/heads/master | 2020-06-07T16:11:45.860569 | 2018-02-26T11:15:49 | 2018-02-26T11:15:49 | 42,189,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | import pytest
import pymysql
from ngts_transmission.watching import fetch_transmission_jobs
@pytest.yield_fixture
# NOTE(review): pytest.yield_fixture is deprecated (removed in pytest 6.2);
# plain @pytest.fixture has supported yield-style fixtures since pytest 3.0.
def cursor():
    """Yield a cursor on the ngts_ops database; roll back and close after."""
    connection = pymysql.connect(user='ops', db='ngts_ops')
    cursor = connection.cursor()
    try:
        yield cursor
    finally:
        # Roll back so test writes never persist, then release the connection.
        connection.rollback()
        connection.close()
def test_job_deleted(job_db, cursor):
    """Removing the only pending job must leave no transmission jobs behind."""
    jobs = list(fetch_transmission_jobs(cursor))
    jobs[0].remove_from_database(cursor)
    remaining = list(fetch_transmission_jobs(cursor))
    assert remaining == []
| [
"s.r.walker101@googlemail.com"
] | s.r.walker101@googlemail.com |
3a0df4406e2172b099f147aac840fbdf997001a3 | 620cd7d12a3d241da9fe59f30bbbc97c3ffa61e2 | /apptools/apptools-android-tests/apptools/build_path.py | 2ceabab85becaf578c47fac647bd2d36b0cc4829 | [
"BSD-3-Clause"
] | permissive | playbar/crosswalk-test-suite | e46db96343f4a47f1a19fddaedc519818c10d992 | 29686407e8b3106cf2b0e87080f927609e745f8e | refs/heads/master | 2021-05-29T15:33:01.099059 | 2015-10-09T06:03:22 | 2015-10-09T06:03:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,717 | py | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
import comm
import shutil
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """crosswalk-app build smoke tests: normal and release modes."""

    def _build_and_check(self, build_mode=""):
        """Create a project, build it in a scratch 'pkg' dir (optionally with
        an extra mode word such as 'release '), run it, and clean up."""
        comm.setUp()
        comm.create(self)
        if os.path.exists("pkg"):
            shutil.rmtree("pkg")
        os.mkdir("pkg")
        os.chdir('pkg')
        buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build " + build_mode + comm.XwalkPath + "org.xwalk.test"
        comm.build(self, buildcmd)
        comm.run(self)
        os.chdir('../')
        shutil.rmtree("pkg")
        comm.clear("org.xwalk.test")
        # Restore adb: comm.clear can leave the server stopped.
        os.system('adb start-server')

    def test_build_path_normal(self):
        self._build_and_check()

    def test_build_path_release(self):
        self._build_and_check("release ")
os.system('adb start-server')
if __name__ == '__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| [
"yunx.liu@intel.com"
] | yunx.liu@intel.com |
2ba5addce7fd9b7cb325e48d5c45b3ce7fd59344 | 57fc5d54f5df359c7a53020fb903f36479d3a322 | /controllers/.history/supervisor/test_20201127155537.py | e3b792d9d8e09cd6b8a393ddf0de55beb40da8d5 | [] | no_license | shenwuyue-xie/webots_testrobots | 929369b127258d85e66c5275c9366ce1a0eb17c7 | 56e476356f3cf666edad6449e2da874bb4fb4da3 | refs/heads/master | 2023-02-02T11:17:36.017289 | 2020-12-20T08:22:59 | 2020-12-20T08:22:59 | 323,032,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | import numpy
import math
import os
# def normalize_to_range(value, min, max, newMin, newMax):
# value = float(value)
# min = float(min)
# max = float(max)
# newMin = float(newMin)
# newMax = float(newMax)
# return (newMax - newMin) / (max - min) * (value - max) + newMax
# k = normalize_to_range(50,0,1000,0,1)
# print(k)
# x = [0.5 for i in range(12)]
# y = numpy.random.normal(12)
# """ better function """
# def robot_step(self,action):
# flag_translation = False
# flag_rotation = False
# if action[-1] > 0.8 and action[-1] <= 1 and self.robot_num < Max_robotnum:
# last_translation = self.robot_handles[-1].getField('translation').getSFVec3f()
# last_angle = self.robot_handles[-1].getField('rotation').getSFRotation()[3]
# last_rotation = self.robot_handles[-1].getField('rotation').getSFRotation()
# delta_z = 0.23 * math.cos(last_angle)
# delta_x = 0.23 * math.sin(last_angle)
# new_translation = []
# new_translation.append(last_translation[0] - delta_x)
# new_translation.append(last_translation[1])
# new_translation.append(last_translation[2] - delta_z)
# robot_children = self.robot_handles[-1].getField('children')
# rearjoint_node = robot_children.getMFNode(4)
# joint = rearjoint_node.getField('jointParameters')
# joint = joint.getSFNode()
# para = joint.getField('position')
# hingeposition = para.getSFFloat()
# if hingeposition > 0.8 or hingeposition < -0.8:
# delta = 0.03 - 0.03 * math.cos(hingeposition)
# delta_z = delta * math.cos(last_angle)
# delta_x = delta * math.sin(last_angle)
# new_translation[0] = new_translation[0] + delta_x
# new_translation[2] = new_translation[2] + delta_z
# new_rotation = []
# for i in range(4):
# new_rotation.append(last_rotation[i])
# Rewrite the first "translation"/"rotation" lines of the Webots object
# export (presumably a robot proto dump) so both are zeroed out.
# NOTE(review): this fragment looks like unfinished work-in-progress —
# rebinding the loop variable `line` does not modify `lines`, nothing is
# collected into `new_file`, and the file is never written back, so the
# edit currently has NO effect on Robot.wbo.
flag_translation = False
flag_rotation = False
new_file = []
with open ("Robot.wbo",'r+') as f:
    lines = f.readlines()
    for line in lines:
        if "translation" in line:
            # Only the first matching line is meant to be replaced.
            if flag_translation == False:
                replace = "translation " + str(0) + " " + str(0) + " " + str(0)
                line = "\t" + replace +'\n'
                flag_translation = True
        if "rotation" in line:
            if flag_rotation == False:
                replace = "rotation " + str(0) + " " + str(0) + " " + str(0) + " " \
                    +str(0)
                line = "\t" + replace +'\n'
                flag_rotation = True
    # NOTE(review): reassigned but never populated or used.
    new_file = []
# rootNode = self.supervisor.getRoot()
# childrenField = rootNode.getField('children')
# childrenField.importMFNode(-1,importname)
# defname = 'robot_' + str(self.robot_num)
# self.robot_handles.append(self.supervisor.getFromDef(defname))
# self.robot_num = self.robot_num + 1
# elif action[-1] >0 and action[-1] <= 0.2 and self.robot_num >1:
# removerobot = self.robot_handles[-1]
# removerobot.remove()
# self.robot_num = self.robot_num - 1
# del(self.robot_handles[-1])
| [
"1092673859@qq.com"
] | 1092673859@qq.com |
4282f0f0a003d44a67bb0595b6cc543a4271a345 | 4920b6c12dc2427036077d38ed8fa513130418a8 | /bipad_api/bipad_api/models/inline_response20053.py | a52d1c5a3839994bdacc9f2ef436923135b09853 | [] | no_license | laxmitimalsina/covid_dashboard | d51a43d3ba2ad8a9754f723383f6395c1dccdda5 | ccba8a3f5dd6dbd2b28e2479bda6e581eb23805f | refs/heads/master | 2023-05-29T15:07:32.524640 | 2021-05-03T11:15:43 | 2021-05-03T11:15:43 | 273,698,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,265 | py | # coding: utf-8
"""
BIPAD API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse20053(object):
    """Swagger-generated model with count / next / previous / results fields.

    `count` and `results` are required (setting them to None raises
    ValueError); `next` and `previous` are optional.
    """

    # Attribute name -> swagger type (used by to_dict()).
    swagger_types = {
        'count': 'int',
        'next': 'str',
        'previous': 'str',
        'results': 'list[ReleaseStatus]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'count': 'count',
        'next': 'next',
        'previous': 'previous',
        'results': 'results'
    }

    def __init__(self, count=None, next=None, previous=None, results=None):  # noqa: E501
        """Build the model; *count* and *results* must not be None."""
        self._count = None
        self._next = None
        self._previous = None
        self._results = None
        self.discriminator = None
        # Required fields always go through their setters so None is rejected.
        self.count = count
        if next is not None:
            self.next = next
        if previous is not None:
            self.previous = previous
        self.results = results

    @property
    def count(self):
        """The `count` field (int, required)."""
        return self._count

    @count.setter
    def count(self, count):
        if count is None:
            raise ValueError("Invalid value for `count`, must not be `None`")  # noqa: E501
        self._count = count

    @property
    def next(self):
        """The `next` field (str, optional)."""
        return self._next

    @next.setter
    def next(self, next):
        self._next = next

    @property
    def previous(self):
        """The `previous` field (str, optional)."""
        return self._previous

    @previous.setter
    def previous(self, previous):
        self._previous = previous

    @property
    def results(self):
        """The `results` field (list[ReleaseStatus], required)."""
        return self._results

    @results.setter
    def results(self, results):
        if results is None:
            raise ValueError("Invalid value for `results`, must not be `None`")  # noqa: E501
        self._results = results

    def to_dict(self):
        """Recursively convert the model's properties into a plain dict."""
        def _convert(value):
            # Anything exposing to_dict() is converted; lists and dicts are
            # walked one level deep, exactly as the generated code did.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: item.to_dict() if hasattr(item, "to_dict") else item
                        for key, item in value.items()}
            return value

        result = {name: _convert(getattr(self, name))
                  for name in self.swagger_types}
        # Generated-code artifact: only relevant for dict-derived models.
        if issubclass(InlineResponse20053, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when they are the same type with equal attributes."""
        return (isinstance(other, InlineResponse20053)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"laxmitimalsina2017@gmail.com"
] | laxmitimalsina2017@gmail.com |
d5fef288e77bbef2f70188793ca31f94dd4b090c | d1fb8bb087564052674cb33ac6d75daca4ae586a | /Amazon 11月VO真题/1/1181. Diameter of Binary Tree.py | feaeae0a737d49417a450acd9d2dc1468f37fd6f | [] | no_license | YunsongZhang/lintcode-python | 7db4ca48430a05331e17f4b79d05da585b1611ca | ea6a0ff58170499c76e9569074cb77f6bcef447a | refs/heads/master | 2020-12-24T03:05:43.487532 | 2020-01-30T19:58:37 | 2020-01-30T19:58:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
    """
    @param root: a root of binary tree
    @return: return a integer
    """

    def __init__(self):
        # Running maximum of (left depth + right depth) over all nodes,
        # i.e. the longest path (in edges) seen so far.
        self.diameter = 0

    def diameterOfBinaryTree(self, root):
        """Return the diameter (longest node-to-node path, in edges)."""
        if not root:
            return 0
        # BUG FIX: reset before each traversal so reusing the same Solution
        # instance for several trees does not accumulate stale maxima.
        self.diameter = 0
        self.dfs(root)
        return self.diameter

    def dfs(self, root):
        """Return the depth of the subtree rooted at *root*, updating
        self.diameter with the best path passing through each node."""
        if not root:
            return 0
        left = self.dfs(root.left)
        right = self.dfs(root.right)
        # Path through this node uses the deepest branch on each side.
        self.diameter = max(self.diameter, left + right)
        return max(left, right) + 1
| [
"haixiang6123@gmail.com"
] | haixiang6123@gmail.com |
d355b7ca1605808368bffb45037fb5f9c0de8c1b | 31f5c200fbaded1f3670b94042b9c47182a160ca | /ch17/q17.py | 9d33c61cbee9a9322dbd698c19e807c63a45bd91 | [] | no_license | AeroX2/advent-of-code-2020 | d86f15593ceea442515e2853003d3a1ec6527475 | e47c02e4d746ac88f105bf5a8c55dcd519f4afe8 | refs/heads/main | 2023-02-03T02:24:58.494613 | 2020-12-22T10:15:29 | 2020-12-22T10:15:29 | 317,873,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | import sys
# Parse the puzzle input (path given as the first CLI argument) into a
# 2D grid of characters, one list per input line.
data = open(sys.argv[1]).read().strip()
data = [list(x) for x in data.split('\n')]
print(data)
# Sparse map of the infinite 3D grid, keyed by (x, y, z) -> '#'/'.'.
# Coordinates are shifted by half the line width so the initial z=0
# slice is roughly centred on the origin.
cube = {}
for y,v in enumerate(data):
    h = len(v)//2
    for x,v2 in enumerate(v):
        cube[(x-h,y-h,0)] = v2
print(cube)
# Per-axis scan bounds for the simulation loop below (the loops iterate
# range(-bound, bound+1)); grown by one each cycle.
width = len(data[0])
height = width
depth = 0
print('Dimensions')
print((width, height, depth))
def check_active(pos, grid=None):
    """Count active ('#') cells among the 26 neighbours of `pos`.

    pos: (x, y, z) coordinate tuple.
    grid: mapping of coordinate tuples to '#'/'.'; missing cells count as
        inactive. Defaults to the module-level `cube`, so existing call
        sites keep working unchanged.
    Returns the number of active neighbours (the cell itself is excluded).
    """
    if grid is None:
        grid = cube
    active_count = 0
    for dx in range(-1, 2):
        for dy in range(-1, 2):
            for dz in range(-1, 2):
                if dx == 0 and dy == 0 and dz == 0:
                    continue  # skip the cell itself
                neighbour = (pos[0] + dx, pos[1] + dy, pos[2] + dz)
                if grid.get(neighbour, '.') == '#':
                    active_count += 1
    return active_count
# Run six cycles of the 3D cellular automaton (Conway Cubes).
for i in range(6):
    # Debug dump of the current grid state, kept for reference:
    #for z in range(-depth,depth+1):
    #    print('z =',z)
    #    for y in range(-height,height+1):
    #        for x in range(-width,width+1):
    #            print(cube.get((x,y,z),'.'),end='')
    #        print()
    # The active region can grow by at most one cell per axis per cycle.
    width += 1
    height += 1
    depth += 1
    # Collect all state changes first and apply them afterwards, so every
    # cell is evaluated against the same (previous) generation.
    modify_list = []
    for x in range(-width,width+1):
        for y in range(-height,height+1):
            for z in range(-depth,depth+1):
                is_active = cube.get((x,y,z), '.') == '#'
                active_count = check_active((x,y,z))
                # Active cells survive only with 2 or 3 active neighbours;
                # inactive cells activate with exactly 3.
                if (is_active and not (active_count == 2 or active_count == 3)):
                    modify_list.append((x,y,z,'.'))
                elif (not is_active and (active_count == 3)):
                    modify_list.append((x,y,z,'#'))
    for x,y,z,v in modify_list:
        cube[(x,y,z)] = v
# Answer: total number of active cells after six cycles.
print(len([x for x in cube.values() if x == '#']))
| [
"james@ridey.email"
] | james@ridey.email |
aceeca9c2d8787ad6a846e833d5d569f4584213e | 4a191e5aecd53c4cea28482a0179539eeb6cd74b | /comments/forms.py | 5ba7334316083549b5600318794c46b4b51310b5 | [] | no_license | jiangjingwei/blogproject | 631a2e8e2f72420cce45ddaf152174852376d831 | daf14e88092dc030a3ab0c295ee06fb6b2164372 | refs/heads/master | 2020-03-14T23:29:08.052253 | 2018-05-10T11:35:59 | 2018-05-10T11:35:59 | 131,846,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | from django import forms
from comments.models import Comments
class CommentForm(forms.ModelForm):
    """ModelForm for creating a blog comment from user-submitted data."""
    class Meta:
        # Derive fields and validation from the Comments model; only
        # these four visitor-facing fields are exposed on the form.
        model = Comments
        fields = ['name', 'email', 'url', 'text']
| [
"270159429@qq.com"
] | 270159429@qq.com |
852adc191b890bbcb734581a6f26bd32495378c8 | ec21d4397a1939ac140c22eca12491c258ed6a92 | /Zope-2.9/lib/python/Testing/dispatcher.py | a309a4937442b12ebedc1492329a89a0f71071bb | [] | no_license | wpjunior/proled | dc9120eaa6067821c983b67836026602bbb3a211 | 1c81471295a831b0970085c44e66172a63c3a2b0 | refs/heads/master | 2016-08-08T11:59:09.748402 | 2012-04-17T07:37:43 | 2012-04-17T07:37:43 | 3,573,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,469 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# Dispatcher for usage inside Zope test environment
# Andreas Jung, andreas@digicool.com 03/24/2001
__version__ = '$Id: dispatcher.py 40222 2005-11-18 15:46:28Z andreasjung $'
import os,sys,re,string
import threading,time,commands,profile
class Dispatcher:
    """
    a multi-purpose thread dispatcher

    dispatcher() starts groups of worker threads, a background thread
    samples the process memory usage while they run, and a summary of
    per-thread runtimes/data plus memory statistics is written to the
    log target (stderr by default, see setlog()).
    """

    def __init__(self,func=''):
        # Log target; can be redirected with setlog().
        self.fp = sys.stderr
        # Hook method names each worker runs before/after its payload.
        self.f_startup = []
        self.f_teardown = []
        # Last logged line, used to suppress consecutive duplicates.
        self.lastlog = ""
        # Protects th_data/runtime, written concurrently by workers.
        self.lock = threading.Lock()
        # Name of the method under test; its docstring is reported.
        self.func = func
        self.profiling = 0
        # Guard the lookup: with the default func='' the original
        # getattr(self, '') raised AttributeError, breaking Dispatcher().
        self.doc = getattr(self,self.func).__doc__ if self.func else None

    def setlog(self,fp):
        """Redirect log output to the file-like object fp."""
        self.fp = fp

    def log(self,s):
        """Write s (no newline), skipping consecutive duplicate lines."""
        if s==self.lastlog: return
        self.fp.write(s)
        self.fp.flush()
        self.lastlog=s

    def logn(self,s):
        """Write s plus a newline, skipping consecutive duplicate lines."""
        if s==self.lastlog: return
        self.fp.write(s + '\n')
        self.fp.flush()
        self.lastlog=s

    def profiling_on(self):
        # Fixed: was `def profiling_on():` -- the missing self made the
        # method uncallable even though its body already used self.
        self.profiling = 1

    def profiling_off(self):
        # Same missing-self fix as profiling_on.
        self.profiling = 0

    def dispatcher(self,name='', *params):
        """ dispatcher for threads

        The dispatcher expects one or several tupels:
        (functionname, number of threads to start , args, keyword args)
        """
        # Seed with -1 so the watcher list is never empty; removed later.
        self.mem_usage = [-1]
        mem_watcher = threading.Thread(None,self.mem_watcher,name='memwatcher')
        mem_watcher.start()
        self.start_test = time.time()
        self.name = name
        self.th_data = {}
        self.runtime = {}
        self._threads = []
        s2s=self.s2s
        for func,numthreads,args,kw in params:
            f = getattr(self,func)  # fail early when the name is wrong
            for i in range(0,numthreads):
                kw['t_func'] = func
                th = threading.Thread(None,self.worker,name="TH_%s_%03d" % (func,i) ,args=args,kwargs=kw)
                self._threads.append(th)
        for th in self._threads: th.start()
        # Block until only this (main) thread remains; the memory watcher
        # exits on its own once all workers are done.
        while threading.activeCount() > 1: time.sleep(1)
        self.logn('ID: %s ' % self.name)
        self.logn('FUNC: %s ' % self.func)
        self.logn('DOC: %s ' % self.doc)
        self.logn('Args: %s' % params)
        # Per-thread runtime and the keyword data recorded in th_teardown.
        for th in self._threads:
            self.logn( '%-30s ........................ %9.3f sec' % (th.getName(), self.runtime[th.getName()]) )
            for k,v in self.th_data[th.getName()].items():
                self.logn ('%-30s %-15s = %s' % (' ',k,v) )
            self.logn("")
        self.logn('Complete running time: %9.3f sec' % (time.time()-self.start_test) )
        # Drop the -1 seed before computing the memory statistics.
        if len(self.mem_usage)>1: self.mem_usage.remove(-1)
        self.logn( "Memory: start: %s, end: %s, low: %s, high: %s" % \
            (s2s(self.mem_usage[0]),s2s(self.mem_usage[-1]),s2s(min(self.mem_usage)), s2s(max(self.mem_usage))))
        self.logn('')

    def worker(self,*args,**kw):
        """Thread body: run startup hooks, the payload method named by
        kw['t_func'], then the teardown hooks."""
        for func in self.f_startup: getattr(self,func)()
        t_func = getattr(self,kw['t_func'])
        del kw['t_func']
        apply(t_func,args,kw)
        for func in self.f_teardown: getattr(self,func)()

    def th_setup(self):
        """ initalize thread with some environment data """
        env = {'start': time.time()
              }
        return env

    def th_teardown(self,env,**kw):
        """ famous last actions of thread """
        # Record this thread's extra data and runtime under the lock.
        self.lock.acquire()
        self.th_data[ threading.currentThread().getName() ] = kw
        self.runtime [ threading.currentThread().getName() ] = time.time() - env['start']
        self.lock.release()

    def getmem(self):
        """ try to determine the current memory usage (Linux only).

        Returns the resident size in bytes read from /proc/<pid>/statm,
        or None on unsupported platforms.
        """
        if not sys.platform in ['linux2']: return None
        cmd = '/bin/ps --no-headers -o pid,vsize --pid %s' % os.getpid()
        outp = commands.getoutput(cmd)
        # NOTE(review): the ps output is parsed but unused below; the
        # actual figure comes from /proc/<pid>/statm.
        pid,vsize = filter(lambda x: x!="" , string.split(outp," ") )
        data = open("/proc/%d/statm" % os.getpid()).read()
        fields = re.split(" ",data)
        # statm reports pages; assumes a 4096-byte page size.
        mem = string.atoi(fields[0]) * 4096
        return mem

    def mem_watcher(self):
        """ thread for watching memory usage """
        running = 1
        while running ==1:
            self.mem_usage.append( self.getmem() )
            time.sleep(1)
            # Stop once only the main thread and this watcher remain.
            if threading.activeCount() == 2: running = 0

    def register_startup(self,func):
        """Register a method name to run in every worker before its payload."""
        self.f_startup.append(func)

    def register_teardown(self,func):
        """Register a method name to run in every worker after its payload."""
        self.f_teardown.append(func)

    def s2s(self,n):
        """Format a byte count as a human-readable Bytes/KB/MB string;
        values of 1 GB or more are returned unchanged."""
        if n <1024.0: return "%8.3lf Bytes" % n
        if n <1024.0*1024.0: return "%8.3lf KB" % (1.0*n/1024.0)
        if n <1024.0*1024.0*1024.0: return "%8.3lf MB" % (1.0*n/1024.0/1024.0)
        else: return n
if __name__=="__main__":
    # Smoke test: report this process's memory usage (Linux only; Python 2).
    d=Dispatcher()
    print d.getmem()
    pass
| [
"root@cpro5106.publiccloud.com.br"
] | root@cpro5106.publiccloud.com.br |
d624d83e089f1a87248f66a4ca5b174c8b084b89 | 99a2d82d2a10c0af77731885f80307edcdc48535 | /maildir-cat | c6e018e5ef5f707ebd50c8ae0030ce0ce56bf5cd | [
"WTFPL",
"LicenseRef-scancode-public-domain"
] | permissive | mk-fg/fgtk | be60c102f6ad6cd0d0e364c3863c36a1902a15a3 | 90de180b0d4184f3040d85a4ff2ac38319a992af | refs/heads/master | 2023-09-06T08:41:33.852815 | 2023-08-19T07:25:54 | 2023-08-19T07:25:54 | 3,831,498 | 149 | 46 | null | 2017-08-15T19:23:59 | 2012-03-26T09:58:03 | Python | UTF-8 | Python | false | false | 4,883 | #!/usr/bin/env python3
import itertools as it, operator as op, functools as ft
import mailbox, email, email.header, email.charset, email.errors
import os, sys, re, pathlib as pl, collections as cs
def bytes_decode(b, enc, errors='strict'):
	"""Decode bytes `b` as `enc`, retrying with a fallback codec name.

	'unknown-8bit' is retried as utf-8; other unknown labels such as
	'cp-850' are retried with dashes stripped ('cp850'). Raises
	LookupError when both names are unknown.
	"""
	try:
		return b.decode(enc, errors)
	except LookupError:
		if enc == 'unknown-8bit':
			fallback = 'utf-8'
		else:
			fallback = enc.replace('-', '')
			if fallback == enc:
				raise
		try:
			return b.decode(fallback, errors)
		except LookupError:
			raise LookupError(enc, fallback) from None
def _mail_header_decode_part(line):
header = ''
for part, enc in email.header.decode_header(line):
if enc: part = bytes_decode(part, enc, 'replace')
if isinstance(part, bytes): part = part.decode('utf-8', 'replace')
# RFC2822#2.2.3 whitespace folding auto-adds spaces.
# But extra space can also be encoded in base64 or such,
# so this does not preserve exact number of encoded spaces.
if not header.endswith(' '): header += ' '
header += part.lstrip()
return header.strip()
def mail_header_decode(val):
	"""Decode a header value, handling RFC2047 encoded-words that survive
	the first decoding pass (e.g. double-encoded subjects).

	Repeatedly scans the once-decoded string for =?charset?Q/B?...?=
	tokens and decodes each; malformed tokens are left untouched.
	"""
	res, header = list(), _mail_header_decode_part(val)
	while True:
		# Raw string fix: the original non-raw '=\?...' pattern relied on
		# invalid escape sequences passing through, which is deprecated.
		match = re.search(r'=\?[\w\d-]+(\*[\w\d-]+)?\?[QB]\?[^?]+\?=', header)
		if not match:
			res.append(header)
			break
		start, end = match.span(0)
		token = header[start:end]
		try: token = _mail_header_decode_part(token)
		except email.errors.HeaderParseError: pass  # keep malformed token as-is
		res.extend([header[:start], token])
		header = header[end:]
	return ''.join(res)
def _mail_parse(msg):
	"""Recursively convert an email.message.Message into a MailMsg tuple."""
	headers = MailMsgHeaders(
		(name.lower(), mail_header_decode(raw)) for name, raw in msg.items())
	if msg.is_multipart():
		payload = list(map(_mail_parse, msg.get_payload()))
	else:
		payload = msg.get_payload(decode=True)
	# Guarantee a content-type header even when the message had none.
	if not headers.get('content-type'):
		headers['content-type'] = [msg.get_content_type()]
	if headers.get_core('content-disposition') == 'attachment':
		payload = '<attachment scrubbed>'
	elif isinstance(payload, bytes):
		payload = bytes_decode(payload, msg.get_content_charset() or 'utf-8', 'replace')
	return MailMsg(headers, payload)
def mail_parse(msg):
	"""Parse a raw message (bytes or str) or an email.message.Message
	into a MailMsg tuple."""
	if isinstance(msg, bytes): msg = email.message_from_bytes(msg)
	elif isinstance(msg, str):
		# Fix: message_from_bytes() fails on str input, so route str
		# through the matching text parser instead.
		msg = email.message_from_string(msg)
	return _mail_parse(msg)
class MailMsg(cs.namedtuple('MailMsg', 'headers payload')):
	"""Parsed message part: headers plus either a decoded string payload
	or a list of nested MailMsg parts (multipart)."""

	@property
	def all_parts(self):
		"""Flat list of leaf (string-payload) parts of this message."""
		if isinstance(self.payload, str):
			return [self]
		nested = it.chain.from_iterable(part.all_parts for part in self.payload)
		return sorted(nested, key=len)

	def _text_ct_prio(self, part):
		"""Rank parts for body extraction: text/plain < other text/* < rest."""
		ct = part.headers.get('content-type')
		if ct == 'text/plain':
			return 1
		if ct.startswith('text/'):
			return 2
		return 3

	@property
	def text(self):
		"""Best-guess textual body: payload of the highest-priority leaf part."""
		ranked = sorted(self.all_parts, key=self._text_ct_prio)
		return ranked[0].payload
class MailMsgHeaders(cs.UserDict):
	"""Header multimap: each key maps to the list of values seen for it,
	in original order."""

	def __init__(self, header_list):
		super().__init__()
		for key, val in header_list:
			self.setdefault(key, list()).append(val)

	def get(self, k, default=None, proc=op.itemgetter(0)):
		"""Return proc(list-of-values) for k, or default when absent."""
		values = self.data.get(k)
		return proc(values) if values else default

	def get_core(self, k, default=None):
		"""First value with any ';parameters' suffix stripped."""
		return self.get(k, default, lambda vs: vs[0].split(';', 1)[0].strip())

	def get_all(self, k, default=None):
		"""Full list of values recorded for k, or default."""
		return self.get(k, default, lambda vs: vs)
def dump_msg(pre, msg):
	"""Print selected headers and the text body of msg, every output line
	prefixed with `pre` (typically the source file path)."""
	parsed = mail_parse(msg)
	for name in 'from to subject date message-id reply-to sender'.split():
		for val in parsed.headers.get_all(name, list()):
			print(f'{pre}{name.title()}: {val}')
	print(pre)  # blank separator line between headers and body
	for line in parsed.text.strip().split('\n'):
		print(f'{pre}{line}')
def main(args=None):
	"""CLI entry point: walk each maildir (or single message file) given
	on the command line and dump every message via dump_msg()."""
	import argparse
	parser = argparse.ArgumentParser(
		description='Tool to find all messages in the maildir, decode'
			' MIME msg bodies and dump every line in these along with the filename'
			' to stdout to run grep or any other search on them to find specific msg/file.')
	parser.add_argument('maildir', nargs='*', default=['~/.maildir'],
		help='Path to maildir(s) or individual msg file(s). Default: %(default)s.')
	opts = parser.parse_args(sys.argv[1:] if args is None else args)

	log_err = ft.partial(print, file=sys.stderr, flush=True)

	for p in opts.maildir:
		p_root_base = p = pl.Path(p)
		p_root = p_root_base.expanduser().resolve()
		if p_root.is_file():
			# Argument is a single message file, not a maildir.
			try: dump_msg(f'{p}: ', p_root.read_bytes())
			# Fix: both error messages below were plain literals missing
			# the f prefix, printing the literal '{p}' instead of the path.
			except email.errors.MessageParseError: log_err(f'Malformed msg file: {p}')
			continue
		ps_root = str(p_root)
		maildir = mailbox.Maildir(ps_root)
		# Top-level maildir plus every sub-folder.
		box_dirs = [maildir, *(maildir.get_folder(key) for key in maildir.list_folders())]
		for box in box_dirs:
			for key in box.keys():
				ps = str((pl.Path(box._path) / box._lookup(key)).resolve())
				assert ps.startswith(ps_root), [ps_root, ps]
				# Report paths relative to the argument the user passed.
				p = p_root_base / ps[len(ps_root)+1:]
				try: msg = box[key]
				except email.errors.MessageParseError: log_err(f'Malformed msg file: {p}')
				else: dump_msg(f'{p}: ', msg)

if __name__ == '__main__': sys.exit(main())
| [
"mk.fraggod@gmail.com"
] | mk.fraggod@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.