blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22212b89866c682e600156e061acfef822671fd1 | a5d05e3cecfa6571016e41d19c982f1082714582 | /PROXYC.py | e125e22e4e8ce246a22c2fe83135f1a2f165446a | [] | no_license | Sanket-Mathur/CodeChef-Practice | 8ebc80eb9a32c90a5b3785348fca2048190dbeb0 | cba5bc2eaaf5489cbd8e85acaca6f82d223cff4f | refs/heads/master | 2023-08-08T05:59:42.755206 | 2021-09-26T12:44:15 | 2021-09-26T12:44:15 | 268,267,425 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | import math
# CodeChef PROXYC: for each test case, print the minimum number of proxy
# attendances needed to reach 75% attendance, or -1 if that is impossible.
for _ in range(int(input())):
    N = int(input())
    S = list(input())
    present = S.count('P')
    # Days required for 75% attendance, rounded up.
    required = math.ceil(N * 0.75)
    if required <= present:
        print(0)
    else:
        # A day marked 'A' can be proxied only if at least one of the two
        # preceding days AND at least one of the two following days is 'P'.
        # The first two and last two days can therefore never be proxied.
        proxyable = 0
        for i in range(2, N - 2):
            if S[i] == 'A' and (S[i-1] == 'P' or S[i-2] == 'P') \
                    and (S[i+1] == 'P' or S[i+2] == 'P'):
                proxyable += 1
        # Removed the unused `Ca = S.count('A')` from the original.
        print(required - present if present + proxyable >= required else -1)
| [
"rajeev.sanket@gmail.com"
] | rajeev.sanket@gmail.com |
785f17b8be057a2d4b4d69e3b7ba1879ff2d3dca | b1eac5e638273ddce5f7a9111676ecf1a7a0305a | /day1/selenium8.py | 5bfc2c56f17e9600606c08d8fb1b6aeb17298ec0 | [] | no_license | zhangbailong945/pachong | 081d4b79448ab01292d91011e6db4811784baa63 | 8af730989488ecfc09d40e96a4790ce1a6ce1714 | refs/heads/master | 2020-03-28T01:15:27.909917 | 2019-06-06T09:46:07 | 2019-06-06T09:46:07 | 147,490,458 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | #执行js
# Demo: run raw JavaScript in the browser through Selenium's execute_script.
import sys,time
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
# NOTE(review): sys/time/ActionChains/By/Keys/EC/WebDriverWait are imported
# but unused in this demo.
# Launch Chrome and open the Baidu home page.
chrome=webdriver.Chrome()
chrome.get('https://www.baidu.com')
# Implicitly wait up to 3 seconds when locating elements.
chrome.implicitly_wait(3)
try:
    # execute_script runs arbitrary JavaScript in the page context.
    chrome.execute_script('alert("111111111")')
finally:
    # Always close the browser window, even if the script above fails.
    chrome.close()
"1207549344@qq.com"
] | 1207549344@qq.com |
c6292bb43fa0041b229f80af33521753cd403b09 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03030/s045798606.py | 802f6b64e445e923406fd390ac302f6e446d0078 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | N = int(input())
l = []
for i in range(N):
    s, p = input().split()
    # Keep (city name, score, original 1-based index) for each restaurant.
    l.append([s, int(p), i+1])
# Sort by city name lexicographically, then by score in descending order.
l = sorted(l, key=lambda x:(x[0], -x[1]))
for i in range(N):
    # Output the original indices in the sorted order.
    print(l[i][2])
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9f80173b3cf8f824c5a2554175f22621476761a1 | 7d9030094153bf363ba5690607bf496b0bb76009 | /script/Thread_pool.py | 44b8fbb80234a42e0b2260bf9ee1fedc71cfb46c | [] | no_license | stan12138/archive | 2773c309e59458000fb1eac44c6d3fc073bfc511 | 54478dc286712948913e3c9ca126015a8bb24bc8 | refs/heads/master | 2020-12-02T18:15:39.609495 | 2020-02-24T13:19:29 | 2020-02-24T13:19:29 | 96,503,611 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | import threading
import queue
# Public names exported by `from Thread_pool import *`.
# NOTE(review): WorkDone is raised out of ThreadPool.get_all_result but is not
# exported here, so star-importers cannot catch it by name — consider adding it.
__all__ = ["Work","ThreadPool"]
class WorkDone(Exception) :
    """Signals that every submitted work item has been completed."""
    pass
class WorkThread(threading.Thread) :
    """Daemon worker thread: pulls work items off a queue, executes them and
    pushes (work, result) tuples onto a result queue until dismissed."""
    def __init__(self,work_queue,result_queue,timeout) :
        """
        work_queue: queue.Queue of work items (objects with .work_func/.args)
        result_queue: queue.Queue receiving (work, result) tuples
        timeout: seconds to block on an empty work queue before re-checking
                 the dismiss flag
        """
        threading.Thread.__init__(self)
        #self.setDaemon(True) is deprecated; set the attribute directly
        self.daemon = True
        self.work_queue = work_queue
        self.result_queue = result_queue
        self.timeout = timeout
        # Event used to ask this worker to stop after its current item.
        self.dismiss = threading.Event()
        self.start()
    def run(self) :
        while True:
            if self.dismiss.is_set() :
                break
            try :
                work = self.work_queue.get(True,self.timeout)
            except queue.Empty :
                # Nothing to do yet; loop around and re-check the dismiss flag.
                continue
            else :
                if self.dismiss.is_set() :
                    # Dismissed while holding an item: put it back for another worker.
                    self.work_queue.put(work)
                    break
                try :
                    result = work.work_func(*work.args)
                    #print('%s is done'%work.work_ID)
                    self.result_queue.put((work,result))
                except Exception :
                    # Was a bare `except:`; narrowed so SystemExit and
                    # KeyboardInterrupt are no longer swallowed. Failed work
                    # items are still silently dropped, as before.
                    pass
    def set_dismiss(self) :
        """Ask this worker to exit; it stops before taking another work item."""
        self.dismiss.set()
class Work() :
    """A unit of work for the thread pool: a callable plus its positional
    arguments and a hashable identifier used to track completion."""
    def __init__(self,target=None,args=(),work_ID=None) :
        """
        target: the callable to execute
        args: positional arguments for target (default: none).
              Default changed from a shared mutable [] to an immutable ()
              to avoid the mutable-default-argument pitfall; callers that
              pass their own args are unaffected.
        work_ID: optional identifier; its hash is used as the key, falling
                 back to id(self) when absent or unhashable.
        """
        if work_ID is None :
            self.work_ID = id(self)
        else :
            try :
                self.work_ID = hash(work_ID)
            except TypeError :
                # hash() raises TypeError for unhashable values; was a bare except.
                print("workID must be hashable,this id can't use,we will set as default")
                self.work_ID = id(self)
        self.work_func = target
        self.args = args
    def __str__(self) :
        return 'work thread id=%s args=%s'%(self.work_ID,self.args)
class ThreadPool(object):
    """A fixed pool of WorkThread workers fed from a shared work queue.

    Results arrive on a shared result queue as (work, result) tuples;
    outstanding items are tracked in `self.work`, a dict keyed by work_ID.
    """
    def __init__(self,worker_num,work_size=0,result_size=0,timeout=5) :
        # work_size/result_size of 0 mean unbounded queues.
        self.work_queue = queue.Queue(work_size)
        self.result_queue = queue.Queue(result_size)
        self.timeout = timeout
        self.workers = []
        # Workers dismissed without joining; collected by join_dismiss_thread().
        self.dismiss_workers = []
        # Maps work_ID -> Work for every item submitted but not yet completed.
        self.work = {}
        self.creat_workers(worker_num)
    def creat_workers(self,num) :
        # Spawn `num` worker threads; WorkThread starts itself on construction.
        for i in range(num) :
            self.workers.append(WorkThread(self.work_queue, self.result_queue, self.timeout))
    def dismiss_thread(self,num,do_join=False) :
        # Ask up to `num` workers to stop; optionally join them right away.
        dismiss_list = []
        num = min(num,len(self.workers))
        for i in range(num) :
            worker = self.workers.pop()
            worker.set_dismiss()
            dismiss_list.append(worker)
        print('stop %s work thread and leave %s thread.....'%(num,len(self.workers)))
        if do_join :
            for i in dismiss_list :
                i.join()
            print('join all dismiss thread already...')
        else :
            # Defer joining; join_dismiss_thread() collects these later.
            self.dismiss_workers.extend(dismiss_list)
    def join_dismiss_thread(self) :
        # Join every previously dismissed (but not yet joined) worker.
        for i in self.dismiss_workers :
            i.join()
        print('join %s dismiss workers already,now there are still %s workers...'%(len(self.dismiss_workers),len(self.workers)))
        self.dismiss_workers = []
    def put_work(self,work,block=True,timeout=None) :
        # Submit a Work item; non-Work objects are rejected with a message.
        if isinstance(work,Work) :
            self.work_queue.put(work,block,timeout)
            self.work[work.work_ID] = work
        else :
            print('work must be Work class,put failure.....')
        #print('add one work')
    def get_all_result(self,block=False) :
        # Drain completed results, raising WorkDone once every submitted
        # item has finished.
        while True:
            if not self.work :
                raise WorkDone
            try :
                work, result = self.result_queue.get(block=block)
                #print('got one result')
                del self.work[work.work_ID]
            except :
                # Expected: queue.Empty when polling non-blocking.
                # NOTE(review): this bare except also swallows any other error.
                break
    def stop(self) :
        # Dismiss and join every worker.
        self.dismiss_thread(self.worker_num(),True)
        self.join_dismiss_thread()
    def worker_num(self) :
        # Number of active (non-dismissed) workers.
        return len(self.workers)
    def wait(self) :
        # Block until every submitted work item has completed.
        while True:
            try:
                self.get_all_result(True)
            except WorkDone:
                print('work done!!!!')
                break
if __name__ == "__main__" :
    # Smoke test: fan ten small file-writing jobs out over five worker threads.
    import datetime
    # NOTE(review): `datetime` is imported here but never used below.
    def work(name,data) :
        # Write `data` to the file `name` and report completion.
        with open(name,'w') as fi :
            fi.write(data)
        print('write %s already'%name)
    main = ThreadPool(5)
    for i in range(10) :
        main.put_work(Work(target=work,args=(str(i)+'.txt','hello')))
    # Block until every queued job has produced a result, then shut down.
    main.wait()
    main.stop()
| [
"ihnyi@qq.com"
] | ihnyi@qq.com |
263a0ba65383bbc06082fdef23c848663ec54781 | 45f6a4dfc837998565d4e4e4cde258a27fdbd424 | /learn_tu_you/wx_superboss/trunk/hall37-newfish/src/newfish/player/poseidon_player.py | bb1fbcf1975f01557c0d822235d2c2cc2b615f06 | [] | no_license | isoundy000/learn_python | c220966c42187335c5342269cafc6811ac04bab3 | fa1591863985a418fd361eb6dac36d1301bc1231 | refs/heads/master | 2022-12-29T10:27:37.857107 | 2020-10-16T03:52:44 | 2020-10-16T03:52:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Auther: houguangdong
# @Time: 2020/7/17
import time
from freetime.entity.msg import MsgPack
from freetime.util import log as ftlog
from poker.entity.dao import gamedata
from poker.entity.configure import gdata
from newfish.entity.msg import GameMsg
from newfish.entity import config, util, change_notify, weakdata
from newfish.entity.lotterypool import poseidon_lottery_pool
from newfish.entity.config import FISH_GAMEID, TOWERIDS
from newfish.player.multiple_player import FishMultiplePlayer
from newfish.room.poseidon_room import Tower
from newfish.entity.redis_keys import GameData, WeakData
from newfish.servers.util.rpc import user_rpc
class FishPoseidonPlayer(FishMultiplePlayer):
    """Player entity for the Poseidon fish room.

    Currently defines no overrides: all behaviour comes from
    FishMultiplePlayer.
    """
    pass
"1737785826@qq.com"
] | 1737785826@qq.com |
b71b29353e59f08f5782750c9968d379ea377173 | ce522e5edb852562d688be96d0c15294a0d9e66b | /ecommerce/checkout/migrations/0002_auto_20170320_0235.py | 0a52143404eb6550a78302aa47ac334a1bba2924 | [] | no_license | juniorcarvalho/django-ecommerce | 62c67f57d615afa47fc77ca3f738e966616b36d3 | c6511aed95719a65f349bd7caec052515ddbbe39 | refs/heads/master | 2021-01-19T12:55:13.824402 | 2017-03-21T02:57:41 | 2017-03-21T02:57:41 | 82,350,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-20 05:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds a uniqueness constraint so each
    # (cart_key, product) pair appears at most once in the cart item table.
    dependencies = [
        ('catalog', '0001_initial'),
        ('checkout', '0001_initial'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='cartitem',
            unique_together=set([('cart_key', 'product')]),
        ),
    ]
| [
"joseadolfojr@gmail.com"
] | joseadolfojr@gmail.com |
f91277476203cfe568c65fc4eb763e51affe8f00 | 7482abade21b37b188cd4d7636fdcc9b59927847 | /projekt/primer.py | 56468f6fffdd38197a69a1c1f88ee9397d4d684d | [
"MIT"
] | permissive | evadezelak/OPB | f25a4924c680b2ee85d8e81e55cab1cfa4bdc717 | 425533f41660353a52abed439c85efc5dd801273 | refs/heads/master | 2020-05-17T07:48:22.224513 | 2019-04-18T11:57:33 | 2019-04-18T11:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
# import bottle.py
from bottle import *
# import the database connection settings
import auth_public as auth
# import psycopg2
import psycopg2, psycopg2.extensions, psycopg2.extras
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) # se znebimo problemov s šumniki
# uncomment if you want error messages
# debug(True)
@get('/static/<filename:path>')
def static(filename):
    # Serve static assets (CSS/JS/images) from the local "static" directory.
    return static_file(filename, root='static')
@get('/')
def index():
    # Home page: list all people (oseba), ordered by surname then first name.
    cur.execute("SELECT * FROM oseba ORDER BY priimek, ime")
    return template('komitenti.html', osebe=cur)
@get('/transakcije/:x/')
def transakcije(x):
    # List transactions whose amount (znesek) exceeds the path parameter x.
    # x goes through int() and a bound placeholder, so the query is safe
    # from SQL injection.
    cur.execute("SELECT * FROM transakcija WHERE znesek > %s ORDER BY znesek, id", [int(x)])
    return template('transakcije.html', x=x, transakcije=cur)
@get('/dodaj_transakcijo')
def dodaj_transakcijo():
    # Render an empty "add transaction" form (no error message).
    return template('dodaj_transakcijo.html', znesek='', racun='', opis='', napaka=None)
@post('/dodaj_transakcijo')
def dodaj_transakcijo_post():
    # Handle the "add transaction" form: insert a row; on failure re-render
    # the form with the error message, on success redirect to the home page.
    znesek = request.forms.znesek
    racun = request.forms.racun
    opis = request.forms.opis
    try:
        # Parameterized INSERT -> safe from SQL injection.
        cur.execute("INSERT INTO transakcija (znesek, racun, opis) VALUES (%s, %s, %s)",
            (znesek, racun, opis))
    except Exception as ex:
        return template('dodaj_transakcijo.html', znesek=znesek, racun=racun, opis=opis,
                        napaka = 'Zgodila se je napaka: %s' % ex)
    redirect("/")
######################################################################
# Main program
# Connect to the database using the credentials from auth_public.
conn = psycopg2.connect(database=auth.db, host=auth.host, user=auth.user, password=auth.password)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) # disable transactions (autocommit)
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Run the server on port 8000, see http://localhost:8000/
run(host='localhost', port=8000)
| [
"janos.vidali@fmf.uni-lj.si"
] | janos.vidali@fmf.uni-lj.si |
4e002e755d0c221b8396df31adcb81a4bffa5b2a | ba916d93dfb8074241b0ea1f39997cb028509240 | /python/sliding_window.py | 2279df01725d7098a84cbdb30106369deb9195a2 | [] | no_license | satojkovic/algorithms | ecc1589898c61d2eef562093d3d2a9a2d127faa8 | f666b215bc9bbdab2d2257c83ff1ee2c31c6ff8e | refs/heads/master | 2023-09-06T08:17:08.712555 | 2023-08-31T14:19:01 | 2023-08-31T14:19:01 | 169,414,662 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,302 | py | def find_average_subarrays(k, arr):
    result = []
    # Brute force: recompute each length-k window sum from scratch, O(n*k).
    for i in range(len(arr) - k + 1):
        avr = sum(arr[i:i+k]) / k
        result.append(avr)
    return result
def find_average_subarrays_window(k, arr):
    """Return the averages of every contiguous length-k window of arr.

    Maintains a running window sum so the whole pass is O(n) instead of
    recomputing each window from scratch.
    """
    averages = []
    running, start = 0.0, 0
    for end, value in enumerate(arr):
        running += value
        if end >= k - 1:
            # Window [start, end] is complete: record it, then slide.
            averages.append(running / k)
            running -= arr[start]
            start += 1
    return averages
def test_find_average_subarrays():
    """Check that the brute-force and sliding-window versions agree on a
    typical case, a single-element case, and a window larger than the input."""
    k = 5
    arr = [1, 3, 2, 6, -1, 4, 1, 8, 2]
    ans = [2.2, 2.8, 2.4, 3.6, 2.8]
    assert find_average_subarrays(k, arr) == ans
    assert find_average_subarrays_window(k, arr) == ans
    assert find_average_subarrays(1, [10]) == [10]
    assert find_average_subarrays_window(1, [10]) == [10]
    # k longer than the array -> no windows at all.
    assert find_average_subarrays(2, [123]) == []
    assert find_average_subarrays_window(2, [123]) == []
def max_sum_subarray(k, arr):
    """Return the maximum sum over all contiguous length-k windows of arr.

    Sliding-window pass in O(n); returns -sys.maxsize when the array has
    no length-k window at all.
    """
    import sys
    best = -sys.maxsize
    running, start = 0.0, 0
    for end, value in enumerate(arr):
        running += value
        if end >= k - 1:
            if running > best:
                best = running
            # Slide: drop the leftmost element of the window.
            running -= arr[start]
            start += 1
    return best
def test_max_sum_subarray():
    """Spot-check max_sum_subarray, including input where every window sum
    is negative."""
    assert max_sum_subarray(3, [2, 1, 5, 1, 3, 2]) == 9
    assert max_sum_subarray(2, [2, 3, 4, 1, 5]) == 7
    assert max_sum_subarray(2, [1, 2, -3]) == 3
    assert max_sum_subarray(2, [-3, 2, -5, -9]) == -1
    assert max_sum_subarray(2, [1, 2, -3, 9]) == 6
def max_substring_with_k_distinct_chars(k, s):
    """Length of the longest substring of s with at most k distinct characters.

    Classic shrink-on-violation sliding window: grow the right edge one
    character at a time, and move the left edge forward whenever the window
    holds more than k distinct characters.
    """
    from collections import defaultdict
    counts = defaultdict(int)
    best = 0
    start = 0
    for end, ch in enumerate(s):
        counts[ch] += 1
        # Shrink from the left until at most k distinct characters remain.
        while len(counts) > k:
            dropped = s[start]
            counts[dropped] -= 1
            if not counts[dropped]:
                del counts[dropped]
            start += 1
        best = max(best, end - start + 1)
    return best
def test_max_susbstring_with_k_distinct_chars():
    """Examples from the classic 'longest substring with K distinct' problem."""
    assert max_substring_with_k_distinct_chars(2, 'araaci') == 4
    assert max_substring_with_k_distinct_chars(1, 'araaci') == 2
    assert max_substring_with_k_distinct_chars(3, 'cbbebi') == 5
| [
"satojkovic@gmail.com"
] | satojkovic@gmail.com |
dc0732bc17c1d9bd0f61168d8585bf9ebcd8dcc7 | 19ae613228d539deb768ece8b65e1f50a610bab6 | /pddl/pddl.py | 296e3e4b75acebd41edb2508ab767a5ada2e43be | [] | no_license | hyzcn/Bat-leth | cf7166c8c465bfabd3abf78ea712af95eff42ab3 | 1fc9c033d89f4d8a758f57e539622c4a36f1811b | refs/heads/master | 2021-06-07T20:54:21.554291 | 2016-10-16T01:04:21 | 2016-10-16T01:04:21 | 255,225,651 | 0 | 1 | null | 2020-04-13T03:50:15 | 2020-04-13T03:50:15 | null | UTF-8 | Python | false | false | 5,333 | py | #
# This file is part of pyperplan.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
"""
This module contains all data structures needed to represent a PDDL domain and
possibly a task definition.
"""
class Type:
"""
This class represents a PDDL type.
"""
def __init__(self, name, parent):
self.name = name.lower()
self.parent = parent
def __repr__(self):
return self.name
def __str__(self):
return self.name
class Predicate:
def __init__(self, name, signature):
"""
name: The name of the predicate.
signature: A list of tuples (name, [types]) to represent a list of
parameters and their type(s).
"""
self.name = name
self.signature = signature
def __repr__(self):
return self.name + str(self.signature)
def __str__(self):
return self.name + str(self.signature)
class Quantifier:
def __init__(self, name, signature, predicates):
"""
name: The name of the predicate.
signature: A list of tuples (name, [types]) to represent a list of
parameters and their type(s) which are in the scope.
predicate: A list of predicates
"""
self.name = name #exists | forall
self.signature = signature
self.predicates = predicates
def __repr__(self):
return self.name + str(self.signature) + str(self.predicates)
def __str__(self):
return self.name + str(self.signature) + str(self.predicates)
# Formula is unused right now!
#class Formula:
# def __init__(self, operator, operands=[]):
# # right now we only need AND
# self._operator = operator # 'AND' | 'OR' | 'NOT'
# self._operands = operands
#
# def getOperator(self):
# return self._operator
# operator = property(getOperator)
#
# def getOperands(self):
# return self._operands
# operands = property(getOperands)
class Effect:
def __init__(self):
"""
addlist: Set of predicates that have to be true after the action
dellist: Set of predicates that have to be false after the action
"""
self.addlist = set()
self.dellist = set()
class Action:
def __init__(self, name, signature, precondition, effect, decomp=None):
"""
name: The name identifying the action
signature: A list of tuples (name, [types]) to represent a list of
parameters an their type(s).
precondition: A list of predicates that have to be true before the
action can be applied
effect: An effect instance specifying the postcondition of the action
"""
self.name = name
self.signature = signature
self.precondition = precondition
self.effect = effect
self.decomp = decomp
class Domain:
def __init__(self, name, types, predicates, actions, constants={}):
"""
name: The name of the domain
types: A dict of typename->Type instances in the domain
predicates: A list of predicates in the domain
actions: A list of actions in the domain
constants: A dict of name->type pairs of the constants in the domain
"""
self.name = name
self.types = types
self.predicates = predicates
self.actions = actions
self.constants = constants
def __repr__(self):
return ('< Domain definition: %s Predicates: %s Actions: %s '
'Constants: %s >' % (self.name,
[str(p) for p in self.predicates],
[str(a) for a in self.actions],
[str(c) for c in self.constants]))
__str__ = __repr__
class Problem:
def __init__(self, name, domain, objects, init, goal):
"""
name: The name of the problem
domain: The domain in which the problem has to be solved
story_objs: A dict name->type of story_objs that are used in the problem
init: A list of predicates describing the initial state
goal: A list of predicates describing the goal state
"""
self.name = name
self.domain = domain
self.objects = objects
self.initial_state = init
self.goal = goal
def __repr__(self):
return ('< Problem definition: %s '
'Domain: %s Objects: %s Initial State: %s Goal State : %s >' %
(self.name, self.domain.name,
[self.objects[o].name for o in self.objects],
[str(p) for p in self.initial_state],
[str(p) for p in self.goal]))
__str__ = __repr__
| [
"drwiner131@gmail.com"
] | drwiner131@gmail.com |
b2a52cd3a7ad6f3b0a3ac55ff2c6147a0ded178e | b1303152c3977a22ff9a0192c0c32310e65a6d77 | /python/530.minimum-absolute-difference-in-bst.py | a3a1a96c14fc0afec10ff9a53dfcfe8ba839397c | [
"Apache-2.0"
] | permissive | stavanmehta/leetcode | 1b8da1c2bfacaa76ddfb96b8dbce03bf08c54c27 | 1224e43ce29430c840e65daae3b343182e24709c | refs/heads/master | 2021-07-15T16:02:16.107962 | 2021-06-24T05:39:14 | 2021-06-24T05:39:14 | 201,658,706 | 0 | 0 | Apache-2.0 | 2021-06-24T05:39:15 | 2019-08-10T16:59:32 | Java | UTF-8 | Python | false | false | 247 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def getMinimumDifference(self, root: TreeNode) -> int:
| [
"noreply@github.com"
] | stavanmehta.noreply@github.com |
eb96e3855d329c0fad9ec94c2f1a5316a047fe52 | 134ff3c0719d4c0022eb0fb7c859bdbff5ca34b2 | /desktop/core/ext-py/django_nose/django_nose/nose_runner.py | 3cb8d1e63c08da8d09673c4aa7912ef1358027e8 | [
"Apache-2.0"
] | permissive | civascu/hue | 22637f13a4cfc557716557661523131b6ac16da4 | 82f2de44789ff5a981ed725175bae7944832d1e9 | refs/heads/master | 2020-03-31T01:50:39.449966 | 2010-07-21T01:05:50 | 2010-07-21T01:07:15 | 788,284 | 0 | 0 | Apache-2.0 | 2019-02-04T07:03:12 | 2010-07-21T07:34:27 | Python | UTF-8 | Python | false | false | 2,660 | py | """
Django test runner that invokes nose.
Usage:
./manage.py test DJANGO_ARGS -- NOSE_ARGS
The 'test' argument, and any other args before '--', will not be passed
to nose, allowing django args and nose args to coexist.
You can use
NOSE_ARGS = ['list', 'of', 'args']
in settings.py for arguments that you always want passed to nose.
"""
import sys
from django.conf import settings
from django.db import connection
from django.test import utils
import nose
SETUP_ENV = 'setup_test_environment'
TEARDOWN_ENV = 'teardown_test_environment'
def get_test_enviroment_functions():
"""The functions setup_test_environment and teardown_test_environment in
<appname>.tests modules will be automatically called before and after
running the tests.
"""
setup_funcs = []
teardown_funcs = []
for app_name in settings.INSTALLED_APPS:
mod = __import__(app_name, None, None, ['tests'])
if hasattr(mod, 'tests'):
if hasattr(mod.tests, SETUP_ENV):
setup_funcs.append(getattr(mod.tests, SETUP_ENV))
if hasattr(mod.tests, TEARDOWN_ENV):
teardown_funcs.append(getattr(mod.tests, TEARDOWN_ENV))
return setup_funcs, teardown_funcs
def setup_test_environment(setup_funcs):
utils.setup_test_environment()
for func in setup_funcs:
func()
def teardown_test_environment(teardown_funcs):
utils.teardown_test_environment()
for func in teardown_funcs:
func()
def run_tests_explicit(nose_args, verbosity=1, interactive=True):
"""Setup django and run nose with given arguments."""
setup_funcs, teardown_funcs = get_test_enviroment_functions()
# Prepare django for testing.
setup_test_environment(setup_funcs)
old_db_name = settings.DATABASE_NAME
connection.creation.create_test_db(verbosity, autoclobber=not interactive)
# Pretend it's a production environment.
settings.DEBUG = False
ret = nose.run(argv=nose_args)
# Clean up django.
connection.creation.destroy_test_db(old_db_name, verbosity)
teardown_test_environment(teardown_funcs)
return ret
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
"""Calculates nose arguments and runs tests."""
nose_argv = ['nosetests']
if hasattr(settings, 'NOSE_ARGS'):
nose_argv.extend(settings.NOSE_ARGS)
# Everything after '--' is passed to nose.
if '--' in sys.argv:
hyphen_pos = sys.argv.index('--')
nose_argv.extend(sys.argv[hyphen_pos + 1:])
if verbosity >= 1:
print ' '.join(nose_argv)
return run_tests_explicit(nose_argv, verbosity, interactive)
| [
"bcwalrus@cloudera.com"
] | bcwalrus@cloudera.com |
8dffd4a74543cb7509f054827c210076c6e09a40 | 7bd0954e956993df19d833810f9d71b60e2ebb9a | /test/matrix/test_LIGO_noise.py | 07feafe27f250c7076da65d882d3fdc1023562bf | [
"Apache-2.0"
] | permissive | aa158/phasor | 5ee0cec4f816b88b0a8ac298c330ed48458ec3f2 | fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d | refs/heads/master | 2021-10-22T09:48:18.556091 | 2019-03-09T18:56:05 | 2019-03-09T18:56:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from os import path
import numpy as np
import declarative
import numpy.testing as np_test
import pytest
from declarative.bunch import (
DeepBunch
)
from phasor.utilities.np import logspaced
from phasor import system
from ligo_sled import (
LIGOBasicOperation
)
import pickle
try:
stresstest = pytest.mark.skipif(
not pytest.config.getoption("--do-stresstest"),
reason="need --do-stresstest option to run"
)
except AttributeError:
#needed for importing when py.test isn't in test mode
stresstest = lambda x : x
@stresstest
def test_LIGO_noise_inversion():
with open(path.join(path.split(__file__)[0], 'aLIGO_outspec.pckl'), 'rb') as F:
output = declarative.Bunch(pickle.load(F))
def test_inverse():
db = DeepBunch()
db.det.input.PSL.power.val = 27 * 7
db.det.input.PSL.power.units = 'W'
db.det.LIGO.S_BS_IX.L_detune.val = 1064e-9 * .001
db.det.LIGO.S_BS_IX.L_detune.units = 'm'
db.det.output.AS_efficiency_percent = 85
db.environment.F_AC.frequency.val = logspaced(.5, 10000, 1000)
sys = system.BGSystem(
ctree = db,
solver_name = 'loop_LUQ',
)
sys.own.det = LIGOBasicOperation()
print(sys.det.LIGO.YarmDC.DC_readout)
print(sys.det.LIGO.XarmDC.DC_readout)
print(sys.det.LIGO.REFLDC.DC_readout)
print(sys.det.LIGO.POPTrueDC.DC_readout)
print(sys.det.output.ASPD_DC.DC_readout)
readoutI = sys.det.output.ASPDHD_AC
ASPDHD_AC_nls = readoutI.AC_noise_limited_sensitivity
rel = (ASPDHD_AC_nls / output.ASPDHD_AC_nls).real
print("RELMINMAX: ", np.min(rel), np.max(rel))
np_test.assert_almost_equal(
rel, 1, 2
)
for i in range(20):
test_inverse()
| [
"Lee.McCuller@gmail.com"
] | Lee.McCuller@gmail.com |
db803a0586142b52f75809ffd21a1d35b32ff2a4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2976/60705/263776.py | 3a732b098934518bb61117ea96d97db82cacde9c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | short = input()
length = len(short)
k = 1
while k < 20:
try:
a = input()
except EOFError:
k += 1
continue
len_of_a = len(a)
# 删除短字符串
i = 0
while i + length <= len_of_a:
if short == a[i:i+length]:
a = a[0:i] + a[i+length:len(a)]
len_of_a -= length
i = -1
i += 1
# 删除空格
i = 0
while i < len_of_a:
if a[i] == " ":
a = a[0:i] + a[i + 1:len(a)]
len_of_a -= 1
i -= 1
i += 1
if a == 'printf("Hi")':
a = a.replace("H", "")
print(a)
k += 1
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
4adc28cf02ad73151e0d6daf883933c57205b21f | 7950c4faf15ec1dc217391d839ddc21efd174ede | /leetcode-cn/sword2offer/000剑指0_Offer_29._顺时针打印矩阵.py | 583332df292ce5dd4b00535341ccffb0c19c9cf1 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | '''
执行用时:24 ms, 在所有 Python 提交中击败了87.79%的用户
内存消耗:13.5 MB, 在所有 Python 提交中击败了94.97%的用户
'''
class Solution(object):
    def spiralOrder(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: List[int]

        Walk the matrix in clockwise spiral order by peeling one boundary
        ring per iteration and shrinking the four bounds inward.
        """
        rows = len(matrix)
        cols = len(matrix[0]) if matrix else 0
        out = []
        top, bottom = 0, rows - 1
        left, right = 0, cols - 1
        while top <= bottom and left <= right:
            # Top edge, left -> right.
            out.extend(matrix[top][col] for col in range(left, right + 1))
            # Right edge, just below the top row.
            out.extend(matrix[row][right] for row in range(top + 1, bottom + 1))
            if top < bottom and left < right:
                # Bottom edge, right -> left (skipping the right corner).
                out.extend(matrix[bottom][col] for col in range(right - 1, left - 1, -1))
                # Left edge, bottom -> top (skipping both corners).
                out.extend(matrix[row][left] for row in range(bottom - 1, top, -1))
            top += 1
            bottom -= 1
            left += 1
            right -= 1
        return out
'''
逆时针旋转 == (转置+倒序)
转置: zip
倒序: [:: -1]
执行用时:36 ms, 在所有 Python 提交中击败了36.27%的用户
内存消耗:14.1 MB, 在所有 Python 提交中击败了9.69%的用户
'''
class Solution(object):
    def spiralOrder(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: List[int]

        Peel off the first row, then rotate the remainder counter-clockwise
        (transpose + reverse) and repeat until the matrix is empty.
        """
        spiral = []
        while matrix:
            spiral.extend(matrix[0])
            # list(...) is required on Python 3, where zip returns an iterator
            # that cannot be sliced; the original `zip(...)[::-1]` raised
            # TypeError on Python 3. On Python 2 this is a harmless copy.
            matrix = list(zip(*matrix[1:]))[::-1]
        return spiral
| [
"838255715@qq.com"
] | 838255715@qq.com |
413d10d141e21938b4e969cb4513dd7b41f93f96 | d54e1b89dbd0ec5baa6a018464a419e718c1beac | /Python from others/飞机大战/wk_11_事件退出事件.py | 60da8d84e1ea8c3b53ded2f4b9a0bad6839fe54c | [] | no_license | cjx1996/vscode_Pythoncode | eda438279b7318e6cb73211e26107c7e1587fdfb | f269ebf7ed80091b22334c48839af2a205a15549 | refs/heads/master | 2021-01-03T19:16:18.103858 | 2020-05-07T13:51:31 | 2020-05-07T13:51:31 | 240,205,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | import pygame
# 游戏的初始化
pygame.init()
# 创建游戏的窗口
screen = pygame.display.set_mode((480, 700))
# 绘制背景图像
bg = pygame.image.load("./images/background.png")
screen.blit(bg, (0, 0))
# 绘制英雄的飞机
hero = pygame.image.load("./images/me1.png")
# screen.blit(hero, (200, 500))
# 可以在所有绘制工作完成之后, 统一调用 update 方法
# pygame.display.update()
# 创建时钟对象
clock = pygame.time.Clock()
# 1. 定义rect记录飞机的初始位置
hero_rect = pygame.Rect(150, 300, 102, 126)
while True:
# 可以指定循环体内部的代码执行频率
clock.tick(60)
# 监听事件
for event in pygame.event.get():
# 判断事件类型是否是退出事件
if event.type == pygame.QUIT:
print("游戏退出...")
# quit 卸载所有的模块
pygame.quit()
# exit() 直接终止当前正在执行的程序
exit()
# 2. 修改飞机的位置
hero_rect.y -= 1
if hero_rect.y <= -126:
hero_rect.y = 700
# 3. 调用blit方法绘制图像
screen.blit(bg, (0, 0))
screen.blit(hero, hero_rect)
# 4. 调用update方法更新显示
pygame.display.update()
pygame.quit()
| [
"1121287904@qq.com"
] | 1121287904@qq.com |
a4ee19fffd100a3fc05a6e021b724a07d4482aad | 459929ce79538ec69a6f8c32e608f4e484594d68 | /venv/Lib/site-packages/kubernetes/client/models/extensions_v1beta1_deployment_list.py | 10b69c3d65178e80e977c84bfa9479ff4e264369 | [] | no_license | yychai97/Kubernetes | ec2ef2a98a4588b7588a56b9d661d63222278d29 | 2955227ce81bc21f329729737b5c528b02492780 | refs/heads/master | 2023-07-02T18:36:41.382362 | 2021-08-13T04:20:27 | 2021-08-13T04:20:27 | 307,412,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,727 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ExtensionsV1beta1DeploymentList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[ExtensionsV1beta1Deployment]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
    def __init__(self, api_version=None, items=None, kind=None, metadata=None): # noqa: E501
        """ExtensionsV1beta1DeploymentList - a model defined in OpenAPI""" # noqa: E501
        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None
        # Optional fields are only assigned when provided; `items` always goes
        # through its property setter, which rejects None.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
    # Generated pass-through accessors for the optional `apiVersion` field.
    @property
    def api_version(self):
        """Gets the api_version of this ExtensionsV1beta1DeploymentList. # noqa: E501
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
        :return: The api_version of this ExtensionsV1beta1DeploymentList. # noqa: E501
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this ExtensionsV1beta1DeploymentList.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
        :param api_version: The api_version of this ExtensionsV1beta1DeploymentList. # noqa: E501
        :type: str
        """
        self._api_version = api_version
@property
def items(self):
"""Gets the items of this ExtensionsV1beta1DeploymentList. # noqa: E501
Items is the list of Deployments. # noqa: E501
:return: The items of this ExtensionsV1beta1DeploymentList. # noqa: E501
:rtype: list[ExtensionsV1beta1Deployment]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this ExtensionsV1beta1DeploymentList.
Items is the list of Deployments. # noqa: E501
:param items: The items of this ExtensionsV1beta1DeploymentList. # noqa: E501
:type: list[ExtensionsV1beta1Deployment]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this ExtensionsV1beta1DeploymentList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this ExtensionsV1beta1DeploymentList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this ExtensionsV1beta1DeploymentList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this ExtensionsV1beta1DeploymentList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this ExtensionsV1beta1DeploymentList. # noqa: E501
:return: The metadata of this ExtensionsV1beta1DeploymentList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this ExtensionsV1beta1DeploymentList.
:param metadata: The metadata of this ExtensionsV1beta1DeploymentList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtensionsV1beta1DeploymentList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"49704239+yychai97@users.noreply.github.com"
] | 49704239+yychai97@users.noreply.github.com |
fb1a0fe8be3323847c1589104e7ad955265f9f5a | 94487ea9d7d2bbdf46797fc5bf82fee45cf23db5 | /tests/python/unittest/test_tir_schedule_set_axis_separator.py | 102b3d1cd71062ee45a0b251ca94cf6c3217bb9c | [
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | were/tvm | 9cc379dac1bcd9ae83b133a313db75f5a63640f6 | afb67e64a1891e1d1aab03c4614fca11473e7b27 | refs/heads/master | 2022-11-22T15:47:02.888421 | 2022-05-28T00:10:40 | 2022-05-28T00:10:40 | 146,328,333 | 3 | 0 | Apache-2.0 | 2018-08-27T17:03:20 | 2018-08-27T17:03:19 | null | UTF-8 | Python | false | false | 6,121 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.tir import IndexMap
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
@T.prim_func
def element_wise(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_set_axis_separator(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def element_wise_set_axis_separator_input_buffer(A: T.Buffer(shape=(128, 128), dtype="float32", axis_separators=(1,)), C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer([128, 128], dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + T.float32(1)
@T.prim_func
def element_wise_subregion_match(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion0 = T.match_buffer(B[i, j], [], offset_factor=1)
B_subregion0[()] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion1 = T.match_buffer(B[i, j], [], offset_factor=1)
C[vi, vj] = B_subregion1[()] + 1.0
@T.prim_func
def element_wise_subregion_match_set_axis_separator(A: T.Buffer[(128, 128), "float32"], C: T.Buffer[(128, 128), "float32"]) -> None:
B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1])
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion0 = T.match_buffer(B[i, j], [], dtype="float32", offset_factor=1, axis_separators=[1])
B_subregion0[()] = A[vi, vj] * T.float32(2)
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion1 = T.match_buffer(B[i, j], [], dtype="float32", offset_factor=1, axis_separators=[1])
C[vi, vj] = B_subregion1[()] + T.float32(1)
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
use_sugared_transform = tvm.testing.parameter(
by_dict={"set_axis_separators": False, "transform_layout_sugared": True}
)
def test_set_axis_separator(use_sugared_transform):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
if use_sugared_transform:
s.set_axis_separator(s.get_block("B"), ("write",0), [1])
else:
s.transform_layout(block='B', buffer='B', index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j])
tvm.ir.assert_structural_equal(element_wise_set_axis_separator, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_set_scope_fail_on_index_out_of_bound():
func = element_wise
s = tir.Schedule(func, debug_mask='all')
with pytest.raises(AssertionError):
s.set_axis_separator(s.get_block("B"), ("write",1),[1])
with pytest.raises(AssertionError):
s.set_axis_separator(s.get_block("B"), ("read",-1),[1])
def test_set_axis_separator_input_buffer(use_sugared_transform):
func = element_wise
s = tir.Schedule(func, debug_mask='all')
if use_sugared_transform:
s.transform_layout(block='B', buffer='A', index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j])
else:
s.set_axis_separator(s.get_block("B"), ("read",0), [1])
tvm.ir.assert_structural_equal(element_wise_set_axis_separator_input_buffer, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
def test_set_axis_separator_subregion(use_sugared_transform):
func = element_wise_subregion_match
s = tir.Schedule(func, debug_mask='all')
if use_sugared_transform:
s.transform_layout(block='B', buffer='B', index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j])
else:
s.set_axis_separator(s.get_block("B"), ("write",0), [1])
tvm.ir.assert_structural_equal(element_wise_subregion_match_set_axis_separator, s.mod["main"])
verify_trace_roundtrip(sch=s, mod=func)
if __name__ == "__main__":
tvm.testing.main()
| [
"noreply@github.com"
] | were.noreply@github.com |
b8f8c7887b4161a0796f663e1360ff23717fcf82 | 3e4b8fe54f11bf36f3615c21fdc1dca0ed00fe72 | /month01/code/day08/shopping.py | afbad4e6ba4720b418384e8528526145ab6bd682 | [] | no_license | leinian85/year2019 | 30d66b1b209915301273f3c367bea224b1f449a4 | 2f573fa1c410e9db692bce65d445d0543fe39503 | refs/heads/master | 2020-06-21T20:06:34.220046 | 2019-11-04T06:37:02 | 2019-11-04T06:37:02 | 197,541,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,483 | py | commodity_info = {
101: {"name": "屠龙刀", "price": 10000},
102: {"name": "倚天剑", "price": 10000},
103: {"name": "九阴白骨爪", "price": 8000},
104: {"name": "九阳神功", "price": 9000},
105: {"name": "降龙十八掌", "price": 8000},
106: {"name": "乾坤大挪移", "price": 10000}
}
order = {}
def commodity_list():
"""
显示商品明细
:return:
"""
for key, value in commodity_info.items():
print("编号:%d,名称:%s,单价:%d。" % (key, value["name"], value["price"]))
def buy():
"""
购买商品
:return:
"""
commodity_list()
while True:
cid = int(input("请输入商品编号:"))
if cid in commodity_info:
break
else:
print("该商品不存在")
count = int(input("请输入购买数量:"))
if cid not in order:
order[cid] = count
else:
order[cid] += count
print("添加到购物车。")
def shopping_list():
"""
计算购物的总金额
:return: 返回总金额
"""
total_money = 0
for cid,count in order.items():
commodity = commodity_info[cid]
print("商品:%s,单价:%d,数量:%d." % (commodity["name"], commodity["price"], count))
total_money += commodity["price"] * count
return total_money
def square():
"""
商品结算
:return:
"""
if bool(order):
total_money = shopping_list()
while True:
str_money = input("总价%d元,请输入金额:" % total_money)
if str_money == "":
str_out = input("退出请按'Y':")
if str_out== "Y":
order.clear()
break
else:
money = float(str_money)
if money >= total_money:
print("购买成功,找回:%d元。" % (money - total_money))
order.clear()
break
else:
print("金额不足.")
else:
print("你未购买任何物品,不需要结算")
def shopping():
"""
购物
:return:
"""
while True:
item = input("1键购买,2键结算。")
if item == "1":
buy()
elif item == "2":
square()
else:
break
shopping() | [
"42737521@qq.com"
] | 42737521@qq.com |
e606fb13a685824d01e0a0355fcc0f0aa2b9a8da | 6c42b234cba1f077dc306242ad1973d56f812343 | /beginner_tasks/strings.py | 82cd9c20d1567b20f2865c2fe8e03db530f63596 | [] | no_license | wencakisa/Python-Dev | 330a9ba3a8320f8e1fa5bfb86c85b24253361a6a | 511d307b6f64174002112cadcdbd0e23c1d69b70 | refs/heads/master | 2021-01-17T13:23:22.939323 | 2016-08-02T14:57:36 | 2016-08-02T14:57:36 | 59,685,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | import re
def slice_str(string, max_length):
if max_length < len(string):
return string[: max_length] + "..."
return string
def show_after(string, after):
return string[re.search(r'\s{}\s'.format(after), string).span()[1]:]
def main():
string = "This is soo difficult, I prefer playing WoW"
after = "is"
print(show_after(string, after))
if __name__ == '__main__':
main()
| [
"wencakisa@gmail.com"
] | wencakisa@gmail.com |
4e7130c92ca3727848a0d762125ea88d8243c716 | da052c0bbf811dc4c29a83d1b1bffffd41becaab | /core/stock_by_location/__openerp__.py | 9d07484e4fbf097c623c67fd130a188a963c3698 | [] | no_license | Muhammad-SF/Test | ef76a45ad28ac8054a4844f5b3826040a222fb6e | 46e15330b5d642053da61754247f3fbf9d02717e | refs/heads/main | 2023-03-13T10:03:50.146152 | 2021-03-07T20:28:36 | 2021-03-07T20:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 BrowseInfo(<http://www.browseinfo.in>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stocks By Location',
'version': '1.1.3',
'category': 'Warehouse',
'sequence': 14,
'price': '25',
'currency': "EUR",
'summary': '',
'description': """
-Stock Balance by Location
-Stock Quantity by location
-Location based stock
-Display Product Quantity based on stock.
-Warehouse stock based on location
-Stock Quantity based on location
-Stock by location
-Stock qty by location
-Stock location
""",
'author': 'BrowseInfo',
'website': 'http://www.browseinfo.in',
'images': [],
'depends': ['base','sale','stock', 'inventory_reserved_available_qty'],
'data': [
'product.xml',
],
'installable': True,
'auto_install': False,
"images":['static/description/Banner.png'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"jbalu2801@gmail.com"
] | jbalu2801@gmail.com |
ddf9db09959a2290f75dfe464a502b92e03bf010 | ce285e8e855137888552e55083e19838fab3afda | /settings/common.py | 226e17ddba151670455e45a4f50c668643b365b0 | [] | no_license | bmarchenko/traveler | 4215b5596f2ea70796ea1ff4d21342aa3cf6ccff | 5af5a302677dd3037e7e1b52d7e5ef49dd41cf86 | refs/heads/master | 2016-08-11T20:33:16.410335 | 2012-11-10T22:48:19 | 2012-11-10T22:48:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | # Django settings for your project.
import conf.environment
import os
LOCAL = False
SITE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wsgi.application'
ROOT_URLCONF = 'urls'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.\
os.path.join(SITE_ROOT, 'static'),
)
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates')
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'sdliu298sdf2398fqwf2089asdfasdfu098u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"traveler.context_processors.nav_content",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.comments',
'south',
'traveler',
'taggit',
'inlines',
'blog',
'hadrian.contrib.locations',
'gallery',
'sorl.thumbnail',
'django_extensions',
'bootstrap',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
}
}
}
| [
"dstegelman@gmail.com"
] | dstegelman@gmail.com |
cdfd9f0abdd1724bd2da56b313d4938752e38da7 | a691e764b10453c69e040abfa6841d25b622beba | /orquesta/composers/native.py | f025177d6f03821eb4e085cdc882ebde1fc3d98c | [
"Apache-2.0"
] | permissive | alertlogic/orquesta | ee0952c5e79663e4c928e6028e0cf514c55359d4 | 68fddf0ab312cca35616fcb3815966ab2fe83edb | refs/heads/master | 2023-08-15T23:01:10.836310 | 2021-10-14T16:20:49 | 2021-10-14T16:20:49 | 405,152,762 | 0 | 0 | Apache-2.0 | 2021-09-10T18:24:28 | 2021-09-10T16:56:38 | null | UTF-8 | Python | false | false | 4,312 | py | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from six.moves import queue
from orquesta.composers import base as comp_base
from orquesta import graphing
from orquesta.specs import native as native_specs
LOG = logging.getLogger(__name__)
class WorkflowComposer(comp_base.WorkflowComposer):
wf_spec_type = native_specs.WorkflowSpec
@classmethod
def compose(cls, spec):
if not cls.wf_spec_type:
raise TypeError("Undefined spec type for composer.")
if not isinstance(spec, cls.wf_spec_type):
raise TypeError('Unsupported spec type "%s".' % str(type(spec)))
return cls._compose_wf_graph(spec)
@classmethod
def _compose_wf_graph(cls, wf_spec):
if not isinstance(wf_spec, cls.wf_spec_type):
raise TypeError("Workflow spec is not typeof %s." % cls.wf_spec_type.__name__)
q = queue.Queue()
wf_graph = graphing.WorkflowGraph()
for task_name, condition, task_transition_item_idx in wf_spec.tasks.get_start_tasks():
q.put((task_name, []))
while not q.empty():
task_name, splits = q.get()
wf_graph.add_task(task_name)
if wf_spec.tasks.is_join_task(task_name):
task_spec = wf_spec.tasks[task_name]
barrier = "*" if task_spec.join == "all" else task_spec.join
wf_graph.set_barrier(task_name, value=barrier)
# Determine if the task is a split task and if it is in a cycle. If the task is a
# split task, keep track of where the split(s) occurs.
if wf_spec.tasks.is_split_task(task_name) and not wf_spec.tasks.in_cycle(task_name):
splits.append(task_name)
if splits:
wf_graph.update_task(task_name, splits=splits)
# Update task attributes if task spec has retry criteria.
task_spec = wf_spec.tasks.get_task(task_name)
if task_spec.has_retry():
retry_spec = {
"when": getattr(task_spec.retry, "when", None),
"count": getattr(task_spec.retry, "count", None),
"delay": getattr(task_spec.retry, "delay", None),
}
wf_graph.update_task(task_name, retry=retry_spec)
# Add task transition to the workflow graph.
next_tasks = wf_spec.tasks.get_next_tasks(task_name)
for next_task_name, condition, task_transition_item_idx in next_tasks:
if next_task_name == "retry":
retry_spec = {"when": condition or "<% completed() %>", "count": 3}
wf_graph.update_task(task_name, retry=retry_spec)
continue
if not wf_graph.has_task(next_task_name) or not wf_spec.tasks.in_cycle(
next_task_name
):
q.put((next_task_name, list(splits)))
crta = [condition] if condition else []
seqs = wf_graph.has_transition(
task_name, next_task_name, criteria=crta, ref=task_transition_item_idx
)
# Use existing transition if present otherwise create new transition.
if seqs:
wf_graph.update_transition(
task_name,
next_task_name,
key=seqs[0][2],
criteria=crta,
ref=task_transition_item_idx,
)
else:
wf_graph.add_transition(
task_name, next_task_name, criteria=crta, ref=task_transition_item_idx
)
return wf_graph
| [
"m4d.coder@gmail.com"
] | m4d.coder@gmail.com |
c44a31bc672dd09d76b28c6336bbe521e2267744 | 409ce560793c070ef4211b99c5a4a5316a258c4f | /pylith/meshio/DataWriterVTK.py | 09ebe3ec0aea184c8dbba3a476889a1d9c618937 | [
"MIT"
] | permissive | calum-chamberlain/pylith | bb718bfb4305f03b45d42348e5d4fa5ed5f4a918 | 8712c39ade53c1cc5ac0e671e4296cee278c1dcf | refs/heads/master | 2020-12-06T17:15:08.638337 | 2016-05-15T20:30:28 | 2016-05-15T20:30:28 | 46,401,744 | 0 | 0 | null | 2016-05-15T20:30:29 | 2015-11-18T07:09:12 | C++ | UTF-8 | Python | false | false | 3,366 | py | #!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2015 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pyre/meshio/DataWriterVTK.py
##
## @brief Python object for writing finite-element data to VTK file.
from DataWriter import DataWriter
from meshio import DataWriterVTK as ModuleDataWriterVTK
# DataWriterVTK class
class DataWriterVTK(DataWriter, ModuleDataWriterVTK):
"""
Python object for writing finite-element data to VTK file.
Inventory
\b Properties
@li \b filename Name of VTK file.
@li \b time_format C style format string for time stamp in filename.
@li \b time_constant Value used to normalize time stamp in filename.
\b Facilities
@li None
"""
# INVENTORY //////////////////////////////////////////////////////////
import pyre.inventory
filename = pyre.inventory.str("filename", default="output.vtk")
filename.meta['tip'] = "Name of VTK file."
timeFormat = pyre.inventory.str("time_format", default="%f")
timeFormat.meta['tip'] = "C style format string for time stamp in filename."
from pyre.units.time import second
timeConstant = pyre.inventory.dimensional("time_constant",
default=1.0*second,
validator=pyre.inventory.greater(0.0*second))
timeConstant.meta['tip'] = "Values used to normalize time stamp in filename."
precision = pyre.inventory.int("float_precision", default=6,
validator=pyre.inventory.greater(0))
precision.meta['tip'] = "Precision of floating point values in output."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="datawritervtk"):
"""
Constructor.
"""
DataWriter.__init__(self, name)
ModuleDataWriterVTK.__init__(self)
return
def initialize(self, normalizer):
"""
Initialize writer.
"""
DataWriter.initialize(self, normalizer, self.filename)
timeScale = normalizer.timeScale()
timeConstantN = normalizer.nondimensionalize(self.timeConstant, timeScale)
ModuleDataWriterVTK.filename(self, self.filename)
ModuleDataWriterVTK.timeScale(self, timeScale.value)
ModuleDataWriterVTK.timeFormat(self, self.timeFormat)
ModuleDataWriterVTK.timeConstant(self, timeConstantN)
ModuleDataWriterVTK.precision(self, self.precision)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Configure object.
"""
try:
DataWriter._configure(self)
except ValueError, err:
aliases = ", ".join(self.aliases)
raise ValueError("Error while configuring VTK output "
"(%s):\n%s" % (aliases, err.message))
return
# FACTORIES ////////////////////////////////////////////////////////////
def data_writer():
"""
Factory associated with DataWriter.
"""
return DataWriterVTK()
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
f324e10f5052054d2e3506b49f197c47921214b8 | 8ec05f1d5800e0b98afa92367f74bed9f95e0ee9 | /venv/Scripts/autopep8-script.py | 4a622cfc0daaa5f5bad725211ff99e47d1376171 | [] | no_license | ayanchyaziz123/ecom-final-year-project | 28362922a88c71aba29d22f29c7f34e1cad6189f | d21fdd885b3b768935dc29171c5a6761c4b88e9c | refs/heads/master | 2023-08-12T17:10:23.826744 | 2021-10-06T12:36:17 | 2021-10-06T12:36:17 | 405,435,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #!f:\proshop_django-master\venv\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'autopep8==1.5.4','console_scripts','autopep8'
__requires__ = 'autopep8==1.5.4'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('autopep8==1.5.4', 'console_scripts', 'autopep8')()
)
| [
"aaziz9642@gmail.com"
] | aaziz9642@gmail.com |
f004bf5cdc9d3ec9a3989848533cffae640eb624 | ced2dc1f67830f70bc8449b864a5ddf9858a6f76 | /CMSSW_9_4_12/src/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysispri_M4500_R_0-5.py | 73cdd810f68a21cb46fd834007aa58975b8517cb | [] | no_license | xdlyu/16_MINIAODV3 | 6e1c455a17e8453974b200c05da18a81386936fe | 5f506cb0e3411fe85bc0b86d6f9477ca7d46eea3 | refs/heads/master | 2020-12-13T03:46:54.895084 | 2020-01-17T14:47:22 | 2020-01-17T14:47:22 | 234,304,490 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | from WMCore.Configuration import Configuration
name = 'WWW'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M4500_R0-5_off_pri'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt','L1PrefiringMaps_new.root']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.sendExternalFolder = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/VVV-4500-R05_hpp/qili-crab_VVV_MiniAOD_v1-05265a4e675f881e1e171fd06785811b/USER'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =50
config.Data.totalUnits = -1
config.Data.publication = False
#config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M4500_R0-1_off_pri'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"XXX@cern.ch"
] | XXX@cern.ch |
db151470bbf8c4aa74cb965d7db73f15f4d712e4 | d8b259ea6401e435643a7b90365489f0ccef61b6 | /chapter5/poplib/poplib_gmail.py | 073067347352f2e51b8438815d313baaad07c4a1 | [
"MIT"
] | permissive | elgsaid/Learning-Python-Networking-Second-Edition | 08598637c2e8cdbebaf5ebf2c2c76cac96b0c76c | 39b68fbb936cf8fa2765c5819dcf0ce0a38a3b79 | refs/heads/master | 2020-04-29T11:10:02.791614 | 2019-03-15T09:17:24 | 2019-03-15T09:17:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | #!/usr/bin/env python3
import poplib
import getpass
mailbox = poplib.POP3_SSL ('pop.gmail.com', 995)
mailbox.user('user@gmail.com')
password = getpass.getpass(prompt='Enter your password:')
mailbox.pass_(password)
EmailInformation = mailbox.stat()
print("Number of new emails: %s ", EmailInformation)
numberOfMails = EmailInformation[0]
num_messages = len(mailbox.list()[1])
for i in range (num_messages):
print("Message number "+str(i+1))
print("--------------------")
# read message
response, headerLines, bytes = mailbox.retr(i+1)
message = '\n'.join (headerLines)
#Parsing the message
parser = Parser()
email = p.parsestr(message)
print("From: "+email["From"])
print("To: "+email["To"])
print("Subject: "+email["Subject"])
print("ID: "+email['message-id'])
content_type = email.get_content_type()
if ("text/plain" == str(content_type)):
print(email.get_payload(decode=True))
# If it is an image, the name of the file is extracted
elif ("image/gif" == str(content_type)):
file_name = email.get_filename()
fp = open(file_name, 'wb')
fp.write(part.get_payload(decode = True))
fp.close()
mailbox.quit()
| [
"jose-manuel.ortega-candel@capgemini.com"
] | jose-manuel.ortega-candel@capgemini.com |
c2e9579ce129c1425c3e34152fa1d73e81a4ab49 | 3b09dc4623dac559c85c0333526d55b0615d79d7 | /problems/160.py | 2954f908cc61c9c88f77a0e50b347db0751403d2 | [] | no_license | Asperas13/leetcode | 5d45bd65c490ada9b3cb2c33331a728eab2ef9b4 | 7f2f1d4f221925945328a355d653d9622107fae7 | refs/heads/master | 2021-09-28T15:54:54.761873 | 2020-05-05T15:29:48 | 2020-05-05T15:30:59 | 145,767,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
memo = {}
while headA or headB:
if headA:
if headA in memo:
return headA
memo[headA] = 1
headA = headA.next
if headB:
if headB in memo:
return headB
memo[headB] = 1
headB = headB.next
return None
| [
"ivan.pashnev@developex.com"
] | ivan.pashnev@developex.com |
09df7e7f68fd8ba2a32ed3f08fa5d77a2593c809 | e9ccc5228e8a4e404aa6e58efbba248a1aa21d5f | /server/sources.py | be7113c98923f89dc9a57907d16e916c00ac67a0 | [] | no_license | thisismyrobot/kindleclock | c65f0a50dc4d799593955584d44572ca28c729bd | 188dd4362691acb563d9f911bd896328c650a63f | refs/heads/master | 2016-08-05T04:48:38.051853 | 2013-01-28T23:55:07 | 2013-01-28T23:55:07 | 6,515,177 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,965 | py | import base64
import cgi
import datetime
import re
import time
import tools
import urllib
import urllib2
import xml.dom.minidom
def unreadgmail():
    """Return HTML for the Gmail unread-count badge, or '???' on failure.

    Reads a base64 HTTP Basic-auth token from gmailauth.txt and queries
    the Gmail atom feed for the unread message count.
    """
    try:
        auth = open("gmailauth.txt").read()
        URL = 'https://gmail.google.com/gmail/feed/atom'
        req = urllib2.Request(URL)
        req.add_header('Authorization', 'Basic %s' % auth)
        dom = xml.dom.minidom.parse(urllib2.urlopen(req))
        count = int(dom.getElementsByTagName("fullcount")[0].lastChild.toxml())
        # Hide the number entirely when nothing is unread.
        cls = 'nounread'
        if count > 0:
            cls = 'unread'
        else:
            count = ''
        return '{0}<img src="logo_emails.png" class="{1}"/>'.format(count, cls)
    except Exception:
        # Best-effort: any failure falls through to the placeholder.
        # (Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.)
        pass
    return '???'
def agenda():
    """ Returns events from a google calendar URL. For instance you could use a
        private one like:

        https://www.google.com/calendar/feeds/[email address]/private-[stuff]/basic?[options]

        The url is stored in calxmlurl.txt in the same folder as sources.py.

        The options are whatever suits, I use:

        orderby=starttime&sortorder=ascending&singleevents=true&futureevents=true&max-results=5

        See the following for hints on options:

         * https://developers.google.com/google-apps/calendar/v2/reference#Parameters
         * https://developers.google.com/gdata/docs/2.0/reference#Queries

        Returns '???' if anything at all goes wrong (best-effort display).
    """
    try:
        results = ""
        URL = open("calxmlurl.txt").read()
        dom = xml.dom.minidom.parse(urllib.urlopen(URL))
        entries = dom.getElementsByTagName("entry")
        for e in dom.getElementsByTagName("entry"):
            # Parse out the event title; double-unescape then HTML-escape,
            # truncating long titles to fit the display.
            event = e.getElementsByTagName("title")[0].lastChild.toxml()\
                .encode('ascii', 'ignore')
            event = cgi.escape(
                tools.unescape(tools.unescape(event)).encode('ascii'))
            if len(event) > 20:
                event = event[:17] + '...'
            # Parse out the summary, this contains the start and end date/time
            summary = e.getElementsByTagName("summary")[0].lastChild.toxml()\
                .encode('ascii', 'ignore').split("\n")[0]
            date = re.findall(
                r'When:.*?[ ]([0-9]{1,2}[ ].*?[0-9]{4}).*?', summary)[0]
            date = time.strptime(date, "%d %b %Y")
            date = "%i%s" % (date.tm_mday, tools.ordinal(date.tm_mday))
            times = re.findall(r'.*?([0-9]{2}:[0-9]{2}).*?', summary)
            # Handle "All day" events (no HH:MM present in the summary).
            displaytime = "All day"
            if len(times) > 0:
                displaytime = times[0]
            # Generate some HTML
            results += "%s - <span class=\"dt\">%s, %s</span><br />" %\
                (event, date, displaytime)
        return results
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt is not swallowed.
        pass
    return "???"
def forecast():
    """Return min/max temperatures plus forecast text, or '???' on failure.

    Parses the BOM bulletin at IDA00007.dat for lines starting with
    "094029" (presumably a station id — confirm against BOM docs).
    """
    try:
        URL = "ftp://ftp2.bom.gov.au/anon/gen/fwo/IDA00007.dat"
        data = urllib.urlopen(URL).read()
        # Fields are '#'-separated; [6] is the minimum, [7] the maximum.
        temp = ""
        for line in data.split("\n"):
            if line.startswith("094029"):
                if (line.split("#")[6] != ""):
                    temp = "Min: " + line.split("#")[6] + ", "
                if (line.split("#")[7] != ""):
                    temp += "Max: " + line.split("#")[7]
        if temp != "":
            temp += "<br />"
        # Second pass: append the forecast text (field [22]) and return.
        for line in data.split("\n"):
            if line.startswith("094029"):
                return temp + line.split("#")[22]
    except Exception:
        # Narrowed from a bare `except:`; best-effort fallback below.
        pass
    return "???"
def temperature():
    """Return the current temperature string, or '???' on failure.

    Reads the BOM observation feed for station 94970 and returns
    comma-separated field [7] of the matching line.
    """
    try:
        URL = "http://www.bom.gov.au/fwo/IDT60901/IDT60901.94970.axf"
        data = urllib.urlopen(URL).read()
        for line in data.split("\n"):
            if line.startswith("0,94970"):
                return line.split(",")[7]
    except Exception:
        # Narrowed from a bare `except:`; best-effort fallback below.
        pass
    return "???"
"rwallhead@gmail.com"
] | rwallhead@gmail.com |
6d06e28c7bd02f094783d6e10e8a240deb8a4028 | d0d3697b723e11c33837b8de2d572a44b84a26db | /cruise_track_data_plotting.py | 8e0334b753d7f83150f732e2e9db4165f9c38eef | [
"MIT"
] | permissive | Swiss-Polar-Institute/science-data-utils | dc77e86a1cca0d7642daf97fa3e9045919efd867 | 6a85570ee586fa1ba1644ba2b1c9dea3a5257eae | refs/heads/master | 2022-08-09T10:02:13.678635 | 2021-11-29T20:03:47 | 2021-12-02T07:39:29 | 145,117,181 | 0 | 0 | MIT | 2022-06-21T21:37:22 | 2018-08-17T12:20:58 | Python | UTF-8 | Python | false | false | 3,863 | py | import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas
import datetime
#import seaborn as sns
def get_data_file(filepath, columns):
    """Read a subset of columns from a CSV file into a pandas DataFrame.

    Args:
        filepath: path to the CSV file; the first row is treated as the header.
        columns: iterable of column names to load (passed to ``usecols``).

    Returns:
        pandas.DataFrame containing only the requested columns.
    """
    # (Removed commented-out interactive-input dead code and a needless
    # intermediate variable.)
    return pandas.read_csv(filepath, usecols=columns, header=0)
def plot_data_sources_from_file():
    """Plot Trimble GPS vs GLONASS positions for 2017-01-02 at full and
    sixty-row-subsampled resolution, as two stacked scatter plots.

    NOTE(review): reads hard-coded absolute file paths, so this only runs
    on the original author's machine.
    """
    # get some data from GPS
    filepath = '/home/jen/projects/ace_data_management/wip/cruise_track_data/ace_trimble_gps_2017-01-02.csv'
    columns = ['date_time', 'latitude', 'longitude', 'device_id']
    gps_data = get_data_file(filepath, columns)
    # every 60th row ~= one fix per minute (assumes 1 Hz input — TODO confirm)
    sixty_sec_res_gps = gps_data.iloc[::60]

    # get some data from GLONASS
    filepath = '/home/jen/projects/ace_data_management/wip/cruise_track_data/ace_glonass_2017-01-02.csv'
    columns = ['date_time', 'latitude', 'longitude', 'device_id']
    glonass_data = get_data_file(filepath, columns)
    sixty_sec_res_glonass = glonass_data.iloc[::60]

    # Plot one second resolution data
    plt.subplot(211)
    plt.scatter(gps_data.longitude, gps_data.latitude, c="red", label="trimble")
    plt.scatter(glonass_data.longitude, glonass_data.latitude, c="green", label="glonass")
    plt.title("One-second resolution, 2017-01-02")
    plt.xlabel("Longitude, decimal degrees E")
    plt.ylabel("Latitude, decimal degrees N")
    plt.grid(True)
    plt.legend()

    # Plot sixty-second resolution data
    plt.subplot(212)
    plt.scatter(sixty_sec_res_gps.longitude, sixty_sec_res_gps.latitude, c="red", label="trimble")
    plt.scatter(sixty_sec_res_glonass.longitude, sixty_sec_res_glonass.latitude, c="green", label="glonass")
    plt.title("Sixty-second resolution, 2017-01-02")
    plt.xlabel("Longitude, decimal degrees E")
    plt.ylabel("Latitude, decimal degrees N")
    plt.grid(True)
    plt.legend()

    plt.tight_layout()
    plt.show()
def plot_data_sources_from_dataframe(dataframe, category):
    """Scatter the track, colouring points by ``dataframe[category]``.

    Args:
        dataframe: pandas DataFrame with 'longitude' and 'latitude' columns.
        category: name of a column whose values drive the point colours
            (mapped through the ``brg`` colormap).
    """
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.scatter(dataframe['longitude'], dataframe['latitude'], alpha=0.70, c=dataframe[category], cmap=cm.brg)
    plt.show()
# def get_flagged_glonass_data(filename, columns):
# # Get GLONASS data
# filepath = '/home/jen/projects/ace_data_management/wip/cruise_track_data/flagging_data_ace_glonass_2017-01-02.csv'
# columns = ['date_time', 'latitude', 'longitude', 'speed', 'device_id']
# glonass_data_flagged = get_data_file(filepath, columns)
# print(glonass_data_flagged.head(5))
#
# return glonass_data_flagged
def plot_speed(dataframe1, colour, legend_label):
    """Plot the speed of the vessel throughout the cruise to identify outlying speeds."""
    # Plot speed data
    # every 60th row to thin the track (assumes 1 Hz input — TODO confirm)
    plt.scatter(dataframe1.iloc[::60].longitude, dataframe1.iloc[::60].speed, c=colour, label=legend_label)
    plt.title("Speed of vessel along track")
    plt.xlabel("Longitude")
    plt.ylabel("Speed of vessel, knots")
    plt.grid(True)
    plt.legend()
    plt.show()

    # Plot of frequency distribution of speed of vessel.
    plt.subplot(211)
    dataframe1['speed'].hist()
    plt.title("Frequency distribution of speed of vessel")
    plt.xlabel("Speed of vessel, knots")
    plt.ylabel("Count")
    plt.grid(True)
    # Same histogram zoomed to 0-20 knots with finer (80) bins.
    plt.subplot(212)
    dataframe1['speed'].hist(bins=80, range=[0, 20])
    plt.title("Frequency distribution of speed of vessel")
    plt.xlabel("Speed of vessel, knots")
    plt.ylabel("Count")
    plt.grid(True)
    plt.tight_layout()
    plt.show()
# filepath = '/home/jen/projects/ace_data_management/wip/cruise_track_data/flagging_data_ace_trimble_gps_2017-01-02.csv'
# columns = ['date_time', 'latitude', 'longitude', 'speed', 'device_id']
# gps_data_flagged = get_data_file(filepath, columns)
# print(gps_data_flagged.head(5))
#
# plot_speed(gps_data_flagged, "red", "trimble")
| [
"jenny_t152@yahoo.co.uk"
] | jenny_t152@yahoo.co.uk |
705621984ef4661f63de4f2a9be8693afc845f01 | 3fd7adb56bf78d2a5c71a216d0ac8bc53485b034 | /tensorflow_data/sawyer/noup_28_dna5/conf.py | 04cd6eb5133f3a850c3be2f9893050d50ffa4d9b | [] | no_license | anair13/lsdc | 6d1675e493f183f467cab0bfe9b79a4f70231e4e | 7760636bea24ca0231b4f99e3b5e8290c89b9ff5 | refs/heads/master | 2021-01-19T08:02:15.613362 | 2017-05-12T17:13:54 | 2017-05-12T17:13:54 | 87,596,344 | 0 | 0 | null | 2017-04-08T00:18:55 | 2017-04-08T00:18:55 | null | UTF-8 | Python | false | false | 1,935 | py | import os
current_dir = os.path.dirname(os.path.realpath(__file__))

# tf record data location:
DATA_DIR = '/'.join(str.split(current_dir, '/')[:-3]) + '/pushing_data/sawyer_noup_29/train'

# local output directory
OUT_DIR = current_dir + '/modeldata'

from video_prediction.prediction_model_downsized_lesslayer import construct_model

# Hyper-parameter dict consumed by the video-prediction training script.
configuration = {
    'experiment_name': 'rndaction_var10',
    'data_dir': DATA_DIR,  # 'directory containing data.' ,
    'output_dir': OUT_DIR,  # 'directory for model checkpoints.' ,
    'current_dir': current_dir,  # 'directory for writing summary.' ,
    'num_iterations': 50000,  # 'number of training iterations.' ,
    'pretrained_model': '',  # 'filepath of a pretrained model to resume training from.' ,
    'sequence_length': 28,  # 'sequence length, including context frames.' ,
    'skip_frame': 1,  # 'use ever i-th frame to increase prediction horizon' ,
    'context_frames': 2,  # of frames before predictions.' ,
    'use_state': 1,  # 'Whether or not to give the state+action to the model' ,
    'model': 'DNA',  # 'model architecture to use - CDNA, DNA, or STP' ,
    'num_masks': 1,  # 'number of masks, usually 1 for DNA, 10 for CDNA, STN.' ,
    'schedsamp_k': 900.0,  # 'The k hyperparameter for scheduled sampling -1 for no scheduled sampling.' ,
    'train_val_split': 0.95,  # 'The percentage of files to use for the training set vs. the validation set.' ,
    'batch_size': 32,  # 'batch size for training' ,
    'learning_rate': 0.001,  # 'the base learning rate of the generator' ,
    'visualize': '',  # 'load model from which to generate visualizations
    'downsize': construct_model,  # 'create downsized model'
    'file_visual': '',  # datafile used for making visualizations
    'penal_last_only': False,  # penalize only the last state, to get sharper predictions
    'dna_size': 5,  # size of DNA kerns
    'sawyer': '',
    'numcam': 2,
}
"frederik.ebert@mytum.de"
] | frederik.ebert@mytum.de |
1df51ff8f1c07cfd37c44135ee621e5a6a511252 | fe7133ea8e879631e63ef3c5312670464ae0970b | /email_test.py | f0d2e88d51707ef12a621e035d9e4ae58ea8429e | [] | no_license | jonathaw/general_scripts | 4f13c55d3544b829488c1d479c2feff1a6c26829 | 0cf47ab3ade55b9396cb5aea00e09dafd2694067 | refs/heads/master | 2021-01-17T02:50:54.936936 | 2017-03-19T17:18:44 | 2017-03-19T17:18:44 | 41,436,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | import smtplib
sender = 'jonathan.weinstein2012@gmail.com'
receivers = ['jonathan.weinstein2012@gmail.com']
message = """From: LSFManager <LSF@manager.com>
To: Me <jonathan.weinstein2012@gmail.com>
Subject: LSFManager Report
This is a test e-mail message.
"""
try:
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(sender, receivers, message)
print("Successfully sent email")
except:
print("Error: unable to send email") | [
"jonathan.weinstein@weizmann.ac.il"
] | jonathan.weinstein@weizmann.ac.il |
663ddf06b0dcc361f9b79ebadbd3809ffe539966 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/AlipayOpenAppSilanApigrayelevenQueryRequest.py | 6ae4d53b1a2df987c4f0ae79daab60b26f7e67a5 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,196 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenAppSilanApigrayelevenQueryRequest(object):
    """Request wrapper for the alipay.open.app.silan.apigrayeleven.query API.

    SDK-style boilerplate: carries the business payload (``biz_model``)
    plus common gateway parameters, and flattens them into the request
    dict via ``get_params``.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        # Fixed API version for this request type.
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        # Lazily create the user-defined parameter dict on first use.
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        # Flatten the request into the key/value dict sent to the gateway;
        # only parameters that have been set are included.
        params = dict()
        params[P_METHOD] = 'alipay.open.app.silan.apigrayeleven.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            # Business payload is serialized as compact, key-sorted JSON.
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        # This API sends no multipart/file parameters; always empty.
        multipart_params = dict()
        return multipart_params
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
c509131d6d695838fd5f8caf0e0236271d308935 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ_16_2/16_2_1_Taizo_R1B_A.py | f57872090549f06b308b5880482ee39c7abee60d | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,168 | py |
# -*- coding: utf8 -*-
import sys
# inputFile = "A-small-attempt0.in"
# inputFile = "A-large-practice.in"
inputFile = "A-large.in"
f = open(inputFile)
# Redirect stdout so every print() goes to the matching .txt answer file.
sys.stdout = open(inputFile.replace(".in", ".txt"), 'w')
tc_num = int(f.readline().rstrip())

# Letter -> [digit word, digit]. Processed in three rounds: each key letter
# occurs in exactly one digit word still present at that round
# (e.g. "Z" only in ZERO; once the even digits are removed, "O" only in ONE).
k1 = {"Z": ["ZERO", 0], "W": ["TWO", 2], "U": ["FOUR", 4], "X": ["SIX", 6], "G": ["EIGHT", 8]}
k2 = {"O": ["ONE", 1], "R": ["THREE", 3], "F": ["FIVE", 5], "S": ["SEVEN", 7]}
k3 = {"I": ["NINE", 9]}
for tc in range(tc_num):
    s = f.readline().rstrip()
    numbers = []
    for k in k1:
        # sys.stderr.write(k + "\n")
        while k in s:
            # sys.stderr.write(s + "\n")
            # Remove one occurrence of each letter of the matched digit word.
            for c in k1[k][0]:
                s = s.replace(c, "", 1)
            numbers.append(k1[k][1])
    for k in k2:
        while k in s:
            for c in k2[k][0]:
                s = s.replace(c, "", 1)
            numbers.append(k2[k][1])
    for k in k3:
        while k in s:
            for c in k3[k][0]:
                s = s.replace(c, "", 1)
            numbers.append(k3[k][1])
    # Emit the recovered digits in ascending order, concatenated.
    numbers.sort()
    ans = ""
    for n in numbers:
        ans += str(n)
    print("Case #" + str(tc + 1) + ": " + ans)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
06b86c6227955f55df9ca40267865fd155d7cdd9 | 75e7093ba88fc8fb7c2787fc9b1f289f058f1807 | /reprounzip/setup.py | 80acb4c5d726fbd814e5e31c3c0d3e42347ffd71 | [
"BSD-3-Clause"
] | permissive | Aloma/reprozip | f8a6e7117d29d7b3a4477acf34f3e09993c7f235 | 449bebbcba0674467515383ecfbd6e9cee1f5dc1 | refs/heads/master | 2020-12-03T09:18:17.523917 | 2014-10-06T20:44:25 | 2014-10-06T20:44:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,651 | py | import os
from setuptools import setup
import sys
# pip workaround: ensure relative paths below resolve from the setup.py dir.
os.chdir(os.path.abspath(os.path.dirname(__file__)))

# Long description for PyPI comes straight from the README.
with open('README.rst') as fp:
    description = fp.read()
req = [
    'PyYAML',
    'rpaths>=0.8']
# argparse is stdlib from 2.7 on; only older interpreters need the backport.
if sys.version_info < (2, 7):
    req.append('argparse')
setup(name='reprounzip',
      version='0.4.1',
      packages=['reprounzip', 'reprounzip.unpackers'],
      entry_points={
          'console_scripts': [
              'reprounzip = reprounzip.main:main'],
          # Unpacker plugins discovered at runtime via this entry-point group.
          'reprounzip.unpackers': [
              'graph = reprounzip.unpackers.graph:setup',
              'installpkgs = reprounzip.unpackers.default:setup_installpkgs',
              'directory = reprounzip.unpackers.default:setup_directory',
              'chroot = reprounzip.unpackers.default:setup_chroot']},
      namespace_packages=['reprounzip', 'reprounzip.unpackers'],
      install_requires=req,
      description="Linux tool enabling reproducible experiments (unpacker)",
      author="Remi Rampin, Fernando Chirigati, Dennis Shasha, Juliana Freire",
      author_email='reprozip-users@vgc.poly.edu',
      maintainer="Remi Rampin",
      maintainer_email='remirampin@gmail.com',
      url='http://vida-nyu.github.io/reprozip/',
      long_description=description,
      license='BSD',
      keywords=['reprozip', 'reprounzip', 'reproducibility', 'provenance',
                'vida', 'nyu'],
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: BSD License',
          'Topic :: Scientific/Engineering',
          'Topic :: System :: Archiving'])
| [
"remirampin@gmail.com"
] | remirampin@gmail.com |
d00d7f695d0fa7ea3119cd533450e09474399e48 | a66460a46611483dfbdc94c7996893f427e60d97 | /ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/avi/avi_ipaddrgroup.py | fd2cd3c38ff7a56e237e08e0d1bb627f9756c9f5 | [
"MIT"
] | permissive | otus-devops-2019-02/yyashkin_infra | 06b57807dde26f94f501828c07503d6bf1d70816 | 0cd0c003884155ac922e3e301305ac202de7028c | refs/heads/master | 2020-04-29T02:42:22.056724 | 2019-05-15T16:24:35 | 2019-05-15T16:24:35 | 175,780,718 | 0 | 0 | MIT | 2019-05-15T16:24:36 | 2019-03-15T08:37:35 | HCL | UTF-8 | Python | false | false | 4,970 | py | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_ipaddrgroup
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of IpAddrGroup Avi RESTful Object
description:
- This module is used to configure IpAddrGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
addrs:
description:
- Configure ip address(es).
apic_epg_name:
description:
- Populate ip addresses from members of this cisco apic epg.
country_codes:
description:
- Populate the ip address ranges from the geo database for this country.
description:
description:
- User defined description for the object.
ip_ports:
description:
- Configure (ip address, port) tuple(s).
marathon_app_name:
description:
- Populate ip addresses from tasks of this marathon app.
marathon_service_port:
description:
- Task port associated with marathon service port.
- If marathon app has multiple service ports, this is required.
- Else, the first task port is used.
name:
description:
- Name of the ip address group.
required: true
prefixes:
description:
- Configure ip address prefix(es).
ranges:
description:
- Configure ip address range(s).
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the ip address group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create an IP Address Group configuration
avi_ipaddrgroup:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
name: Client-Source-Block
prefixes:
- ip_addr:
addr: 10.0.0.0
type: V4
mask: 8
- ip_addr:
addr: 172.16.0.0
type: V4
mask: 12
- ip_addr:
addr: 192.168.0.0
type: V4
mask: 16
"""
RETURN = '''
obj:
description: IpAddrGroup (api/ipaddrgroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: declare the module's argument spec and run the Avi API flow."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        addrs=dict(type='list',),
        apic_epg_name=dict(type='str',),
        country_codes=dict(type='list',),
        description=dict(type='str',),
        ip_ports=dict(type='list',),
        marathon_app_name=dict(type='str',),
        marathon_service_port=dict(type='int',),
        name=dict(type='str', required=True),
        prefixes=dict(type='list',),
        ranges=dict(type='list',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Merge in the connection options shared by all Avi modules.
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Apply the desired state to the 'ipaddrgroup' object via the Avi API.
    return avi_ansible_api(module, 'ipaddrgroup',
                           set([]))


if __name__ == '__main__':
    main()
| [
"theyashkins@gmail.com"
] | theyashkins@gmail.com |
c3227b65442886d4c177b72911ad5fcb24542c5d | c66c214f062bc4de08354bb15d4d7e343b6b7e4a | /custom_dists/gaussian_circle.py | b07fdde7579a75be61689138444384e4801054dd | [] | no_license | pinakm9/JKO | 1a297f58fd1630d33a2314c82e702208943098e8 | 88265e9f38040e3a6ec73edeec59c85b770bf0ed | refs/heads/master | 2023-05-10T11:03:00.482300 | 2021-06-12T19:42:48 | 2021-06-12T19:42:48 | 352,030,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,395 | py | import numpy as np
import scipy.stats as ss
import tensorflow as tf
import tensorflow_probability as tfp
class GaussianCircle:
    """Multimodal distribution: identical Gaussians spaced uniformly on the
    unit circle, mixed with the given weights.

    Args:
        cov: covariance matrix shared by every Gaussian component.
        weights: 1d array of (unnormalized) mixture weights, one per mode.
    """

    def __init__(self, cov, weights):
        self.cov = cov
        self.weights = weights / weights.sum()
        self.num_modes = len(weights)
        self.dim = cov.shape[0]
        # Mode centers sit on the unit circle in the first two coordinates;
        # any remaining coordinates (dim > 2) stay at zero.
        self.means = np.zeros((self.num_modes, self.dim))
        step = 2.0 * np.pi / self.num_modes
        for mode in range(self.num_modes):
            self.means[mode, :2] = np.cos(mode * step), np.sin(mode * step)
        # One TF probability function per component; all share a Cholesky
        # factor of the common covariance.
        scale_tril = tf.linalg.cholesky(cov)
        self.tf_probs = [
            tfp.distributions.MultivariateNormalTriL(
                loc=mean, scale_tril=scale_tril).prob
            for mean in self.means
        ]

    def sample(self, size):
        """Draw samples from the mixture.

        Args:
            size: number of samples to generate.

        Returns:
            numpy array of shape (size, dim) with the generated samples.
        """
        chosen = np.random.choice(self.num_modes, size=size, replace=True,
                                  p=self.weights)
        out = np.zeros((size, self.dim))
        for row, mode in enumerate(chosen):
            out[row, :] = np.random.multivariate_normal(
                mean=self.means[mode], cov=self.cov, size=1)
        return out

    def pdf(self, x):
        """Evaluate the mixture density at ``x`` (numpy/scipy path).

        Args:
            x: point(s) at which the density is evaluated.

        Returns:
            weighted sum of the component densities at ``x``.
        """
        return sum(
            w * ss.multivariate_normal.pdf(x, mean=m, cov=self.cov)
            for w, m in zip(self.weights, self.means))

    def prob(self, x):
        """Evaluate the mixture density at ``x`` (tensorflow path).

        Args:
            x: tensor of points at which the density is evaluated.

        Returns:
            tensor of shape (-1, 1) with the computed probabilities.
        """
        total = 0.0
        for w, component_prob in zip(self.weights, self.tf_probs):
            total = total + w * component_prob(x)
        return tf.reshape(total, (-1, 1))
"pinakm9@gmail.com"
] | pinakm9@gmail.com |
94f6e3cf69a00aa6b1808778722130a275ee4713 | e4eabccc6d971289cf13653d1b6f290e39b870ab | /1407-group-the-people-given-the-group-size-they-belong-to/group-the-people-given-the-group-size-they-belong-to.py | b33c28a8c7eb9dab579c4c5b78fa1624574c7390 | [] | no_license | HEroKuma/leetcode | 128b38a9f559dc9e3f21c86a47ede67ad72f7675 | b3045aaedbe98eddc7e4e518a03a9337a63be716 | refs/heads/master | 2023-01-03T12:12:31.018717 | 2020-11-01T16:56:47 | 2020-11-01T16:56:47 | 260,488,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | # There are n people that are split into some unknown number of groups. Each person is labeled with a unique ID from 0 to n - 1.
#
# You are given an integer array groupSizes, where groupSizes[i] is the size of the group that person i is in. For example, if groupSizes[1] = 3, then person 1 must be in a group of size 3.
#
# Return a list of groups such that each person i is in a group of size groupSizes[i].
#
# Each person should appear in exactly one group, and every person must be in a group. If there are multiple answers, return any of them. It is guaranteed that there will be at least one valid solution for the given input.
#
#
# Example 1:
#
#
# Input: groupSizes = [3,3,3,3,3,1,3]
# Output: [[5],[0,1,2],[3,4,6]]
# Explanation:
# The first group is [5]. The size is 1, and groupSizes[5] = 1.
# The second group is [0,1,2]. The size is 3, and groupSizes[0] = groupSizes[1] = groupSizes[2] = 3.
# The third group is [3,4,6]. The size is 3, and groupSizes[3] = groupSizes[4] = groupSizes[6] = 3.
# Other possible solutions are [[2,1,6],[5],[0,4,3]] and [[5],[0,6,2],[4,3,1]].
#
#
# Example 2:
#
#
# Input: groupSizes = [2,1,3,3,3,2]
# Output: [[1],[0,5],[2,3,4]]
#
#
#
# Constraints:
#
#
# groupSizes.length == n
# 1 <= n <= 500
# 1 <= groupSizes[i] <= n
#
#
class Solution:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
d = {}
for i,v in enumerate(groupSizes):
if v in d:
d[v].append(i)
else:
d[v] = [i]
return [ d[i][j:j+i] for i in d for j in range(0,len(d[i]),i) ]
| [
"zx8733520+github@gapp.nthu.edu.tw"
] | zx8733520+github@gapp.nthu.edu.tw |
aa09d6bdb5805a8c7fee8a75dfc9873d3a8b7afc | cefd6c17774b5c94240d57adccef57d9bba4a2e9 | /WebKit/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py | fcc76ca14127db2dbe9b959c1bd40143f03524e3 | [
"BSL-1.0"
] | permissive | adzhou/oragle | 9c054c25b24ff0a65cb9639bafd02aac2bcdce8b | 5442d418b87d0da161429ffa5cb83777e9b38e4d | refs/heads/master | 2022-11-01T05:04:59.368831 | 2014-03-12T15:50:08 | 2014-03-12T15:50:08 | 17,238,063 | 0 | 1 | BSL-1.0 | 2022-10-18T04:23:53 | 2014-02-27T05:39:44 | C++ | UTF-8 | Python | false | false | 2,323 | py | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)


class AbstractSequencedCommand(Command):
    """Base class for commands that simply run a fixed StepSequence.

    Subclasses set ``steps`` to the list of step classes to run;
    ``execute`` runs them in order, aborting with the error's exit code
    if preparing the initial state raises ScriptError.
    """
    steps = None

    def __init__(self):
        self._sequence = StepSequence(self.steps)
        # Explicit base call kept (Python 2 era codebase).
        Command.__init__(self, self._sequence.options())

    def _prepare_state(self, options, args, tool):
        # Hook for subclasses: build the initial state passed to the
        # sequence; may raise ScriptError to abort before any step runs.
        return None

    def execute(self, options, args, tool):
        try:
            state = self._prepare_state(options, args, tool)
        except ScriptError as e:  # was `except ScriptError, e:` — Python-2-only syntax; `as` works on 2.6+ and 3
            _log.error(e.message_with_output())
            self._exit(e.exit_code or 2)
        self._sequence.run_and_handle_errors(tool, options, state)
| [
"adzhou@hp.com"
] | adzhou@hp.com |
6bf26fdbf45870a7cf98a9ee1cce2f828ceb9159 | 2f74c4d2e5c6dd51eb3eaf0ee4b97122b26e7066 | /unit_03/07_Regular_Expressions/1-Regular_Expressions/2_basics.py | e25bd6bf9d5e8b8fb10b05377993149e0888175b | [
"MIT"
] | permissive | duliodenis/python_master_degree | c6a4ccf5d98c48cfc1efd29dfc116bf55b6b4f01 | 3ab76838ce2fc1606f28e988a3273dd27122a621 | refs/heads/master | 2020-04-14T09:03:51.863305 | 2019-07-22T23:05:19 | 2019-07-22T23:05:19 | 163,751,089 | 21 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | #
# Regular Expressions in Python: Reading Files
# Python Techdegree
#
# Created by Dulio Denis on 12/27/18.
# Copyright (c) 2018 ddApps. All rights reserved.
# ------------------------------------------------
# Challenge 1: Basics
# ------------------------------------------------
# Challenge Task 1 of 5
# Use open() to load the file "basics.txt" into
# the variable file_object.
# ------------------------------------------------
file_object = open("basics.txt")

# ------------------------------------------------
# Challenge Task 2 of 5
# Read the contents of file_object into a new
# variable named data.
data = file_object.read()

# ------------------------------------------------
# Challenge Task 3 of 5
# Now close the file_object file so it isn't taking
# up memory.
file_object.close()

# ------------------------------------------------
# Challenge Task 4 of 5
# Import re. Create an re.match() for the word "Four"
# in the data variable. Assign this to a new variable
# named first.
import re
# re.match only succeeds if the pattern occurs at the very start of data.
first = re.match(r"Four", data)

# ------------------------------------------------
# Challenge Task 5 of 5
# Finally, make a new variable named liberty that is
# an re.search() for the word "Liberty" in our data
# variable.
# re.search scans the whole string for the first occurrence.
liberty = re.search(r'Liberty', data)
"dulio.denis@yahoo.com"
] | dulio.denis@yahoo.com |
ad5ababfd67e9e7708f5e970d4fbad8be6e9e2db | 715a11d7b8f15694a5cc4b47ac0e3a3cfc4ffedc | /bi46/5669.py | b92cbc7001d2c8b620449542ddad67fce5979e74 | [] | no_license | mohanrajanr/CodePrep | 5cd538d16598f6a0d2486357d3cc6e0fa1626e4e | 2e23a5f996139b887bf723f58b23368cf8121cd4 | refs/heads/main | 2023-04-23T04:10:06.111120 | 2021-05-11T06:47:51 | 2021-05-11T06:47:51 | 366,283,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,024 | py | from typing import List
def canChoose(groups: List[List[int]], nums: List[int]) -> bool:
    """Return True if every list in `groups` occurs in `nums` as a
    contiguous subarray, in the given order and without overlap
    (LeetCode 1764).

    Greedy left-to-right matching is optimal here: because the groups
    must keep their relative order and may not overlap, matching each
    group at its leftmost feasible position never hurts later groups.

    Fix over the previous version: it destructively popped elements from
    BOTH input lists, mutating the caller's data; this scan is read-only.
    """
    pos = 0
    total = len(nums)
    for group in groups:
        size = len(group)
        # Slide right until the group matches or there is no room left.
        while pos + size <= total and nums[pos:pos + size] != group:
            pos += 1
        if pos + size > total:
            return False
        pos += size  # consume the matched slice; groups may not overlap
    return True
# Ad-hoc smoke tests; correct answers for these inputs are
# True, False, False, False.
print(canChoose([[1,-1,-1],[3,-2,0]], [1,-1,0,1,-1,-1,3,-2,0]))
print(canChoose([[10,-2],[1,2,3,4]], [1,2,3,4,10,-2]))
print(canChoose([[1,2,3],[3,4]], [1,-1,0,1,-1,-1,3,-2,0]))
print(canChoose([[1,2,3],[3,4]], [7,7,1,2,3,4,7,7]))
"mohanrajan1996@gmail.com"
] | mohanrajan1996@gmail.com |
80acb66240a546aa1aad9df8bf32d4cf1bce398f | d5c1d1b162de12942989cb15f5a1e9e9ecf52c82 | /soladm/tests/test_autocomplete.py | 4a7ef3258a43c5de90b443fb60c34f7bd0f76935 | [] | no_license | rr-/soladm | 39730352265e41e558134fe4928ce7c9fe2c50b7 | 67f3e388144d258b861728d81664c78cd6ba2e97 | refs/heads/master | 2021-01-20T07:35:12.826423 | 2017-06-29T11:43:39 | 2017-06-29T11:43:39 | 90,016,113 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | from typing import Tuple, Iterable
import pytest
from soladm.ui import autocomplete
@pytest.mark.parametrize('edit_text,edit_pos,affixes', [
    # (text in the edit box, cursor position, expected list of
    #  (prefix, word, suffix) splits). Judging by the data, get_affixes
    # enumerates every way of splitting the text left of the cursor into a
    # kept prefix and the word-in-progress -- one tuple per whitespace
    # boundary -- with the text right of the cursor as the suffix.
    ('', 0, [('', '', '')]),
    ('abc', 0, [('', '', 'abc')]),
    ('abc def', 0, [('', '', 'abc def')]),
    ('abc', 2, [('', 'ab', 'c')]),
    ('ab ', 2, [('', 'ab', ' ')]),
    (' c', 2, [('', ' ', 'c')]),
    (' bc', 2, [('', ' b', 'c'), (' ', 'b', 'c')]),
    ('a c', 2, [('', 'a ', 'c')]),
    ('abc def', 5, [
        ('', 'abc d', 'ef'),
        ('abc ', 'd', 'ef'),
    ]),
    ('a c def', 5, [
        ('', 'a c d', 'ef'),
        ('a ', 'c d', 'ef'),
        ('a c ', 'd', 'ef'),
    ]),
])
def test_get_affixes(
        edit_text: str,
        edit_pos: int,
        affixes: Iterable[Tuple[str, str, str]]) -> None:
    # The autocomplete machinery relies on this exact split enumeration.
    assert autocomplete.get_affixes(edit_text, edit_pos) == affixes
| [
"rr-@sakuya.pl"
] | rr-@sakuya.pl |
af1ccc3bb73c89919f00a287764d22296201e510 | 83e0a7bae272748dadea3330514039b8658ca426 | /test/util/test_timecoord.py | 05d24029bb6500cb460cb40cacd926c57a973180 | [
"MIT"
] | permissive | achtsnits/xcube | 78d081613d71b7e13cc317fb07c297d98e6267ad | 6bc7bda849a1f2cc8cb2bba1152e0a98d4a97aa5 | refs/heads/master | 2020-08-27T20:46:43.230537 | 2019-10-16T14:24:26 | 2019-10-16T14:24:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,263 | py | import unittest
import numpy as np
from test.sampledata import create_highroc_dataset
from xcube.util.timecoord import add_time_coords, to_time_in_days_since_1970, timestamp_to_iso_string, \
from_time_in_days_since_1970
class AddTimeCoordsTest(unittest.TestCase):
    """Tests for add_time_coords() and the days-since-1970 conversions."""

    def test_add_time_coords_point(self):
        # A degenerate (start == end) range yields a single 'time'
        # coordinate and no bounds variable.
        source = create_highroc_dataset()
        labelled = add_time_coords(source, (365 * 47 + 20, 365 * 47 + 20))
        self.assertIsNot(labelled, source)
        self.assertIn('time', labelled)
        self.assertEqual((1,), labelled.time.shape)
        self.assertNotIn('time_bnds', labelled)

    def test_add_time_coords_range(self):
        # A proper range additionally produces a (1, 2) 'time_bnds' variable.
        source = create_highroc_dataset()
        labelled = add_time_coords(source, (365 * 47 + 20, 365 * 47 + 21))
        self.assertIsNot(labelled, source)
        self.assertIn('time', labelled)
        self.assertEqual((1,), labelled.time.shape)
        self.assertIn('time_bnds', labelled)
        self.assertEqual((1, 2), labelled.time_bnds.shape)

    def test_to_time_in_days_since_1970(self):
        # Both compact and ISO-like datetime spellings are accepted.
        for text, expected in [('201706071200', 17324.5),
                               ('201706081200', 17325.5),
                               ('2018-06-08 12:00', 17690.5),
                               ('2018-06-08T12:00', 17690.5)]:
            self.assertEqual(expected, to_time_in_days_since_1970(text))

    def test_from_time_in_days_since_1970(self):
        # from_time_... must be the inverse of to_time_... up to
        # nanosecond-precision string form.
        for text, expected in [('201706071200', '2017-06-07T12:00:00.000000000'),
                               ('201706081200', '2017-06-08T12:00:00.000000000'),
                               ('2018-06-08 12:00', '2018-06-08T12:00:00.000000000'),
                               ('2018-06-08T12:00', '2018-06-08T12:00:00.000000000')]:
            self.assertEqual(
                expected,
                str(from_time_in_days_since_1970(to_time_in_days_since_1970(text))))
class TimestampToIsoStringTest(unittest.TestCase):
    """Tests for timestamp_to_iso_string() at default and hourly resolution."""

    # Same three raw timestamps exercised at both resolutions.
    _SAMPLES = ("2018-09-05", "2018-09-05 10:35:42", "2018-09-05 10:35:42.164")

    def test_it_with_default_res(self):
        # Default frequency truncates to whole seconds.
        wanted = ("2018-09-05T00:00:00Z",
                  "2018-09-05T10:35:42Z",
                  "2018-09-05T10:35:42Z")
        for raw, expected in zip(self._SAMPLES, wanted):
            self.assertEqual(expected, timestamp_to_iso_string(np.datetime64(raw)))

    def test_it_with_h_res(self):
        # freq="H" rounds to the nearest hour (10:35 -> 11:00).
        wanted = ("2018-09-05T00:00:00Z",
                  "2018-09-05T11:00:00Z",
                  "2018-09-05T11:00:00Z")
        for raw, expected in zip(self._SAMPLES, wanted):
            self.assertEqual(expected,
                             timestamp_to_iso_string(np.datetime64(raw), freq="H"))
| [
"norman.fomferra@gmail.com"
] | norman.fomferra@gmail.com |
e55764e47834c1865fe67bbb512f3243934e79f4 | 256f817910dd698970fab89871c6ce66a3c416e7 | /1. solvedProblems/30. Substring with Concatenation of All Words/30.py | ce5a51cd4adedf962473dba896bff6a6a7f0783b | [] | no_license | tgaochn/leetcode | 5926c71c1555d2659f7db4eff9e8cb9054ea9b60 | 29f1bd681ae823ec6fe755c8f91bfe1ca80b6367 | refs/heads/master | 2023-02-25T16:12:42.724889 | 2021-02-04T21:05:34 | 2021-02-04T21:05:34 | 319,225,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,228 | py | # !/usr/bin/env python
# coding: utf-8
"""
Author:
Tian Gao (tgaochn@gmail.com)
CreationDate:
Sat, 11/28/2020, 19:54
# !! Description:
"""
import sys
from typing import List
sys.path.append('..')
from utils import binaryTree, nTree, singleLinkedList
from utils.utils import (
printMatrix,
printDict,
printList,
isMatrix,
)
# Short aliases so LeetCode-style literals can be pasted into the test
# cases below without edits.
ListNode = singleLinkedList.ListNode  # singly-linked-list node helper
TreeNode = binaryTree.TreeNode        # binary-tree node helper
Node = nTree.Node                     # n-ary-tree node helper
null = None  # lets pasted "null" literals resolve
testCaseCnt = 6  # number of test-case slots prepared in func()
# maxFuncInputParaCnt = 8
# !! step1: replace these two lines with the given code
class Solution:
    def findSubstring(self, s: str, words: List[str]) -> List[int]:
        """LeetCode 30: return every start index in `s` where a
        concatenation of ALL `words` (each used exactly once, any order)
        begins. All words are assumed to have equal length.

        For each candidate start we walk word-sized chunks and compare
        their multiset against the target word counts, bailing out early
        on the first impossible chunk.

        Improvements over the previous version: Counter-based counting
        instead of hand-rolled frequency dicts rebuilt from scratch per
        window, no deque churn, and guards so empty `s`/`words` return
        [] instead of raising IndexError.
        """
        from collections import Counter
        if not s or not words or not words[0]:
            return []
        word_len = len(words[0])
        word_cnt = len(words)
        win_len = word_len * word_cnt
        if win_len > len(s):
            return []
        target = Counter(words)
        result = []
        for start in range(len(s) - win_len + 1):
            seen = Counter()
            for k in range(word_cnt):
                off = start + k * word_len
                chunk = s[off:off + word_len]
                if chunk not in target:
                    break  # chunk is not one of the words at all
                seen[chunk] += 1
                if seen[chunk] > target[chunk]:
                    break  # word used more often than available
            else:
                result.append(start)  # all word_cnt chunks matched
        return result
    # endFunc
# endClass
def func():
    """Ad-hoc driver: runs every function in myFuncLis against the
    enabled test cases below and prints inputs/results (only mismatches
    when onlyDisplayError is True)."""
    # !! step2: change function name
    s = Solution()
    myFuncLis = [
        s.findSubstring,
        # optional: add another function for comparison
    ]
    onlyDisplayError = True
    enableInput = [True] * testCaseCnt
    # NOTE(review): `input` shadows the builtin input(); harmless here
    # but confusing.
    input = [None] * testCaseCnt
    expectedRlt = [None] * testCaseCnt
    # enableInput[0] = False
    # enableInput[1] = False
    # enableInput[2] = False
    # enableInput[3] = False
    # enableInput[4] = False
    # enableInput[5] = False
    # !! step3: change input para, input para can be found in "run code" - "test case"
    # ! para1
    input[0] = (
        "barfoothefoobarman",
        ["foo", "bar"],
        # binaryTree.buildTree(None)
        # singleLinkedList.buildSingleList(None)
        # nTree.buildTree(None)
    )
    expectedRlt[0] = [0, 9]
    # ! para2
    input[1] = (
        "wordgoodgoodgoodbestword",
        ["word", "good", "best", "word"],
        # binaryTree.buildTree(None),
        # singleLinkedList.buildSingleList(None),
        # nTree.buildTree(None),
    )
    expectedRlt[1] = []
    # ! para3
    input[2] = (
        "barfoofoobarthefoobarman",
        ["bar", "foo", "the"],
        # singleLinkedList.buildSingleList(None),
        # binaryTree.buildTree(None),
        # nTree.buildTree(None),
    )
    expectedRlt[2] = [6, 9, 12]
    # ! para4  (a bare (None) is just None, so the slot is skipped below)
    input[3] = (
        None
        # singleLinkedList.buildSingleList(None),
        # binaryTree.buildTree(None),
        # nTree.buildTree(None),
    )
    expectedRlt[3] = None
    # ! para5
    input[4] = (
        None
        # singleLinkedList.buildSingleList(None),
        # binaryTree.buildTree(None),
        # nTree.buildTree(None),
    )
    expectedRlt[4] = None
    # ! para6
    input[5] = (
        None
        # singleLinkedList.buildSingleList(None),
        # binaryTree.buildTree(None),
        # nTree.buildTree(None),
    )
    expectedRlt[5] = None
    # !! ====================================
    # function and parameters count
    allInput = [(input[i], enableInput[i], expectedRlt[i]) for i in range(testCaseCnt)]
    if not input[0]:
        print("ERROR: please assign at least one input for input[0]!")
        exit()
    # Arity is inferred from input[0]; every enabled case must match it.
    funcParaCnt = 1 if not isinstance(input[0], tuple) else len(input[0])
    funcCnt = len(myFuncLis)
    # for each test case
    # NOTE(review): the loop variables rebind the outer enableInput /
    # expectedRlt names -- intentional-looking but fragile.
    for inputPara, enableInput, expectedRlt in allInput:
        if not enableInput or not inputPara: continue
        inputParaList = [None] * funcParaCnt
        if not isinstance(inputPara, tuple):
            inputPara = [inputPara]
        for j in range(funcParaCnt):
            inputParaList[j] = inputPara[j]
        # for each function
        for j in range(funcCnt):
            print('==' * 20)
            myFunc = myFuncLis[j]
            # ! manually call function, max para count: 8
            rlt = None
            if funcParaCnt == 1:
                rlt = myFunc(inputPara[0])
            if funcParaCnt == 2:
                rlt = myFunc(inputPara[0], inputPara[1])
            if funcParaCnt == 3:
                rlt = myFunc(inputPara[0], inputPara[1], inputPara[2])
            if funcParaCnt == 4:
                rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3])
            if funcParaCnt == 5:
                rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4])
            if funcParaCnt == 6:
                rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4], inputPara[5])
            if funcParaCnt == 7:
                rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4], inputPara[5], inputPara[6])
            if funcParaCnt == 8:
                rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4], inputPara[5], inputPara[6], inputPara[7])
            # only output when the result is not expected
            if onlyDisplayError and expectedRlt is not None and expectedRlt == rlt: continue
            # output function name
            if funcCnt > 1:
                print('func: \t%s' % myFunc.__name__)
            # output para
            for k in range(funcParaCnt):
                para = inputParaList[k]
                formatPrint('input %s:' % (k + 1), para)
            # output result
            print()
            if not rlt:
                print('rlt:\t', rlt)
            else:
                formatPrint('rlt:', rlt)
            if expectedRlt is not None:
                if not expectedRlt:
                    print('expRlt:\t', expectedRlt)
                else:
                    formatPrint('expRlt:', expectedRlt)
            print('==' * 20)
# endFunc
def isSpecialInstance(myInstance):
    """Return True when the value is one of the custom tree-node helper
    types (binary-tree TreeNode or n-ary-tree Node)."""
    return isinstance(myInstance, (TreeNode, Node))
# endFunc
def formatPrint(prefix, data):
    """Print `data` under `prefix`: matrices get their own multi-line
    dump, tree nodes go on a fresh line, everything else stays inline."""
    if isMatrix(data):
        print('%s' % prefix)
        printMatrix(data)
        return
    # Tree nodes render multi-line, so start them on a new line.
    sep = '\n' if isSpecialInstance(data) else '\t'
    print('%s%s%s' % (prefix, sep, data))
# endFunc
def main():
    # Entry point: delegate to the ad-hoc test driver.
    func()
# endMain
# Run the test driver only when executed as a script, not on import.
if __name__ == "__main__":
    main()
# endIf
| [
"tgaochn@gmail.com"
] | tgaochn@gmail.com |
def solution(brown, red):
    """Programmers "Carpet" puzzle: a width x height carpet has a
    one-tile brown border around an inner (width-2) x (height-2) block
    of red tiles. Given the two tile counts, return [width, height]
    (width >= height).

    Every tile is either brown or red, so width * height == brown + red;
    it suffices to scan divisor pairs of the total and check that the
    interior matches `red`.

    Improvements over the previous version: returns exact ints instead
    of floats, runs in O(sqrt(total)) instead of scanning every value up
    to `red`, and returns [] instead of crashing when no grid fits.
    """
    total = brown + red
    # height is the smaller side; a bordered grid needs height >= 3
    for height in range(3, int(total ** 0.5) + 1):
        if total % height:
            continue
        width = total // height
        if (width - 2) * (height - 2) == red:
            return [width, height]
    return []
"cjsdnr885@naver.com"
] | cjsdnr885@naver.com |
76b3b454adef458d8f84bb8c711f378e962c4afd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_perfectest.py | 617a0575b5947bdc969f79d6b1bbe1909e2fe462 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py |
from xai.brain.wordbase.nouns._perfect import _PERFECT
# class header
class _PERFECTEST(_PERFECT, ):
    # Wordbase entry for the (apparently superlative) form "perfectest":
    # inherits all behavior from the base-form _PERFECT class and only
    # overrides the identifying metadata set in __init__.
    def __init__(self,):
        _PERFECT.__init__(self)       # initialise shared word-entry state
        self.name = "PERFECTEST"      # entry name
        self.specie = 'nouns'         # word category this entry is filed under
        self.basic = "perfect"        # base (uninflected) form
        self.jsondata = {}            # extra metadata payload, empty by default
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
d60b27a09af020085f55092b28cf65da6aae07f6 | 5345cc368ac108776188118d417a8aff8604ec0c | /tests/fields/test_registering.py | 83183bb4691b6316d9e0d783674e0c8fd433bddd | [
"MIT"
] | permissive | hochshi/wagtailstreamforms | 7229097390c34dd100d812a35d7d74b0092479f7 | 8be02c5606d87d0e7f4f648866c36290207163a8 | refs/heads/3-dev | 2020-04-14T17:49:10.846230 | 2019-06-11T08:25:27 | 2019-06-11T08:25:27 | 163,995,051 | 1 | 0 | MIT | 2019-06-11T08:25:28 | 2019-01-03T16:10:34 | Python | UTF-8 | Python | false | false | 463 | py | from django import forms
from wagtailstreamforms import fields
from ..test_case import AppTestCase
class MyField(fields.BaseField):
    # Minimal concrete field used purely as a fixture for the
    # registration tests below; it renders as a plain Django CharField.
    field_class = forms.CharField
class TestFieldRegistering(AppTestCase):
    """Registering a field under a key must make it visible via get_fields()."""

    @classmethod
    def setUpClass(cls):
        # Register the fixture field once for the whole test class.
        fields.register('myfield', MyField)

    @classmethod
    def tearDownClass(cls):
        # Remove the fixture so other test modules see a clean registry.
        fields._fields.pop('myfield')

    def test_field(self):
        registered = fields.get_fields()
        self.assertIn('myfield', registered)
| [
"stuart@accentdesign.co.uk"
] | stuart@accentdesign.co.uk |
d0791d0c24b78ce2b664deac0f6b69070ad79928 | 7e8f67b9b3c7d17b49c2f9677afea78245e8b29f | /accounts/tests/views/test_captive.py | fd0150e96c1ed234d9c86241eefdb1c154683fe1 | [] | no_license | deone/billing-v1 | 23672addfbe8479a45ccf976cafdf6cbe1220834 | ebb933ec2453810fb1c0f565efa8142c82743b85 | refs/heads/master | 2021-08-18T04:17:25.435711 | 2019-06-17T12:52:50 | 2019-06-17T12:52:50 | 90,369,896 | 0 | 0 | null | 2021-06-10T18:38:10 | 2017-05-05T11:40:45 | Python | UTF-8 | Python | false | false | 2,802 | py | from django.core.urlresolvers import reverse
from ...forms import LoginForm
from . import ViewsTests
class CaptiveTests(ViewsTests):
    # Tests for the captive-portal splash views. The query strings below are
    # real redirect parameters captured from the access point (apparently a
    # Cisco Meraki splash flow -- n110.network-auth.com).

    def test_captive(self):
        # Full splash parameter set: the page should render the login form
        # and expose the AP's login/continue URLs to the template context.
        get_params = "?login_url=https%3A%2F%2Fn110.network-auth.com%2Fsplash%2Flogin%3Fmauth%3DMMzZUJGqtrsmvkKw6ktCkcNsuBgluav4m2vgE4p-nFliz6lOzP99ntPzZAjvJ_Yit73ZfWwRDIzoEAwzZSuErRpQwdfD0vVA3XjsLLlK8UNiucySNAij7FEqEAF9osnXpWioNcUpyn7BYW8pP5C-wdZAQpLAWS-lv4UTivlfTUn92n4RxMaWG52Q%26continue_url%3Dhttps%253A%252F%252Fn110.network-auth.com%252Fsplash%252Fconnected%253Fhide_terms%253Dtrue&continue_url=https%3A%2F%2Fn110.network-auth.com%2Fsplash%2Fconnected%3Fhide_terms%3Dtrue&ap_mac=00%3A18%3A0a%3Af2%3Ade%3A20&ap_name=Djungle+HQ+02&ap_tags=office-accra+recently-added&client_mac=4c%3A8d%3A79%3Ad7%3A6b%3A28&client_ip=192.168.2.65"
        response = self.c.get(''.join([reverse('captive'), get_params]))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Log In')
        self.assertTrue(isinstance(response.context['form'], LoginForm))
        self.assertTrue('login_url' in response.context)
        self.assertTrue('success_url' in response.context)

    def test_captive_with_error_message(self):
        # Same request plus an error_message param (access denied); the view
        # must surface it in the template context.
        get_params = "?login_url=https%3A%2F%2Fn110.network-auth.com%2Fsplash%2Flogin%3Fmauth%3DMMzZUJGqtrsmvkKw6ktCkcNsuBgluav4m2vgE4p-nFliz6lOzP99ntPzZAjvJ_Yit73ZfWwRDIzoEAwzZSuErRpQwdfD0vVA3XjsLLlK8UNiucySNAij7FEqEAF9osnXpWioNcUpyn7BYW8pP5C-wdZAQpLAWS-lv4UTivlfTUn92n4RxMaWG52Q%26continue_url%3Dhttps%253A%252F%252Fn110.network-auth.com%252Fsplash%252Fconnected%253Fhide_terms%253Dtrue&error_message=Access+denied+for+herbertellisspectradjungle%40spectrawireless.com&continue_url=https%3A%2F%2Fn110.network-auth.com%2Fsplash%2Fconnected%3Fhide_terms%3Dtrue&ap_mac=00%3A18%3A0a%3Af2%3Ade%3A20&ap_name=Djungle+HQ+02&ap_tags=office-accra+recently-added&client_mac=4c%3A8d%3A79%3Ad7%3A6b%3A28&client_ip=192.168.2.65"
        response = self.c.get(''.join([reverse('captive'), get_params]))
        self.assertEqual(response.status_code, 200)
        self.assertTrue('error_message' in response.context)

    def test_captive_without_get_params(self):
        # The captive view requires the splash parameters; without them it 404s.
        response = self.c.get(reverse('captive'))
        self.assertEqual(response.status_code, 404)

    def test_success(self):
        # The success view should expose the AP's logout_url to the template.
        get_params = "?logout_url=https%3A%2F%2Fn110.network-auth.com%2Fsplash%2Flogout%3Fkey%3DMM7n9oxmBMVzgXgqkvAbLsLTh2cP7lcZdnhrqPRdHlIqzFHCNSRkxoiKzMGmTDQw7dGd092BdPfUs"
        response = self.c.get(''.join([reverse('success'), get_params]))
        self.assertEqual(response.status_code, 200)
        self.assertTrue('logout_url' in response.context)

    def test_success_without_get_params(self):
        # Unlike the captive view, success renders fine without parameters.
        response = self.c.get(reverse('success'))
        self.assertEqual(response.status_code, 200)
| [
"alwaysdeone@gmail.com"
] | alwaysdeone@gmail.com |
bd640c0792b3901848aa7820f8ec89682ceb850c | 77c32baf29e5718a07fec9dfaee89cdff3c0f23d | /instance/migrations/0001_initial.py | 62952b23e6eef1835c52c02b26b0ce56df746423 | [] | no_license | syed-saif/hackathon_backend | f438c49268e182ae6a51b9f1650c02e423ea32cd | 37a092cfb27c6e1be3652e17eea3b712ce9b3fd1 | refs/heads/main | 2023-02-09T16:16:51.242683 | 2021-01-12T12:33:03 | 2021-01-12T12:33:03 | 328,973,595 | 0 | 0 | null | 2021-01-12T12:02:59 | 2021-01-12T12:02:58 | null | UTF-8 | Python | false | false | 763 | py | # Generated by Django 3.1.3 on 2020-11-25 16:04
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DetecHandWritten',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='detect_images/')),
('created_at', models.DateTimeField(auto_now_add=True)),
('last_updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"="
] | = |
9aa3c92f85d67695e2c53c6a11ecb3381ce8adb5 | d02261797ab1f6d9ba85370fbb5d73e84390154e | /hunt/special_puzzles/ktane/manual/grandom.py | fb776263fe43cec87e02e1362b35575be15f8e0b | [
"MIT"
] | permissive | YewLabs/2021-hunt | 2c965da93b92d0a53bfa25938b376ebefecc241d | c6ea6bdf17571642ee4e7463a2a363292ff9f972 | refs/heads/master | 2023-03-04T04:16:12.412893 | 2021-02-07T15:48:01 | 2021-02-07T16:50:45 | 336,824,373 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,180 | py | from random import Random
class GRandom(Random):
    """random.Random subclass with helpers for generating puzzle content.

    NOTE(review): several methods read ``self.data_`` (a mapping holding
    word lists and a ``"bounds"`` table) that is never assigned in this
    class -- presumably attached by the caller or a subclass; confirm.
    """

    def shuffled(self, x):
        # Non-destructive shuffle: copy the iterable into a list and
        # return the shuffled copy.
        x = list(x)
        self.shuffle(x)
        return x

    def distrib(self, total, count, *, min_=0, max_=None, skew=1):
        # Randomly split `total` into `count` parts, each within
        # [min_, max_]. Each leftover unit is handed to a slot chosen as
        # the min of `skew` random indices over a shuffled ordering, so
        # larger skew biases the mass toward a few slots. Slots that hit
        # max_ are swap-removed from the candidate list.
        if min_ * count > total:
            raise ValueError(
                f"The total must be at least {min_}*{count}={min_*count} "
                f"when count={count} and min_={min_}"
            )
        if max_ is not None and max_ * count < total:
            raise ValueError(
                f"The total must be at most {max_}*{count}={max_*count} "
                f"when count={count} and max_={max_}"
            )
        if skew <= 0:
            raise ValueError("The skew has to be at least 1.")
        if max_ is None:
            max_ = total
        dist = [min_] * count
        inds = self.shuffled(range(count))
        for it in range(total - min_ * count):
            while True:
                assert inds
                idx = min(self.randrange(len(inds)) for it in range(skew))
                if dist[inds[idx]] < max_:
                    dist[inds[idx]] += 1
                    break
                else:
                    # slot is full: swap-remove it from the candidates
                    inds[idx], inds[-1] = inds[-1], inds[idx]
                    inds.pop()
        assert sum(dist) == total
        assert min_ <= min(dist) <= max(dist) <= max_
        return dist

    def data(self, arg, n=1):
        # Sample n entries from self.data_[arg]; a negative n means
        # "all but |n|-1 entries" (length + n + 1).
        length = len(self.data_[arg])
        k = length + n + 1 if n < 0 else n
        return self.sample(self.data_[arg], k)

    def directions(self):
        # Random permutation of the direction letters "CGNESW".
        return "".join(self.shuffled("CGNESW"))

    def side(self):
        # One random face letter -- presumably Up/Down/Front/Left/Back/Right.
        return self.choice("UDFLBR")

    def talk_press(self):
        # Four random characters drawn (with repetition) from "MmNn".
        return "".join(self.choice("MmNn") for _ in range(4))

    def range_(self, min_, avg, max_):
        # Build a random {"min": ..}/{"max": ..} constraint around avg:
        # 2/7 min-only, 2/7 max-only, 2/7 exact value, 1/7 both bounds.
        min_avg, max_avg = int((min_ + avg) / 2), int((max_ + avg) / 2)
        ch = self.randint(0, 6)
        if ch <= 1:
            return {"min": self.randint(avg, max_avg)}
        elif ch <= 3:
            return {"max": self.randint(min_avg, avg)}
        elif ch <= 5:
            z = self.randint(min_avg, max_avg)
            return {"min": z, "max": z}
        else:
            return {
                "min": self.randint(min_avg, avg),
                "max": self.randint(avg, max_avg),
            }

    def date_range_(self):
        # Random day-of-month / month / year constraint (3:2:2 weighting),
        # carrying the object's absolute bounds alongside the sampled ones.
        ch = self.randint(0, 6)
        if ch <= 2:
            res = {"obj": "day", "min": 1, "max": 31}
            res.update(self.range_(1, 14, 31))
            return res
        elif ch <= 4:
            res = {"obj": "month", "min": 1, "max": 12}
            res.update(self.range_(1, 4, 12))
            return res
        else:
            res = {"obj": "year", "min": 2000, "max": 2020}
            res.update(self.range_(2000, 2010, 2020))
            return res

    def range(self, obj):
        # Wrap a sampled constraint for `obj` in a {"range": ...} node;
        # non-date objects take their bounds from self.data_["bounds"].
        if obj == "date":
            res = self.date_range_()
        else:
            res = {"obj": obj}
            res.update(self.range_(*self.data_["bounds"][obj]))
        return {"range": res}

    def simple_(self, objs, extra=None):
        # One atomic condition: an `extra` object becomes a literal data
        # lookup; otherwise 1/6 odd, 1/6 even, else a range constraint
        # (dates never get odd/even).
        new_objs = objs[:]
        if extra:
            new_objs.extend(extra)
        obj = self.choice(new_objs)
        if extra and obj in extra:
            return {obj: self.data(obj)[0]}
        ch = self.randint(0, 5)
        if ch == 0 and obj != "date":
            return {"odd": obj}
        elif ch == 1 and obj != "date":
            return {"even": obj}
        return self.range(obj)

    def condition_(self, objs, complexity=0, extra=None):
        # Recursively build a random boolean condition tree.
        # objs: ["batteries", "ports", "date", "serial digit"]
        # extra: ["gravity", "serial has"]
        if complexity == 3:
            # level 3: level-2 condition, negated 1 time in 5
            res = self.condition_(objs, 2, extra)
            return res if self.randint(0, 4) else {"not": res}
        elif complexity == 2:
            # level 2: half the time combine two level-1 conditions with
            # and/or, otherwise fall through to a level-1 condition
            ch = self.randint(0, 3)
            if ch <= 1 and len(objs) > 1:
                head, *tail = objs
                return {
                    "and"
                    if ch
                    else "or": [
                        self.condition_([head], 1),
                        self.condition_(tail, 1, extra),
                    ]
                }
            return self.condition_(objs, 1, extra)
        elif complexity == 1:
            # level 1: atomic condition, negated 1 time in 4 -- but
            # one-sided ranges (only min or only max) are always negated
            res = self.simple_(objs, extra)
            has = lambda x: x in res["range"]
            one_sided = "range" in res and (has("min") != has("max"))
            return {"not": res} if not (self.randint(0, 3) or one_sided) else res
        return self.simple_(objs, extra)

    def simplify(self, res):
        # Normalise the condition tree so negations sink outward/inward
        # consistently.
        # de morgan (and not not -> not or)
        if "and" in res and all("not" in x for x in res["and"]):
            return self.simplify({"not": {"or": [x["not"] for x in res["and"]]}})
        # de morgan (not and -> or not not)
        if "not" in res and "and" in res["not"]:
            return self.simplify({"or": [self.simplify({"not": x}) for x in res["not"]["and"]]})
        # double negation
        if "not" in res and "not" in res["not"]:
            return self.simplify(res["not"]["not"])
        return res

    def condition(self, *args):
        # Public entry point: generate a condition tree and normalise it.
        return self.simplify(self.condition_(*args))
| [
"dvorak42@mit.edu"
] | dvorak42@mit.edu |
568fbfe4e7d7d0dddaf488b5339808d9e0641214 | f3eae8877d8065abced3ad5eadc1a084c9569e80 | /functional_preprocessing/topup-version/struct_preproc/structural.py | 560febe8d64410fbd04fde4e56564066aa1356a7 | [] | no_license | fBeyer89/RSV_rsanalysis | 586afc52a5a93fb681166fd7ee0795d0197a3a63 | 1128ace44f52143e94d9c98865c084d30aeca36c | refs/heads/master | 2020-04-09T05:46:54.200643 | 2019-04-18T14:22:42 | 2019-04-18T14:22:42 | 92,715,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,213 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 9 14:33:51 2015
@author: fbeyer
"""
'''
Main workflow for preprocessing of mprage data
===============================================
Uses file structure set up by conversion
'''
from nipype.pipeline.engine import Node, Workflow
import nipype.interfaces.io as nio
from reconall import create_reconall_pipeline
from mgzconvert import create_mgzconvert_pipeline
from ants import create_normalize_pipeline
#from brainextract import create_brainextract_pipeline
def create_structural(subject, working_dir, data_dir, freesurfer_dir, out_dir, standard_brain):
    """Build and run the anatomical preprocessing workflow for one subject.

    Chains the mgz-to-nifti conversion of an existing Freesurfer recon with
    ANTs normalization to a standard brain, and sinks the results.

    Parameters
    ----------
    subject : Freesurfer subject id (assumed already reconstructed; the
        reconall stage is configured but not connected below)
    working_dir : nipype working directory (also holds crash dumps)
    data_dir : unused here -- the raw-data selection is commented out;
        kept for call-site compatibility
    freesurfer_dir : existing Freesurfer SUBJECTS_DIR
    out_dir : DataSink base directory for the preprocessed outputs
    standard_brain : path to the standard (MNI) brain used for normalization
    """
    # main workflow
    struct_preproc = Workflow(name='anat_preproc')
    struct_preproc.base_dir = working_dir
    struct_preproc.config['execution']['crashdump_dir'] = struct_preproc.base_dir + "/crash_files"
    # select files
    #templates={'anat': '3T/nifti/MPRAGEADNI32Ch.nii.gz'}
    #selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir), name="selectfiles")
    # workflow to run freesurfer reconall
    # (configured but not wired into the graph -- see connections below)
    reconall=create_reconall_pipeline()
    reconall.inputs.inputnode.fs_subjects_dir=freesurfer_dir
    reconall.inputs.inputnode.fs_subject_id=subject
    # workflow to get brain, head and wmseg from freesurfer and convert to nifti
    mgzconvert = create_mgzconvert_pipeline()
    mgzconvert.inputs.inputnode.fs_subjects_dir=freesurfer_dir
    mgzconvert.inputs.inputnode.fs_subject_id=subject
    normalize = create_normalize_pipeline()
    normalize.inputs.inputnode.standard = standard_brain
    # sink to store files
    sink = Node(nio.DataSink(base_directory=out_dir,
                             parameterization=False,
                             substitutions=[
                                 ('transform_Warped', 'T1_brain2mni')]),
                name='sink')
    # connections
    struct_preproc.connect(
        [#(selectfiles, sink, [('anat', 'outputnode.test')]),
         #(selectfiles, reconall, [('anat', 'inputnode.anat')]),
         #(reconall, mgzconvert, [('outputnode.fs_subject_id', 'inputnode.fs_subject_id'),
         #                        ('outputnode.fs_subjects_dir', 'inputnode.fs_subjects_dir')]),
         #for second round of structural don't redo FREESURFER
         (mgzconvert, normalize, [('outputnode.anat_brain', 'inputnode.anat')]),
         (mgzconvert, sink, [('outputnode.anat_head', '@head')]),
         (mgzconvert, sink, [('outputnode.anat_brain', '@brain')]),
         (mgzconvert, sink, [('outputnode.anat_brain_mask', '@mask')]),
         (mgzconvert, sink, [('outputnode.wmedge', '@wmedge')]),
         (normalize, sink, [('outputnode.anat2std', '@anat2std'),
                            ('outputnode.anat2std_transforms', 'transforms2mni.@anat2std_transforms'),
                            ('outputnode.std2anat_transforms', 'transforms2mni.@std2anat_transforms')])
         ])
    struct_preproc.write_graph(dotfilename='struct_preproc.dot', graph2use='colored', format='pdf', simple_form=True)
    # struct_preproc.run()
    # Runs serially in-process by default; alternative plugins kept below.
    struct_preproc.run() #, plugin_args = {'initial_specs': 'request_memory = 1500'}plugin='CondorDAGMan'
    #struct_preproc.run(plugin='MultiProc')
"fbeyer@cbs.mpg.de"
] | fbeyer@cbs.mpg.de |
class Solution(object):
    def compareVersion(self, version1, version2):
        """LeetCode 165: compare two dotted version strings.

        Returns -1 if version1 < version2, 1 if greater, 0 if equal.
        Missing components count as 0 ("1.0" == "1.0.0") and leading
        zeros are ignored ("1.01" == "1.1").

        Fix: the previous version indexed both split lists with the same
        i bounded only by len(arr1), so versions of different lengths
        raised IndexError (the recorded Runtime Error).
        """
        arr1 = [int(part) for part in version1.split(".")]
        arr2 = [int(part) for part in version2.split(".")]
        # Conceptually pad the shorter version with zero components.
        for i in range(max(len(arr1), len(arr2))):
            a = arr1[i] if i < len(arr1) else 0
            b = arr2[i] if i < len(arr2) else 0
            if a < b:
                return -1
            if a > b:
                return 1
        return 0
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
68bcfd2922d3dba3e542fed5e919fa83143d9bfa | ba602dc67ad7bb50133aeb312f3c6c54627b3dec | /data/3922/AC_py/508134.py | 7f1f09ae0a4fe4b50f052a90e596911ef725a583 | [] | no_license | Dearyyyyy/TCG | 0d21d89275906157372d775f33309ce337e6bc95 | 7b80de16de2d3f5d95a7c4ed95d45a9e38882e67 | refs/heads/master | 2020-12-27T23:19:44.845918 | 2020-02-04T01:59:23 | 2020-02-04T01:59:23 | 238,101,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | # coding=utf-8
import sys
# Round-half-up integer division: read "a b" pairs from stdin forever,
# printing round(a/b) (add 0.5, then floor) or "error" when b is zero.
# NOTE(review): the loop only terminates when input() raises EOFError at
# end of input -- typical throwaway judge-submission style.
while True:
    a,b=map(float,input().split())
    if b!=0:
        # floor(x + 0.5) implements round-half-up for the quotient
        print(int((a/b+0.5)//1))
    else:
        print("error")
"543271544@qq.com"
] | 543271544@qq.com |
d1ebe71d5c08688b5518526b99952e558aa18674 | a439ca43178d38cfe6daaee50ea134ca6c52b502 | /thaniya_server_archive/src/thaniya_server_archive/volumes/__init__.py | be1415d1034a205e635c0328cb07d874915aaa23 | [
"Apache-2.0"
] | permissive | jkpubsrc/Thaniya | 37ca727abdc6f9f605257813889fe3a033995bba | 4ebdf2854e3d7888af7396adffa22628b4ab2267 | refs/heads/master | 2023-03-05T20:58:59.528746 | 2021-02-15T19:31:06 | 2021-02-15T19:31:06 | 331,318,787 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py |
__version__ = "0.2021.1.20.1"
from .BackupVolumeID import BackupVolumeID
from ._RawDeviceIterator import _RawDeviceIterator
from .Device import Device
from .DeviceIterator import DeviceIterator
from .BackupVolumeInfo import BackupVolumeInfo
from .BackupVolumeCfgFile import BackupVolumeCfgFile
from .BackupVolumeManager import BackupVolumeManager | [
"pubsrc@binary-overflow.de"
] | pubsrc@binary-overflow.de |
260778a99413ef35e79f6adf633bb47c165b2e41 | 2dc8ac19e5f6f5fb8638bbdd1917a15094f3431b | /correlation discovery/combine_Lynkwifi_and_col_bus_sub_manhattan_each_injured_each_killed/map.py | 1723b4d92d67bad3c40c014d5a2c07ff87df6afb | [] | no_license | rgc292/Capstone_Project | 8182f2d8143e50db2654da343bd82ae66e74b1c3 | 35b16193f2363277fdf691dced704f56da1f8331 | refs/heads/master | 2020-07-01T04:57:03.365113 | 2016-12-20T01:51:42 | 2016-12-20T01:51:42 | 74,094,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,401 | py | #!/usr/bin/python
import sys
import numpy as np
import csv
import StringIO
# Parallel lists holding the date parts and coordinates of each
# 13-column record (collisions near bus/subway stops, per the job name).
col_bus_sub_year = []
col_bus_sub_month = []
col_bus_sub_day = []
col_bus_sub_lat = []
col_bus_sub_lon = []
# Parallel lists for the 5-column wifi-hotspot records (Lynk wifi,
# per the job name).
wifi_year = []
wifi_month = []
wifi_day = []
wifi_lat = []
wifi_lon = []
# Raw CSV line kept for each accepted collision record, re-emitted in
# the mapper output below.
col_bus_sub = []
counter = -1
def dist(coord1, coord2, _type):
    """Haversine-style distance, in miles, along a single axis.

    _type 'lat' measures the north-south separation between two
    latitudes; _type 'lon' measures an east-west separation evaluated at
    the equator and rescaled by the empirical factor 1.3112... (the
    lat/lon mile ratio used by this job). Any other _type returns None.
    """
    earth_radius_km = 6371.0
    if _type == 'lat':
        delta_lat = np.deg2rad(coord2 - coord1)
        delta_lon = np.deg2rad(0.0)
        haver = np.sin(delta_lat / 2.0) ** 2 + np.cos(np.deg2rad(coord1)) * np.cos(np.deg2rad(coord2)) * np.sin(delta_lon / 2.0) * np.sin(delta_lon / 2.0)
        angle = 2.0 * np.arctan2(haver ** .5, (1.0 - haver) ** .5)
        # kilometres -> miles via the 1.6 km/mile approximation
        return (earth_radius_km * angle) / 1.6
    elif _type == 'lon':
        delta_lat = np.deg2rad(0.0)
        delta_lon = np.deg2rad(coord2 - coord1)
        haver = np.sin(delta_lat / 2.0) ** 2 + np.cos(np.deg2rad(0.0)) * np.cos(np.deg2rad(0.0)) * np.sin(delta_lon / 2.0) * np.sin(delta_lon / 2.0)
        angle = 2.0 * np.arctan2(haver ** .5, (1.0 - haver) ** .5)
        return ((earth_radius_km * angle) / 1.6) / 1.311260927412249
# input comes from STDIN (stream data that goes to the program)
# Records are distinguished purely by column count: 5 columns -> wifi
# hotspot (year, month, day, lat, lon), 13 columns -> collision record
# with the date in col 0 ("YYYY-MM-DD...") and coordinates in cols 9/10.
# Rows with blank or zero coordinates are dropped.
for line in sys.stdin:
    try:
        #Fill in your map code here. To write to output file, use "print"
        # remove leading and trailing whitespace
        csv_file = StringIO.StringIO(line)
        csv_reader = csv.reader(csv_file)
    except:
        # NOTE(review): bare except; also csv_reader from a previous
        # iteration would be reused if construction ever failed.
        pass
    for l in csv_reader:
        if len(l) == 5:
            if (l[0] == '' or l[1] == '' or l[2] == '' or l[3] == '' or l[3] == '0' or l[4] == '' or l[4] == '0'):
                pass
            else:
                wifi_year.append(l[0])
                wifi_month.append(l[1])
                wifi_day.append(l[2])
                wifi_lat.append(l[3])
                wifi_lon.append(l[4])
        elif (len(l) == 13):
            if (l[9] == '' or l[10] == '' or l[9] == '0' or l[10] == '0'):
                pass
            else:
                # split "YYYY-MM-DD,..." date into its three parts
                col_bus_sub_year.append(l[0].split(',')[0].split('-')[0])
                col_bus_sub_month.append(l[0].split(',')[0].split('-')[1])
                col_bus_sub_day.append(l[0].split(',')[0].split('-')[2])
                col_bus_sub_lat.append(l[9])
                col_bus_sub_lon.append(l[10])
                col_bus_sub.append(','.join(map(str, l)).strip())
        else:
            pass
flag = 0
# For every collision record, set flag=1 if at least one wifi record is
# both nearby (combined lat+lon distance <= 0.031 mi) and strictly
# earlier in date than the collision; emit "<raw record>,<flag>".
for lat_col_bus_sub, lon_col_bus_sub in zip(col_bus_sub_lat,col_bus_sub_lon):
    wifi_counter = -1
    counter += 1
    flag = 0
    for lat_wifi, lon_wifi in zip(wifi_lat,wifi_lon):
        wifi_counter += 1
        lat_d = dist(float(lat_col_bus_sub),float(lat_wifi),'lat')
        lon_d = dist(float(lon_col_bus_sub),float(lon_wifi),'lon')
        # NOTE(review): the two axis distances are summed (L1-style), not
        # combined by Pythagoras -- confirm this is intended.
        distance = lat_d + lon_d
        if (distance <= 0.031): # 0.015625
            # lexicographic date comparison: year, then month, then day
            if (int(col_bus_sub_year[counter]) > int(wifi_year[wifi_counter])):
                flag = 1
            elif (int(col_bus_sub_year[counter]) == int(wifi_year[wifi_counter])):
                if (int(col_bus_sub_month[counter]) > int(wifi_month[wifi_counter])):
                    # NOTE(review): debug print -- this "ok" line is mixed
                    # into the mapper's key/value output stream.
                    print '%s' %('ok')
                    flag = 1
                elif (int(col_bus_sub_month[counter]) == int(wifi_month[wifi_counter])):
                    if (int(col_bus_sub_day[counter]) > int(wifi_day[wifi_counter])):
                        flag = 1
                    else:
                        pass
                else:
                    pass
            else:
                pass
        else:
            pass
    if flag == 1:
        print '%s,%d' %(col_bus_sub[counter],1)
    elif flag == 0:
        print '%s,%d' %(col_bus_sub[counter],0)
    else:
        pass
"Rafa@192.168.1.139"
] | Rafa@192.168.1.139 |
9cfa025c1c6cdcd1e98a7044b1aaa4b444395e64 | 8a7c56ea3eb73518cdf8d898f6a6f5883b105ec7 | /src/trace_msg_bfms/trace_msg_bfm.py | 548bfed2f68f6a15d64ab0b742786b1635c0a1a2 | [
"Apache-2.0"
] | permissive | pybfms/pybfms-trace-msg | cb8b5017af20ce52697d960f8d48574370459416 | 86eb7f7530a04c50ed79c88cb5ae452983dc31ed | refs/heads/main | 2023-06-08T08:37:53.333590 | 2021-06-28T01:47:18 | 2021-06-28T01:47:18 | 368,890,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py |
import pybfms
@pybfms.bfm(hdl={
    pybfms.BfmType.Verilog : pybfms.bfm_hdl_path(__file__, "hdl/trace_msg_bfm.v"),
    pybfms.BfmType.SystemVerilog : pybfms.bfm_hdl_path(__file__, "hdl/trace_msg_bfm.v"),
    }, has_init=True)
class TraceMsgBfm():
    """Bus-functional model that mirrors Python-side trace messages into the
    HDL ``trace_msg_bfm`` module, one character at a time.

    NOTE(review): the HDL side fixes the number of message slots and the
    per-slot width; the width is pushed up from HDL via ``_set_parameters``.
    """

    def __init__(self):
        self.busy = pybfms.lock()
        self.is_reset = False        # becomes True once _reset() fires
        self.reset_ev = pybfms.event()
        self.msg_sz = 0              # max characters per message; set by HDL
        pass

    @pybfms.export_task(pybfms.uint32_t)
    def _set_parameters(self, msg_sz):
        """Called from the HDL init block to publish the message-field width."""
        print("TraceBFM: msg_sz=%d" % msg_sz)
        self.msg_sz = msg_sz

    @pybfms.export_task()
    def _reset(self):
        """Called from HDL when reset is released; wakes waiters on reset_ev."""
        self.is_reset = True
        self.reset_ev.set()

    def set_msg(self, idx, msg):
        """Display string *msg* in message slot *idx* (over-long text is truncated)."""
        self._set_msg(idx, msg)

    def clr_msg(self, idx):
        """Clear message slot *idx*."""
        self._clr_msg(idx)

    def _set_msg(self, idx, msg):
        # Clear the slot first so no stale tail characters remain.
        self._clr_msg(idx)
        # Truncate over-long messages, reserving three characters for '...'.
        if len(msg) > self.msg_sz:
            msg = msg[0:self.msg_sz-3]
            msg += "..."
        for i,c in enumerate(msg.encode()):
            self._set_msg_c(idx, i, c)

    @pybfms.import_task(pybfms.uint8_t,pybfms.uint8_t,pybfms.uint8_t)
    def _set_msg_c(self, msg, idx, c):
        # HDL-implemented. NOTE(review): the parameter names look swapped --
        # callers pass (slot index, character position, character byte), so
        # the first argument is a slot, not a message. Confirm against
        # hdl/trace_msg_bfm.v before renaming.
        pass

    @pybfms.import_task(pybfms.uint8_t)
    def _clr_msg(self, idx):
        # HDL-implemented: blank out message slot *idx*.
        pass
| [
"matt.ballance@gmail.com"
] | matt.ballance@gmail.com |
f9ef15b9bfd358fbcf78dc3c8a2c94f18e736c13 | b61573aeb976040f0b1ba67900ec28b14a2652dc | /torchaudio/functional/__init__.py | bf27168cbbd127df9a24eecd273c32234c1e630d | [
"BSD-2-Clause"
] | permissive | TrendingTechnology/audio | a84c7408f78a20cf6cf5456e2f12b284491a28ce | 2aad928903f2f0f9a05af9a68ac2ed203faf1093 | refs/heads/main | 2023-07-08T03:48:08.350672 | 2021-08-05T17:52:02 | 2021-08-05T17:52:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | from .functional import (
amplitude_to_DB,
angle,
complex_norm,
compute_deltas,
compute_kaldi_pitch,
create_dct,
create_fb_matrix,
melscale_fbanks,
linear_fbanks,
DB_to_amplitude,
detect_pitch_frequency,
griffinlim,
magphase,
mask_along_axis,
mask_along_axis_iid,
mu_law_encoding,
mu_law_decoding,
phase_vocoder,
sliding_window_cmn,
spectrogram,
spectral_centroid,
apply_codec,
resample,
edit_distance,
pitch_shift,
)
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dither,
dcshift,
deemph_biquad,
equalizer_biquad,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
# Public API of torchaudio.functional: re-exports from .functional (DSP /
# feature-extraction ops) and .filtering (biquad filters and audio effects).
__all__ = [
    'amplitude_to_DB',
    'angle',
    'complex_norm',
    'compute_deltas',
    'compute_kaldi_pitch',
    'create_dct',
    'create_fb_matrix',
    'melscale_fbanks',
    'linear_fbanks',
    'DB_to_amplitude',
    'detect_pitch_frequency',
    'griffinlim',
    'magphase',
    'mask_along_axis',
    'mask_along_axis_iid',
    'mu_law_encoding',
    'mu_law_decoding',
    'phase_vocoder',
    'sliding_window_cmn',
    'spectrogram',
    'spectral_centroid',
    'allpass_biquad',
    'band_biquad',
    'bandpass_biquad',
    'bandreject_biquad',
    'bass_biquad',
    'biquad',
    'contrast',
    'dither',
    'dcshift',
    'deemph_biquad',
    'equalizer_biquad',
    'flanger',
    'gain',
    'highpass_biquad',
    'lfilter',
    'lowpass_biquad',
    'overdrive',
    'phaser',
    'riaa_biquad',
    'treble_biquad',
    'vad',
    'apply_codec',
    'resample',
    'edit_distance',
    'pitch_shift',
]
| [
"noreply@github.com"
] | TrendingTechnology.noreply@github.com |
6753a026018172376dbb6693b26fef5fc2ed39e7 | 9142c3ebb20bdeab4d2b7e8e70ab562ce65cfe59 | /thermosteam/utils/decorators/units_of_measure.py | f631bb7d39c4e3b85727446e02a6a19a0ae8d2bf | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"NCSA"
] | permissive | BioSTEAMDevelopmentGroup/thermosteam | ce97fe2e5e2a5d21a9715435f653e3ee7e706e00 | 934e99441acfdb89d72dc99fee7b9c6def5aef6f | refs/heads/master | 2023-08-08T00:17:47.366975 | 2023-07-28T05:28:31 | 2023-07-28T05:28:31 | 219,133,879 | 46 | 14 | NOASSERTION | 2023-02-16T19:44:16 | 2019-11-02T09:50:30 | Python | UTF-8 | Python | false | false | 2,204 | py | # -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2023, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import thermosteam as tmo
from typing import Optional
__all__ = ('units_of_measure',)
def units_of_measure(dct, cls=None):
    """Class decorator that attaches a units-of-measure registry to *cls*.

    Installs *dct* as ``cls._units_of_measure`` together with the
    ``define_property`` / ``get_property`` / ``set_property`` helpers.
    May be used directly, ``units_of_measure(dct, MyClass)``, or as a
    decorator factory, ``@units_of_measure(dct)``.
    """
    if cls is None:
        # Decorator-factory form: return a callable awaiting the class.
        return lambda klass: units_of_measure(dct, klass)
    cls._units_of_measure = dct
    cls.define_property = define_property
    cls.get_property = get_property
    cls.set_property = set_property
    return cls
@classmethod
def define_property(cls, name, units, fget, fset=None):
    """Register *name* as a unit-aware property on *cls*.

    Records the units of measure in ``cls._units_of_measure`` and installs a
    ``property`` built from *fget*/*fset*.

    Raises ValueError if *name* already exists on the class.
    """
    # Validate BEFORE mutating shared state: the previous implementation
    # inserted the units entry first, so a name clash left a stale entry in
    # cls._units_of_measure after the ValueError was raised.
    if hasattr(cls, name):
        raise ValueError(f"property with name '{name}' already exists")
    cls._units_of_measure[name] = tmo.units_of_measure.AbsoluteUnitsOfMeasure(units)
    setattr(cls, name, property(fget, fset))
def get_property(self, name: str, units: Optional[str]=None):
    """Return the value of property *name*, optionally converted to *units*.

    Parameters
    ----------
    name :
        Name of property.
    units :
        Units of measure. Defaults to the property's original units of measure.

    Raises
    ------
    ValueError
        If *units* is given but *name* is not a registered property.
    """
    raw = getattr(self, name)
    if units is None:
        return raw
    try:
        base_units = self._units_of_measure[name]
    except KeyError:
        raise ValueError(f"'{name}' is not a property") from None
    return base_units.convert(raw, units)
def set_property(self, name: str, value: float, units: Optional[str]=None):
    """Set property *name* to *value*, unconverting from *units* if given.

    Parameters
    ----------
    name :
        Name of property.
    value :
        New value of property.
    units :
        Units of measure.

    Raises
    ------
    ValueError
        If *name* is not a registered property.
    """
    registry = self._units_of_measure
    if name not in registry:
        raise ValueError(f"no property with name '{name}'")
    if units is not None:
        value = registry[name].unconvert(value, units)
    setattr(self, name, value)
| [
"yoelcortes@gmail.com"
] | yoelcortes@gmail.com |
bef2bbff53a217bbd10fec6672006ab131eda1d4 | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py | dc68ac3521998969963c0e69734771dd04f673e6 | [
"GPL-3.0-only",
"MIT",
"GPL-3.0-or-later",
"CC0-1.0",
"GPL-1.0-or-later"
] | permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 2,624 | py | #!/usr/bin/python
# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: utm_network_interface_address_info
author:
- Juergen Wiebe (@steamx)
short_description: Get info for a network/interface_address object
description:
- Get info for a network/interface_address object in SOPHOS UTM.
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
extends_documentation_fragment:
- community.general.utm
'''
EXAMPLES = """
- name: utm network interface address
    utm_network_interface_address_info:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestNetworkInterfaceAddress
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: str
_locked:
description: Whether or not the object is currently locked
type: bool
_type:
description: The type of the object
type: str
name:
description: The name of the object
type: str
address:
description: The ip4 address of the network/interface_address object
type: str
address6:
description: The ip6 address of the network/interface_address object
type: str
comment:
description: The comment string
type: str
resolved:
description: Whether or not the object is resolved
type: bool
resolved6:
description: Whether or not the object is resolved
type: bool
"""
from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
    """Entry point: look up a UTM network/interface_address object and report it.

    Argument handling and the REST round-trip are delegated to the shared UTM
    helper in info-only mode; any failure is surfaced through fail_json.
    """
    module = UTMModule(
        argument_spec=dict(
            name=dict(type='str', required=True)
        )
    )
    try:
        # Info-only query: no keys are checked for changes.
        UTM(module, "network/interface_address", [], info_only=True).execute()
    except Exception as e:
        module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| [
"joshuamadison+gh@gmail.com"
] | joshuamadison+gh@gmail.com |
a3e6f84aabc48b8319995511b742d221aa8a1507 | 4b02aa96b41c7852678e7c9b3361830b2d1a1a09 | /LeetCode-solution/problems/palindrome_linked_list/solution.py | 93957b43d3eafe756306421566a762c41d6dcb74 | [] | no_license | arifkhan1990/LeetCode-solution | 4a4124d6b41dc516b673d1b1adc693054a00509f | 85e1a3a285ee059dce091621b79312ba96024eed | refs/heads/master | 2023-01-13T17:26:13.720649 | 2023-01-12T17:35:39 | 2023-01-12T17:35:39 | 243,922,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    """LeetCode 234: check whether a singly linked list is a palindrome."""

    def isPalindrome(self, head: ListNode) -> bool:
        """Return True if the list reads the same forwards and backwards.

        Walks the list once collecting its values, then compares the
        sequence against its reverse. O(n) time, O(n) extra space; the
        list itself is never mutated.
        """
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        return values == values[::-1]
"arifkhanshubro@gmail.com"
] | arifkhanshubro@gmail.com |
511ca973f7ddab124a8fa1f2c8be06fb5db51303 | 2abd291027ea831fe85ffa8d929e769612f1bc9c | /09 - ginger/app/models/user.py | 7f7a4258475c332502476fd5fd84159c8605f73a | [] | no_license | hippieZhou/Python-Web-Every-Day | 13767ec8fbacfca29e3add0a88976b2afa74d5f5 | 7fc3f4b1378daccdfabc3ca6b66d4f5d4a98e360 | refs/heads/master | 2022-03-10T00:16:25.333925 | 2022-03-05T02:40:26 | 2022-03-05T02:40:26 | 175,198,664 | 5 | 1 | null | 2022-03-05T02:40:51 | 2019-03-12T11:40:10 | HTML | UTF-8 | Python | false | false | 1,508 | py | from sqlalchemy import Column, Integer, String, SmallInteger
from werkzeug.security import generate_password_hash, check_password_hash
from app.models.base import Base, db
from app.libs.error_code import NotFound, AuthFailed
import datetime
class User(Base):
    """SQLAlchemy model for an application account.

    Passwords are stored as werkzeug hashes; plaintext is never persisted.
    """
    id = Column(Integer, primary_key=True)
    email = Column(String(24), unique=True, nullable=False)
    nickname = Column(String(24), unique=True)
    # Permission level: 2 = administrator, anything else = regular user
    # (see verify()).
    auth = Column(SmallInteger, default=1)
    # Hashed password; mapped to the 'password' column and exposed through
    # the property pair below.
    _password = Column('password', String(100))

    def keys(self):
        """Fields exposed when the model is serialized (e.g. dict(user))."""
        return ['id', 'email', 'nickname', 'auth']

    @property
    def password(self):
        # Returns the stored hash, not the plaintext.
        return self._password

    @password.setter
    def password(self, raw):
        # Hash on assignment so plaintext never reaches the database.
        self._password = generate_password_hash(raw)

    @staticmethod
    def register_by_email(nickname, account, secret):
        """Create and persist a new account inside an auto-commit transaction."""
        with db.auto_commit():
            user = User()
            user.nickname = nickname
            user.email = account
            user.password = secret
            db.session.add(user)

    @staticmethod
    def verify(email, password):
        """Authenticate by email/password.

        Unknown email aborts with a 404 (via first_or_404); a wrong password
        raises AuthFailed. On success returns the uid and permission scope.
        """
        user = User.query.filter_by(email=email).first_or_404()
        if not user.check_password(password):
            raise AuthFailed()
        scope = 'AdminScope' if user.auth == 2 else 'UserScope'
        return {'uid': user.id, 'scope': scope}

    def check_password(self, raw):
        """Compare *raw* against the stored hash; False when no hash is set."""
        if not self._password:
            return False
        return check_password_hash(self._password, raw)
| [
"hippiezhou@outlook.com"
] | hippiezhou@outlook.com |
aeb3f5158fe2c3a4ffb56b2dfcb9d92b091d9a8d | d768f07ed90c0274e2d9d935eaf5ecfe734a1f56 | /lya_statistics/old/compute_power_spectrum_statistics.py | c25a67cceabb721282d7af4e1a083a078453dd85 | [] | no_license | bvillasen/simulation_analysis | cfd0b5de865d2fb5992d828b2824079e6798774b | 645f0c397172ed30a713368942eec9ca68a9761a | refs/heads/master | 2023-06-02T19:06:39.851760 | 2021-06-25T18:40:58 | 2021-06-25T18:40:58 | 298,894,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,280 | py | import os, sys
import numpy as np
import h5py as h5
import pickle
root_dir = os.path.dirname(os.getcwd()) + '/'
subDirectories = [x[0] for x in os.walk(root_dir)]
sys.path.extend(subDirectories)
from tools import *
from stats_functions import compute_distribution, get_highest_probability_interval
# MPI is optional: with use_mpi False the script runs serially as rank 0.
use_mpi = False
if use_mpi :
  from mpi4py import MPI
  comm = MPI.COMM_WORLD
  rank = comm.Get_rank()
  nprocs = comm.Get_size()
else:
  rank = 0
  nprocs = 1
# Only rank 0 prints, so MPI runs do not duplicate output.
print_out = False
if rank == 0: print_out = True
#
# parameters = sys.argv
# if print_out: print( parameters )
# for option in parameters:
#   if option.find("uvb") != -1: uvb = option[option.find('=')+1:]
# if print_out: print( f'Snapshot: {n_snap}' )
# Number of cells along a skewer (2048^3 hydro run on a 50 Mpc box).
n_points = 2048
# UV-background model selecting which flux directory to process.
# uvb = 'pchw18'
uvb = 'hm12'
# dataDir = '/home/bruno/Desktop/ssd_0/data/'
dataDir = '/raid/bruno/data/'
# dataDir = '/data/groups/comp-astro/bruno/'
simulation_dir = dataDir + 'cosmo_sims/2048_hydro_50Mpc/'
input_dir = simulation_dir + 'transmited_flux_{0}_review/flux_power_spectrum_new/'.format(uvb)
output_dir = simulation_dir + 'transmited_flux_{0}_review/flux_power_spectrum_new/'.format(uvb)
if rank == 0: create_directory( output_dir )
# Snapshot ids: union of the internal selection and the BOSS-redshift set.
snaps = [ 83, 90, 96, 102, 119, 124, 130, 136, 143, 151, 159, 169, ]
snaps_boss = [ 96, 102, 106, 110, 114, 119, 124, 130, 136, 143, 151, 159 ]
snapshots = list( set( snaps_boss ).union(set(snaps)))
snapshots.sort()
print(snapshots)
# n_snap = 159
# For every snapshot: load the flux power spectrum of all skewers, compute
# per-k-bin statistics (mean, std, and an asymmetric highest-probability
# interval of the distribution), and pickle the result.
for n_snap in snapshots:
  file_name = input_dir + f'flux_ps_{n_snap}.h5'
  print( f'Loading File: {file_name}' )
  file = h5.File( file_name, 'r')
  current_z = file.attrs['current_z']
  vel_Hubble = file['vel_Hubble'][...]
  k_vals = file['k_vals'][...]
  ps_vals = file['flux_power_spectrum'][...]   # shape: (n_skewers, n_bins)
  file.close()
  n_skewers, n_bins = ps_vals.shape
  vel_max = vel_Hubble.max()
  print(f'N Skewers: {n_skewers} n_bins:{n_bins} ' )
  n_bins_for_distribution = 100
  fill_sum = 0.70   # probability mass enclosed by the reported interval
  ps_stats = {}
  # index = 6
  for index in range( 25 ):
    k_val = k_vals[index]
    # Wavelength of this mode; modes separated by >= one wavelength are
    # treated as independent samples.
    vel = 2*np.pi / k_val
    stride = n_points * ( vel / vel_max )
    # Was hard-coded 2048; use n_points for consistency with the line above.
    n_steps = int( n_points / stride )
    stride = int( stride )
    # np.int was removed in NumPy >= 1.24; the builtin int is the
    # documented replacement.
    ids_1d = ( np.arange( 0, n_steps, 1 ) * stride ).astype( int )
    n_1d = len( ids_1d )
    n_independent = n_1d**2
    print ( f' id: {index}, val: {k_val:.1e} n_independent: {n_independent}' )
    # Dimensionless power Delta^2 = P(k) * k / pi for every skewer.
    delta_vals = ps_vals[:, index] * k_val / np.pi
    delta_mean = delta_vals.mean()
    delta_sigma = delta_vals.std()
    distribution, bin_centers = compute_distribution( delta_vals, n_bins_for_distribution, log=True )
    # Renamed from `sum` to avoid shadowing the builtin (value is unused).
    v_l, v_r, v_max, prob_sum = get_highest_probability_interval( bin_centers, distribution, fill_sum, log=True, n_interpolate=1000)
    ps_stats[index] = {}
    ps_stats[index]['k_val'] = k_val
    ps_stats[index]['bin_centers'] = bin_centers
    ps_stats[index]['distribution'] = distribution
    ps_stats[index]['delta_mean'] = delta_mean
    ps_stats[index]['delta_sigma'] = delta_sigma
    ps_stats[index]['sigma_l'] = v_l
    ps_stats[index]['sigma_r'] = v_r
    ps_stats[index]['sigma_max'] = v_max
    ps_stats[index]['n_independent'] = n_independent
  # Gather the per-bin scalars into flat arrays for convenient plotting.
  n_indp_list = []
  k_list = []
  mean_list, sigma_list = [], []
  sigma_asim_l, sigma_asim_r = [], []
  for index in range( 25 ):
    n_indp_list.append( ps_stats[index]['n_independent'] )
    k_list.append( ps_stats[index]['k_val'] )
    mean_list.append( ps_stats[index]['delta_mean'] )
    sigma_list.append( ps_stats[index]['delta_sigma'] )
    sigma_asim_l.append( ps_stats[index]['sigma_l'] )
    sigma_asim_r.append( ps_stats[index]['sigma_r'] )
  n_independent = np.array( n_indp_list )
  k_array = np.array( k_list )
  mean_array = np.array( mean_list )
  sigma_array = np.array( sigma_list )
  sigma_l_array = np.array( sigma_asim_l )
  sigma_r_array = np.array( sigma_asim_r )
  ps_stats['current_z'] = current_z
  ps_stats['k_vals'] = k_array
  ps_stats['n_independent'] = n_independent
  ps_stats['delta_mean'] = mean_array
  ps_stats['delta_sigma'] = sigma_array
  ps_stats['delta_sigma_l'] = sigma_l_array
  ps_stats['delta_sigma_r'] = sigma_r_array
  file_name = output_dir + f'stats_{n_snap}.pkl'
  # Context manager guarantees the file is closed even if pickling fails.
  with open( file_name, 'wb' ) as f:
    pickle.dump( ps_stats, f)
  print ( f'Saved File: {file_name }' )
"bvillasen@gmail.com"
] | bvillasen@gmail.com |
e80e39f7489a25cbe588e5318e01220bb5737102 | e979b765416b947fd089339dd64732d5174e7058 | /FlattenNestedList.py | e403c2d47d188a3400f6cad8396bf089bbb8f891 | [] | no_license | 250mon/CodeWars | d86cdc8ea24bc781c9adf34c24a67195e544a4a1 | 9bea8df60646a052565ae5246144a9d53939b057 | refs/heads/main | 2023-03-15T17:00:34.500844 | 2021-03-25T03:59:49 | 2021-03-25T03:59:49 | 303,961,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | def flatten_list(n_list):
result_list = []
if not n_list:
return result_list
stack = [list(n_list)]
while stack:
c_num = stack.pop()
next = c_num.pop()
if c_num:
stack.append(c_num)
if isinstance(next, list):
if next:
stack.append(list(next))
else:
result_list.append(next)
result_list.reverse()
return result_list
if __name__ == '__main__':
test_list = [0, 10, [20, 30], 40, 50, [60, 70, 80], [90, [100, 110], 120]]
result = flatten_list(test_list)
print(result) | [
"lambki@naver.com"
] | lambki@naver.com |
2ad80a74ff04fdbe4a888ef01bd9c5e25fddc2ce | 5b95b83ba7e18cb40babab37bcb0f5b63bfef3bb | /script8.py | 1ebba089a116b4bec4fb6bc5dc27f3eecb5f4d8f | [] | no_license | Moandh81/w3ressources_python | d9269959cc35c1df4a0ca9d37575c94fb96195f6 | 7a3c65bca50097c2e9b92591443dcb6b03a384a3 | refs/heads/master | 2020-03-30T22:42:23.673212 | 2019-11-11T19:58:16 | 2019-11-11T19:58:16 | 151,675,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | #!/usr/bin/python
# -*- coding: utf-8 -*
#Python Data Type: List - Exercises,
#Write a Python program to print the numbers of a specified list after removing even numbers from it
liste=range(1,11)
i= 0
listepair=[]
while i<len(liste):
if liste[i] % 2 == 0:
listepair.append(liste[i])
i = i + 1
print(listepair) | [
"anis.dhouieb@gmail.com"
] | anis.dhouieb@gmail.com |
9852067a7f48d7c5a1c1a29d4b31449e2151ab87 | 4a0f8c5c0e8324fa614da776f2a704b5c369ccbb | /topologyTest/cp_to_Topology_folder.py | 6e1eac9c65ee2c283c15f3a1a8c2d39bc8d87e15 | [] | no_license | magic2du/contact_matrix | 9f8ae868d71e7e5c8088bf22a9407ea3eb073be6 | 957e2ead76fabc0299e36c1435162edd574f4fd5 | refs/heads/master | 2021-01-18T21:15:07.341341 | 2015-09-16T02:14:53 | 2015-09-16T02:14:53 | 24,237,641 | 0 | 0 | null | 2015-09-10T19:58:24 | 2014-09-19T16:48:37 | null | UTF-8 | Python | false | false | 535 | py | import os, sys
from dealFile import *
ToDoList=sys.argv[1]
listOfAll=readListFile(ToDoList)
listOfSuccess=[]
for folders in listOfAll:
if os.path.exists('/home/du/Protein_Protein_Interaction_Project/3did_15OCT2010/dom_dom_ints/'+folders):
sh='cp -ru /home/du/Protein_Protein_Interaction_Project/3did_15OCT2010/dom_dom_ints/'+folders+' /home/du/Protein_Protein_Interaction_Project/3did_15OCT2010/topologyTest/dom_dom_ints/'
os.system(sh)
listOfSuccess.append(folders)
writeListFile('listOfSuccessCopied_'+ToDoList,listOfSuccess)
| [
"magic2du@gmail.com"
] | magic2du@gmail.com |
90454f44990f308805cb1b8772805fccdc0273e4 | cc6e7f63eaf4b3570771c46fb8b24b88e6e1f59e | /beginner/154/A.py | 09f1a826e3d6bd6b508da9a58d092c35f84c391c | [] | no_license | kamojiro/atcoderall | 82a39e9be083a01c14445417597bf357e6c854a8 | 973af643c06125f52d302a5bc1d65f07a9414419 | refs/heads/master | 2022-07-12T00:14:38.803239 | 2022-06-23T10:24:54 | 2022-06-23T10:24:54 | 161,755,381 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | #import sys
#input = sys.stdin.readline
def main():
    """AtCoder ABC154 A: print the remaining ball counts.

    Reads two ball names, their counts, and the name of the ball that was
    drawn; decrements the matching count and prints both counts.
    """
    name_s, name_t = input().split()
    count_s, count_t = map(int, input().split())
    drawn = input()
    if drawn == name_s:
        count_s -= 1
    else:
        count_t -= 1
    print(count_s, count_t)
if __name__ == '__main__':
main()
| [
"tamagoma002@yahoo.co.jp"
] | tamagoma002@yahoo.co.jp |
d67b434adfc58def665770ba75217dc4919beb9e | e9c4239c8064d882691314fd5b37208f10447173 | /leetcode/101-200题/177nthHighestSalary.py | 02a4420c9f64fca1244aec82b06600d9aa8dfc5f | [] | no_license | IronE-G-G/algorithm | 6f030dae6865b2f4ff4f6987b9aee06874a386c1 | 6f6d7928207534bc8fb6107fbb0d6866fb3a6e4a | refs/heads/master | 2020-09-21T03:02:20.908940 | 2020-03-22T15:19:41 | 2020-03-22T15:19:41 | 224,658,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | """
177 第N高的薪水
编写一个 SQL 查询,获取 Employee 表中第 n 高的薪水(Salary)。
+----+--------+
| Id | Salary |
+----+--------+
| 1 | 100 |
| 2 | 200 |
| 3 | 300 |
+----+--------+
例如上述 Employee 表,n = 2 时,应返回第二高的薪水 200。如果不存在第 n 高的薪水,那么查询应返回 null。
+------------------------+
| getNthHighestSalary(2) |
+------------------------+
| 200 |
+------------------------+
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/nth-highest-salary
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
"""
CREATE FUNCTION getNthHighestSalary(N INT) RETURNS INT
BEGIN
if N<0 then
return (select min(salary) from Employee);
else
set N=N-1;
RETURN (
# Write your MySQL query statement below.
select ifnull((select distinct salary from Employee order by salary desc limit N,1),null) as NthHighestSalary);
end if;
END
"""
| [
"linjh95@163.com"
] | linjh95@163.com |
d73bf4c4e161aa160e6327bec5770961ca88b0d2 | 63b0fed007d152fe5e96640b844081c07ca20a11 | /アルゴ式/グラフアルゴリズム/Q4. ベルマンフォード法 (1).py | 0ef10ba1f767d56b39f85b831aa906cd8a37d79c | [] | no_license | Nikkuniku/AtcoderProgramming | 8ff54541c8e65d0c93ce42f3a98aec061adf2f05 | fbaf7b40084c52e35c803b6b03346f2a06fb5367 | refs/heads/master | 2023-08-21T10:20:43.520468 | 2023-08-12T09:53:07 | 2023-08-12T09:53:07 | 254,373,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | N, M = map(int, input().split())
INF = 10**9
dist = [INF]*N
dist[0] = 0
for _ in range(M):
u, v, w = map(int, input().split())
dist[v] = min(dist[v], dist[u]+w)
print(*dist, sep="\n")
| [
"ymdysk911@gmail.com"
] | ymdysk911@gmail.com |
4441a0a988cb752bf38feafdae50934f047ffde8 | 8fb3931be18a592d230d4cff3c28742a150d13cb | /intrusiondetection_server/funcs_intrusiondetection/intrusiondetectionImpl.py | 35ac3953acea155ad1042c1dc30346b54f2694f2 | [] | no_license | rvilalta/IoT-YANG | cfc12c8a679d51a4455838cc46919a2f9be82e1f | b1317fb306c7c03e098ccb4c675d56464025c173 | refs/heads/master | 2021-01-18T20:17:11.493068 | 2016-08-29T10:41:08 | 2016-08-29T10:41:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,367 | py | import os.path, sys
sys.path.append(os.path.join('/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])))
import backend.backend as be
import cv2
import sys
import imutils
import threading
import numpy as np
import datetime
from objects_intrusiondetection.room import Status
def video_name(video_counter):
    """Return the capture filename for recording number *video_counter*."""
    return 'test{0}.avi'.format(video_counter)
def init_video_recorder(h,w,fps):
    """Open an H.264 VideoWriter for the next capture file.

    Uses the module-level video_counter to name the file. Removed the dead
    `zeros` locals of the original (including a wasted np.zeros allocation
    of a full h*w frame) -- they were never read.
    """
    fourcc = cv2.cv.FOURCC(*'H264')
    print("Starting video recording: " + video_name(video_counter))
    writer = cv2.VideoWriter(video_name(video_counter), fourcc, fps, (w, h), True)
    return writer
def deinit_video_recorder(writer):
    """Finalize a recording by releasing *writer*.

    NOTE(review): the original also assigned is_video_init/writer here, but
    without a ``global`` statement those only rebound locals and had no
    effect; callers reset the module-level flags themselves, so the dead
    assignments were removed.
    """
    print("Stoping video recording")
    writer.release()
def transfer_file(filename):
    """Placeholder: hand *filename* off to the storage server (not implemented)."""
    # TODO: request a connection to the storage server and upload the file.
    print("transfer file " + filename)
# Module-wide recording state shared by MyThread and the recorder helpers.
maxRetries = 20      # NOTE(review): never referenced in this file -- TODO confirm before removing
video_counter=1      # sequence number embedded in each capture filename (testN.avi)
is_video_init=False  # True while a VideoWriter is open
writer = None        # current cv2.VideoWriter, if any
thread1 = None       # active MyThread instance (managed by IntrusiondetectionImpl.put)
class MyThread (threading.Thread):
    """Capture thread: watches a video feed, detects motion against a
    reference frame, and records/uploads a clip for each intrusion.

    NOTE(review): relies on module globals (video_counter, the recorder
    helpers) and on backend objects (be, Status) defined elsewhere; written
    against the legacy OpenCV 2.x / Python 2 API (cv2.cv namespace).
    """

    def __init__(self, thread_id, name, video_url, thread_lock):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.name = name
        self.video_url = video_url      # device index or stream URL for VideoCapture
        self.thread_lock = thread_lock
        self._stop = threading.Event()  # set by stop() to request shutdown

    def run(self):
        print("Starting " + self.name)
        window_name = self.name
        cv2.namedWindow(window_name)
        video = cv2.VideoCapture(self.video_url)
        # Property id 4 = CV_CAP_PROP_FRAME_HEIGHT in the OpenCV 2.x API.
        # NOTE(review): 1024 looks like an intended frame *width* -- confirm.
        video.set(4,1024)
        firstFrame=None
        # Local recording state; intentionally shadows the module globals.
        is_video_init=False
        writer = None
        #GET FPS
        fps=video.get(cv2.cv.CV_CAP_PROP_FPS)
        MIN_AREA=250  # ignore contours smaller than this many pixels
        while True:
            grabbed,frame= video.read()
            text="Unoccupied"
            # resize the frame, convert it to grayscale, and blur it
            frame_resized = imutils.resize(frame, width=500)
            gray = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (21, 21), 0)
            # if the first frame is None, initialize it
            if firstFrame is None:
                firstFrame = gray
                continue
            # compute the absolute difference between the current frame and
            # first frame
            frameDelta = cv2.absdiff(firstFrame, gray)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
            # dilate the thresholded image to fill in holes, then find contours
            # on thresholded image
            thresh = cv2.dilate(thresh, None, iterations=2)
            (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
            # loop over the contours
            for c in cnts:
                # if the contour is too small, ignore it
                if cv2.contourArea(c) < MIN_AREA:
                    continue
                # compute the bounding box for the contour, draw it on the frame,
                # and update the text
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame_resized, (x, y), (x + w, y + h), (0, 255, 0), 2)
                text = "Occupied"
            global video_counter
            #Intrusion detected!
            if (text=="Occupied" and is_video_init==False):
                (h, w) = frame.shape[:2]
                writer=init_video_recorder(h,w,fps)
                is_video_init=True
                be.intrusiondetection.sensors.status=Status(1)
            #During intrusion we record
            if text=="Occupied":
                writer.write(frame)
            #No longer intrusion - We store and transfer
            if text=="Unoccupied" and is_video_init==True:
                deinit_video_recorder(writer)
                transfer_file(video_name(video_counter))
                is_video_init=False
                video_counter+=1
                be.intrusiondetection.sensors.status=Status(2)
            cv2.putText(frame, "Room Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
            cv2.imshow(window_name, frame)
            cv2.imshow("Thresh", thresh)
            cv2.imshow("Frame Delta", frameDelta)
            cv2.imshow("Security Feed", frame_resized)
            cv2.waitKey(1)
            #update first frame
            del(firstFrame)
            firstFrame = gray
            del (frame)
            key = cv2.waitKey(50)
            if self._stop.isSet():
                break
        print(self.name + " Exiting")
        cv2.waitKey(1000)
        # BUGFIX: cv2 has no DestroyAllWindows attribute; the original
        # (capitalized) call raised AttributeError on thread shutdown.
        cv2.destroyAllWindows()

    def stop(self):
        """Request the capture loop to exit after its current iteration."""
        print(self.name + " Stopped")
        self._stop.set()
class IntrusiondetectionImpl:
    """REST-style handlers for the intrusion-detection resource.

    NOTE(review): `be.intrusiondetection` (module-level backend state) and the
    schema objects are defined outside this file; the armed/disarmed state is
    assumed to appear in str(schema) -- confirm against the schema class.
    """

    @classmethod
    def put(cls, intrusiondetectionschema):
        """Arm or disarm the detector by starting/stopping the capture thread."""
        # Single declaration instead of the original's one-per-branch.
        global thread1
        print(str(intrusiondetectionschema))
        print('handling put')
        # "disarmed" must be tested first: the substring "armed" also occurs
        # inside "disarmed", so reversing these branches would misfire.
        if "disarmed" in str(intrusiondetectionschema):
            print("Stop thread")
            thread1.stop()
        elif "armed" in str(intrusiondetectionschema):
            thread_lock = threading.Lock()
            thread1 = MyThread(1, "Thread 1", 0, thread_lock)
            thread1.start()

    @classmethod
    def post(cls, intrusiondetectionschema):
        """Store the posted schema as the current backend state."""
        print(str(intrusiondetectionschema))
        print('handling post')
        be.intrusiondetection = intrusiondetectionschema

    @classmethod
    def delete(cls, ):
        """Remove the stored schema; raise KeyError when none exists."""
        print('handling delete')
        if be.intrusiondetection:
            del be.intrusiondetection
        else:
            raise KeyError('')

    @classmethod
    def get(cls, ):
        """Return the stored schema; raise KeyError when none exists."""
        print('handling get')
        if be.intrusiondetection:
            return be.intrusiondetection
        else:
            raise KeyError('')
| [
"a@a.com"
] | a@a.com |
44da64af3f47165d63c8570ec96bdb194d74670e | 2245f0acc3f5682129491b245acd3fd8ab2e4128 | /Desafio111/utilidadesCeV/ex_111.py | bd920ee20cdf37563ff655b912dc745b5276f3b7 | [] | no_license | wherculano/Curso-em-Video-Python | 89bed7b7e01f25ba47efa511304d18448a47a4bb | 5506645a46973a5ccd2c3d5c1ff782c51181b4bf | refs/heads/master | 2022-04-12T08:26:26.616135 | 2020-03-26T17:53:21 | 2020-03-26T17:53:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | """
Crie um pacote chamado utilidadesCeV que tenha dois módulos internos chamados moeda e dado.
Transfira todas as funções utilizadas nos desafios 107, 108, 109 e 110
para o primeiro pacote e mantenha tudo funcionando.
Ex:
>>> moeda.resumo(850, 35, 22)
----------------------------------
RESUMO DO VALOR
----------------------------------
Preço analisado: R$850,00
Dobro do preço: R$1700,00
Metade do preço: R$425,00
35% de aumento: R$1147,50
22% de redução: R$663,00
----------------------------------
"""
from Desafio111.utilidadesCeV import moeda
# preco = float(input('Digite o preço: R$'))
# moeda.resumo(preco, 80, 35)
| [
"wagherculano@hotmail.com"
] | wagherculano@hotmail.com |
84a35beba16bd47d8a2654e62502bffbe5037477 | 3c114c083af073421fc0becfa4b4471ba1d77de5 | /google/two_sum_bst.py | 2b3e3f1c2703a1d548d36af103cb120dc7ea5901 | [] | no_license | alonsovidales/interview_questions | 99f757c7e35c5ede450be25d3bebd54a18b1312b | 5e63e238950c2f6bdfd3ff48311d6c69a676d382 | refs/heads/master | 2021-01-17T12:06:48.419891 | 2018-03-25T08:44:14 | 2018-03-25T08:44:14 | 30,909,319 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | """
Given a BST and a number x, check whether exists two nodes in the BST whose sum
equals to x. You can not use one extra array to serialize the BST and do a 2sum
solver on it.
"""
class Bst(object):
class BstNode(object):
def __init__(self, v, l=None, r=None):
self.v = v
self.l = l
self.r = r
def __init__(self):
self._tree = None
def add(self, v):
if self._tree is None:
self._tree = self.BstNode(v)
else:
tree = self._tree
while True:
if tree.v > v:
if tree.l is not None:
tree = tree.l
else:
tree.l = self.BstNode(v)
return
else:
if tree.r is not None:
tree = tree.r
else:
tree.r = self.BstNode(v)
return
def exists(self, v, x=None, node=None):
if node is None:
node = self._tree
if node.v == v and v != x:
return True
if v > node.v:
return node.r is not None and self.exists(v, x, node.r)
else:
return node.l is not None and self.exists(v, x, node.l)
def two_sum(self, x, node=None):
if node is None:
node = self._tree
if self.exists(x-node.v, node.v):
return True
return ((node.r is not None and self.two_sum(x, node.r)) or
(node.l is not None and self.two_sum(x, node.l)))
import unittest
class TestBst(unittest.TestCase):
def test_two_sum(self):
bst = Bst()
bst.add(4)
bst.add(3)
bst.add(5)
bst.add(8)
bst.add(1)
self.assertTrue(bst.two_sum(6))
self.assertTrue(bst.two_sum(8))
self.assertFalse(bst.two_sum(2))
self.assertFalse(bst.two_sum(1))
if __name__ == '__main__':
unittest.main()
| [
"alonso.vidales@tras2.es"
] | alonso.vidales@tras2.es |
ad11a8c211c0d94f0c80515ff0321a91d0538ace | 9f0532cd700a9cdaefeb6274608aa971c23a3be8 | /raspi_io/graph.py | 28c8fc0622e05512c33b588db3817e9af5d232a6 | [
"MIT"
] | permissive | Kassadinsw/raspi-io | ac494ede3a6404228eac19261d5b9b2eaba69f8f | 159e45120ca0ffc86549ad83ef31c140a8dd6e21 | refs/heads/master | 2020-05-20T11:23:19.134443 | 2018-07-20T08:41:39 | 2018-07-20T08:41:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,620 | py | # -*- coding: utf-8 -*-
import os
from PIL import Image
from .client import RaspiWsClient
from .core import RaspiBaseMsg, RaspiAckMsg, get_binary_data_header
__all__ = ['MmalGraph', 'GraphInit', 'GraphClose', 'GraphProperty']
class GraphInit(RaspiBaseMsg):
_handle = 'init'
_properties = {'display_num'}
def __init__(self, **kwargs):
super(GraphInit, self).__init__(**kwargs)
class GraphClose(RaspiBaseMsg):
_handle = 'close'
def __init__(self, **kwargs):
super(GraphClose, self).__init__(**kwargs)
class GraphProperty(RaspiBaseMsg):
_handle = 'get_property'
_properties = {'property'}
URI, IS_OPEN, DISPLAY_NUM = 1, 2, 3
def __init__(self, **kwargs):
super(GraphProperty, self).__init__(**kwargs)
class MmalGraph(RaspiWsClient):
    # Display selectors (values defined by the raspi-io protocol).
    LCD = 4
    HDMI = 5
    # Formats converted to PNG before transfer when reduce_size is enabled.
    REDUCE_SIZE_FORMAT = ("BMP",)
    # Endpoint path derived from this module's name ("graph").
    PATH = __name__.split(".")[-1]
    def __init__(self, host, display_num=HDMI, reduce_size=True, timeout=3, verbose=1):
        """Display a graph on raspberry pi specified monitor
        :param host: raspberry pi address
        :param display_num: display monitor number (HDMI or LCD)
        :param reduce_size: reduce bmp graph size then transfer
        :param timeout: raspi-io timeout unit second
        :param verbose: verbose message output
        """
        super(MmalGraph, self).__init__(host, str(display_num), timeout, verbose)
        # Ask the server to bind a graph instance to the chosen display.
        ret = self._transfer(GraphInit(display_num=display_num))
        if not isinstance(ret, RaspiAckMsg) or not ret.ack:
            raise RuntimeError(ret.data)
        self.__uri = ""
        self.__reduce_size = reduce_size
    def __del__(self):
        # Best-effort close; AttributeError can occur when __init__ failed early.
        try:
            self.close()
        except AttributeError:
            pass
    @property
    def uri(self):
        # Path of the most recently (successfully) opened image, "" if none.
        return self.__uri
    @property
    def is_open(self):
        # Ask the server whether a graph is currently open; False on failure.
        ret = self._transfer(GraphProperty(property=GraphProperty.IS_OPEN))
        return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else False
    @property
    def display_num(self):
        # Ask the server which display is bound; None on failure.
        ret = self._transfer(GraphProperty(property=GraphProperty.DISPLAY_NUM))
        return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None
    def open(self, path, reduce_size=None):
        """Open an image display on raspberry pi via mmal video core
        :param path: local path of the image to display
        :param reduce_size: reduce bmp graph size then transfer
        :return: True on success, False on failure
        """
        self.__uri = ""
        # Temporary PNG created in the cwd if format conversion is needed.
        png_path = "{}.png".format(os.path.basename(path))
        reduce_size = reduce_size if reduce_size is not None else self.__reduce_size
        try:
            # Open original file
            image = Image.open(path)
            fmt = image.format
            # Reduce image size to png format
            if reduce_size and fmt in self.REDUCE_SIZE_FORMAT:
                image.save(png_path)
                path = png_path
                fmt = "PNG"
            # Read data to memory
            with open(path, "rb") as fp:
                data = fp.read()
            # First transfer header info
            if self._send_binary_data(get_binary_data_header(data, fmt, "open"), data):
                # NOTE(review): after conversion this records the temporary
                # png path, which the finally block deletes -- confirm uri is
                # meant to be informational only.
                self.__uri = path
                return True
            else:
                return False
        except IOError as err:
            self._error("Open error:{}".format(err))
            return False
        finally:
            # Clean up the temporary conversion artifact, if one was written.
            if os.path.isfile(png_path):
                os.remove(png_path)
    def close(self):
        # Tear down the remote graph; False on transfer failure.
        ret = self._transfer(GraphClose())
        return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else False
| [
"amaork@gmail.com"
] | amaork@gmail.com |
22cce2269ea2ed4befefe7ca4abc2ac9e571ba4b | de861acdf4d51a766512be0834055ad403916677 | /xii/meshing/tikz.py | 1bb1f2099ba7f87d45925685e176cfef56a97c9b | [
"MIT"
] | permissive | ChaogeCanFly/fenics_ii | 847c3faf4e1bf591addbe5a279980497f87d9c90 | 49a18855d077ab6e63e4f0b4d6a2f061de7f36ba | refs/heads/master | 2022-12-05T04:25:53.241712 | 2020-04-17T07:40:32 | 2020-04-17T07:40:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,979 | py | from dolfin import *
# LaTeX skeleton for a standalone TikZ picture; tikzify_2d_mesh() fills
# the %(body)s placeholder with \fill/\draw/\node commands.
template=r'''
\documentclass{standalone}
\usepackage{tikz}
\usetikzlibrary{calc}
\usetikzlibrary{shapes, snakes, patterns, arrows}
\usepackage{pgfplots}
\usepackage{pgfplotstable}
\usepackage{amsmath, amssymb}
\begin{document}
\begin{tikzpicture}
%(body)s
\end{tikzpicture}
\end{document}
'''
def tikzify_2d_mesh(facet_info, cell_info=None, vertex_info=None):
    '''Standalone Tikz representation of the mesh.

    facet_info is a (facet_markers, style_map) tuple or a list of such
    tuples; a style of None skips the entity. cell_info and vertex_info are
    optional (markers, style_map) pairs; the vertex style map yields
    (style, marker_text) pairs. Returns the full LaTeX document as a string.
    '''
    body = []
    # Filled cells are emitted first so facet lines and vertex labels are
    # drawn on top of them.
    if cell_info is not None:
        cell_markers, cell_style_map = cell_info
        assert cell_style_map is not None
        mesh = cell_markers.mesh()
        x = mesh.coordinates()
        code = r'\fill[%s] (%g, %g) -- (%g, %g) -- (%g, %g) -- cycle;'
        # One filled triangle per cell, styled by its marker value.
        for idx, cell in enumerate(mesh.cells()):
            style = cell_style_map[cell_markers[idx]]
            body.append(code % ((style, ) + tuple(x[cell].flatten())))
    # Accept a single (markers, style_map) pair as well as a list of them.
    if isinstance(facet_info, tuple):
        facet_info = [facet_info]
    for fi in facet_info:
        facet_markers, facet_style_map = fi
        mesh = facet_markers.mesh()
        assert mesh.geometry().dim() == 2
        x = mesh.coordinates()
        dim = facet_markers.dim()
        assert dim == 1
        # Build facet and facet-to-vertex connectivity before iterating.
        mesh.init(dim)
        mesh.init(dim, 0)
        line = r'\draw[%(style)s] (%(x00)g, %(x01)g) -- (%(x10)g, %(x11)g);'
        for facet in facets(mesh):
            style = facet_style_map[facet_markers[facet]]
            # A style of None hides the facet.
            if style is not None:
                x0, x1 = x[facet.entities(0)]
                body.append(line % {'style': style, 'x00': x0[0], 'x01': x0[1],
                                    'x10': x1[0], 'x11': x1[1]})
    if vertex_info is not None:
        if isinstance(vertex_info, tuple):
            vertex_info = [vertex_info]
        for vi in vertex_info:
            vertex_markers, vertex_style_map = vi
            assert vertex_style_map is not None
            mesh = vertex_markers.mesh()
            x = mesh.coordinates()
            code = r'\node[%s] at (%g, %g) {%s};'
            for idx, vtx in enumerate(mesh.coordinates()):
                style, marker = vertex_style_map[vertex_markers[idx]]
                if style is not None:
                    body.append(code % (style, vtx[0], vtx[1], marker))
    body = '\n'.join(body)
    return template % {'body': body}
def load_mesh(h5_file, data_sets):
    '''
    Read in mesh and mesh functions from the data set in HDF5File.
    Data set is a tuple of (topological dim of entities, data-set-name)
    '''
    # NOTE(review): mpi_comm_world() is the pre-2018.1 dolfin API; newer
    # releases spell this MPI.comm_world -- confirm the pinned dolfin version.
    h5 = HDF5File(mpi_comm_world(), h5_file, 'r')
    mesh = Mesh()
    h5.read(mesh, 'mesh', False)
    mesh_functions = []
    # Missing datasets produce None placeholders so the result stays
    # positionally aligned with data_sets.
    for dim, ds in data_sets:
        if h5.has_dataset(ds):
            f = MeshFunction('size_t', mesh, dim, 0)
            h5.read(f, ds)
        else:
            f = None
        mesh_functions.append(f)
    return mesh, mesh_functions
# -------------------------------------------------------------------
if __name__ == '__main__':
    from itertools import repeat

    # Demo 1: draw only the facets marked 1 (in red) of a stored mesh;
    # every other marker maps to None and is skipped.
    path = './round_bdry.geo_d0.03125_0.5.h5'
    dim = 2
    mesh, [subdomains, bdries] = load_mesh(path, data_sets=((dim, 'volumes'), (dim-1, 'surfaces'), ))
    # style_map = dict(zip(set(bdries.array()), repeat('black!50!white, very thin')))
    style_map = dict(zip(set(bdries.array()), repeat(None)))
    style_map[1] = 'red, very thin'
    # BUG FIX: tikzify_2d_mesh expects facet_info as one (markers, style_map)
    # tuple; previously the markers and the style map were passed as two
    # positional arguments, so the dict landed in cell_info and was unpacked.
    code = tikzify_2d_mesh((bdries, style_map))
    with open('mesh_2d.tex', 'w') as f: f.write(code)

    # Demo 2: facet + cell plot of a small unit square mesh.
    mesh = UnitSquareMesh(2, 2)
    facet_f = MeshFunction('size_t', mesh, 1, 0)
    DomainBoundary().mark(facet_f, 1)
    facet_style_map = {0: 'black', 1: 'black'}
    cell_f = MeshFunction('size_t', mesh, 2, 0)
    cell_f[0] = 1
    cell_style_map = {0: 'red', 1: 'blue'}
    # BUG FIX: a comma was missing between the two arguments, turning this
    # into a call of the tuple (facet_f, facet_style_map) -> TypeError.
    code = tikzify_2d_mesh((facet_f, facet_style_map),
                           (cell_f, cell_style_map))
    # NOTE(review): this overwrites the mesh_2d.tex written by demo 1 --
    # use distinct file names if both outputs should be kept.
    with open('mesh_2d.tex', 'w') as f: f.write(code)
| [
"miroslav.kuchta@gmail.com"
] | miroslav.kuchta@gmail.com |
0d30bcc4d29dbf9b8f231055058bc5135d84c218 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/420/usersdata/329/88117/submittedfiles/exe11.py | 36546467df9037e9e9fe2183694c2c0da1de4c3f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # -*- coding: utf-8 -*-
# Student exercise: read an 8-digit number and iterate over its digit sum
# (prompt text is Portuguese: "type a number with 8 digits").
n = int(input("digite um numero com 8 algarismos: "))
resto = n % 10
n = (n - resto)/10
# BUG: `soma` is read here before it is ever assigned -> NameError at runtime.
soma = soma + resto
# NOTE(review): as captured, none of the loop bodies below are indented, so
# the file is also an IndentationError as-is; in addition the last three
# while-loops never modify their conditions and would not terminate.
while soma < 72 :
print ('%d' % soma)
while soma > 1:
resto = n % 10
n = (n - resto)/10
soma = soma + resto
print ('%d' % soma)
while soma > 72:
print('NAO SEI')
while n < 1:
print('NAO SEI')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b9ae865af4885206e8591c08daf99dfb5e4e0c87 | 048eaa32bcd05ee278e6f391f9091f1848022987 | /dbdk/ai_based_adaptive_security_system/test_model_ig_init_train.py | b0db71af2f9704dba04a033ca5d993f8cdd48e5d | [] | no_license | kendricklee91/portfolio | 613e14fd6f13027c6d68d56b1b3d96b186de56b1 | ef92604a1e14a3b81ae5e91883c07501def6b3da | refs/heads/master | 2022-11-15T01:00:55.724686 | 2022-10-27T06:47:44 | 2022-10-27T06:47:44 | 170,043,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | from ksv_model.model_payload import known_model, known_model_retrain, known_model_hopt, known_model_inference, data_load_save
from ksv_model.model_ig import ModelIG, ig_data_load_save
import ksv_model.config.const as cst
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import roc_auc_score
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval
from scipy import stats
import pandas as pd
import unittest
import json
import os
# import pprint # for test
class TestModel1st(unittest.TestCase):
    # Fixture
    def setUp(self):
        pass
    def tearDown(self):
        pass
    # Smoke test: train the IG model once and persist model + weights.
    # (The previous comment said "sql injection model" -- apparently a
    # copy-paste leftover from another test module.)
    def test_ig_train(self):
        # Input CSV and destinations for the saved model and its weights.
        load_file_dir = os.path.join(cst.PATH_DATA, 'ig_ps_20190830_01.csv')
        save_model_dir = os.path.join(cst.PATH_MODEL, 'ig_train_01.json')
        save_model_wt_dir = os.path.join(cst.PATH_MODEL, 'ig_train_01.h5')
        # model parameters
        model_params = os.path.join(cst.PATH_CONFIG, 'model_ig_param.json')
        with open(model_params, encoding='utf-8') as json_file:
            params = json.load(json_file)
        kmt = ModelIG()
        model, hist, loss, acc = kmt.create_model(load_file_dir, params)
        # print(acc) # for test
        # save model
        dls = ig_data_load_save()
        # NOTE(review): save_result is never asserted on -- the test only
        # verifies that training and saving do not raise.
        save_result = dls.save_model_and_weight(model, save_model_dir, save_model_wt_dir)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
"noreply@github.com"
] | kendricklee91.noreply@github.com |
2cba5f29bc01c1976a25da33aa9cd8b4d8ef6a2c | 49bf36ba213b28d4aaeb63feba632fb05710d565 | /Python/BOJ/2941.py | 24dae225acced1392db2bc7a11b07906bb71c616 | [] | no_license | ohmozi/Algorithm | fc3fc861d4125b642d64b6e344eca806d137d0f2 | 436a376b009e8c073ceebc6b1e29b32b63c15a07 | refs/heads/master | 2023-07-23T16:01:49.774331 | 2021-08-16T02:08:56 | 2021-08-16T02:08:56 | 284,995,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # 크로아티아 알파벳
# Croatian alphabet (BOJ 2941): some letters are written with two
# characters, and "dz=" is the only three-character letter.
TWO_CHAR_LETTERS = {'c=', 'c-', 'd-', 'lj', 'nj', 's=', 'z='}
word = input()
pos = 0
count = 0
# Greedily try the 3-char letter first, then a 2-char letter, then a
# single character; each match counts as one Croatian letter.
while pos < len(word):
    if word[pos:pos + 3] == 'dz=':
        pos += 3
    elif word[pos:pos + 2] in TWO_CHAR_LETTERS:
        pos += 2
    else:
        pos += 1
    count += 1
print(count)
"gown10195@gmail.com"
] | gown10195@gmail.com |
75c467a86726f93b6c2fe1be168a9a16d4ee2d79 | 6a3af6fe669b2e17db1fa7d0751cbc4e04948079 | /fn_sdk_test/fn_sdk_test/components/funct_utilities_pdfid.py | a99c6c6446ee1d02426ea7c1c986e1446f201611 | [
"MIT"
] | permissive | jjfallete/resilient-community-apps | 5f0a728fe0be958acc44d982bf0289959f84aa20 | 2e3c4b6102555517bad22bf87fa4a06341714166 | refs/heads/master | 2022-04-17T13:20:36.961976 | 2020-04-13T07:03:54 | 2020-04-13T07:03:54 | 169,295,943 | 1 | 0 | MIT | 2020-04-13T07:03:56 | 2019-02-05T19:06:57 | Python | UTF-8 | Python | false | false | 2,539 | py | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
PACKAGE_NAME = "fn_sdk_test"
class FunctionComponent(ResilientComponent):
    """Component that implements Resilient function 'utilities_pdfid'"""
    def __init__(self, opts):
        """constructor provides access to the configuration options"""
        super(FunctionComponent, self).__init__(opts)
        # Section of app.config belonging to this package ({} when absent).
        self.options = opts.get(PACKAGE_NAME, {})
    @handler("reload")
    def _reload(self, event, opts):
        """Configuration options have changed, save new values"""
        self.options = opts.get(PACKAGE_NAME, {})
    @function("utilities_pdfid")
    def _utilities_pdfid_function(self, event, *args, **kwargs):
        """Function: Produces summary information about the structure of a PDF file, using Didier Stevens' pdfid (https://blog.didierstevens.com/programs/pdf-tools/). Provide the PDF file content as a base64-encoded string, for example the output from the “Attachment to Base64” function.
        This function is useful in initial triage of suspicious email attachments and other files. It allows you to identify PDF documents that contain (for example) JavaScript or that execute an action when opened. PDFiD also handles name obfuscation. The combination of PDF automatic action and JavaScript makes a document very suspicious."""
        try:
            # Get the wf_instance_id of the workflow this Function was called in
            wf_instance_id = event.message["workflow_instance"]["workflow_instance_id"]
            yield StatusMessage("Starting 'utilities_pdfid' running in workflow '{0}'".format(wf_instance_id))
            # Get the function parameters:
            base64content = kwargs.get("base64content")  # text
            log = logging.getLogger(__name__)
            # NOTE(review): this logs the full base64 payload, which may be
            # very large for big PDFs -- consider logging only its length.
            log.info("base64content: %s", base64content)
            ##############################################
            # PUT YOUR FUNCTION IMPLEMENTATION CODE HERE #
            ##############################################
            yield StatusMessage("Finished 'utilities_pdfid' that was running in workflow '{0}'".format(wf_instance_id))
            # Placeholder payload until the implementation above is filled in.
            results = {
                "content": "xyz"
            }
            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception:
            yield FunctionError()
| [
"ihor.husar@ibm.com"
] | ihor.husar@ibm.com |
4fd4c2d22d7b4d50e4eb887b4ecc430a0c2dace9 | f340b9f47aaf11d95911074efd16e2878b4608c5 | /200111/Find_Leaves_of_Binary_Tree.py | 18926424e163a47aee3fc9c85898b4f174fd9609 | [] | no_license | Jsonghh/leetcode | 150020d1250a7e13e7387a545b4eb7df0de8f90b | 3a83c0b0bcc43f458f7fc54764f60e1104fcc12e | refs/heads/master | 2020-11-25T03:12:48.842151 | 2020-02-11T02:56:58 | 2020-02-11T02:56:58 | 228,475,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def findLeaves(self, root: TreeNode) -> List[List[int]]:
        """Collect leaf values generation by generation.

        Destructive: is_leaf() detaches pruned leaves from their parents,
        so each pass over the shrinking tree yields the next layer.
        """
        ans = []
        if not root:
            return ans
        while root:
            leaves = []
            if self.is_leaf(root, leaves):
                # The root itself became a leaf: record it and stop.
                leaves.append(root.val)
                root = None
            ans.append(leaves[:])
        return ans
    def is_leaf(self, node, leaves):
        # Post-order walk: append values of child leaves to `leaves`,
        # prune those children (set to None), and report whether `node`
        # was already a leaf on entry.
        if not node.left and not node.right:
            return True
        if node.left and self.is_leaf(node.left, leaves):
            leaves.append(node.left.val)
            node.left = None
        if node.right and self.is_leaf(node.right, leaves):
            leaves.append(node.right.val)
            node.right = None
        return False
| [
"jiesonghe@outlook.com"
] | jiesonghe@outlook.com |
793b94513930d4a6f4168891222bebd9d24bc2cf | 75e641d2b33d0865e19193877e48748b3de5007c | /Parameter-Config/parameters_examples.py | 1b708cdceb37bafbe36ad059ac0e24e380d2c198 | [
"MIT"
] | permissive | bergolho/fractal-tree | dc3f7918ab6f1a6f714aaec56ee15e092e180733 | c55a375585aac5168063fe34d078b54d3f43364c | refs/heads/master | 2020-06-14T02:42:36.908723 | 2020-02-06T17:57:04 | 2020-02-06T17:57:04 | 194,871,693 | 0 | 0 | null | 2019-07-02T13:45:39 | 2019-07-02T13:45:39 | null | UTF-8 | Python | false | false | 5,275 | py | # -*- coding: utf-8 -*-
"""
This module contains the Parameters class that is used to specify the input parameters of the tree.
"""
import numpy as np
class Parameters():
    """Class to specify the parameters of the fractal tree.
    Attributes:
        meshfile (str): path and filename to obj file name.
        filename (str): name of the output files.
        init_node (numpy array): the first node of the tree.
        second_node (numpy array): this point is only used to calculate the initial direction of the tree and is not included in the tree. Please avoid selecting nodes that are connected to the init_node by a single edge in the mesh, because it causes numerical issues.
        init_length (float): length of the first branch.
        N_it (int): number of generations of branches.
        length (float): average lenght of the branches in the tree.
        std_length (float): standard deviation of the length. Set to zero to avoid random lengths.
        min_length (float): minimum length of the branches. To avoid randomly generated negative lengths.
        branch_angle (float): angle with respect to the direction of the previous branch and the new branch.
        w (float): repulsivity parameter.
        l_segment (float): length of the segments that compose one branch (approximately, because the lenght of the branch is random). It can be interpreted as the element length in a finite element mesh.
        Fascicles (bool): include one or more straigth branches with different lengths and angles from the initial branch. It is motivated by the fascicles of the left ventricle.
        fascicles_angles (list): angles with respect to the initial branches of the fascicles. Include one per fascicle to include.
        fascicles_length (list): length of the fascicles. Include one per fascicle to include. The size must match the size of fascicles_angles.
        save (bool): save text files containing the nodes, the connectivity and end nodes of the tree.
        save_paraview (bool): save a .vtu paraview file. The tvtk module must be installed.
    """
    def __init__(self):
        # Active configuration: rabbit-heart RV example. The LV variant is
        # kept below as commented-out lines, and two more examples (sphere,
        # block) are kept as inert triple-quoted blocks at the end.
        # Rabbit heart example
        self.meshfile='Mesh/WSA_i.obj'
        # LV
        #self.filename='Purkinje-Networks/LV-rabbit'
        #self.init_node=np.array([-7.35027,4.06893,0.244092])
        #self.second_node=np.array([-6.97912,3.95322,0.334369])
        #self.init_length=8.0
        #Number of iterations (generations of branches)
        #self.N_it=7
        #Median length of the branches
        #self.length=2.0
        #Standard deviation of the length
        #self.std_length = np.sqrt(0.2)*self.length
        #Min length to avoid negative length
        #self.min_length = self.length/2.0
        #self.branch_angle=0.1
        #self.w=0.02
        #Length of the segments (approximately, because the lenght of the branch is random)
        #self.l_segment=0.1
        #self.Fascicles=True
        # RV
        self.filename='Purkinje-Networks/RV-rabbit'
        self.init_node=np.array([-7.559,7.542,0.111])
        self.second_node=np.array([-7.18805,7.47768,0.237085])
        self.init_length=9.0
        #Number of iterations (generations of branches)
        self.N_it=7
        #Median length of the branches
        self.length=2.0
        #Standard deviation of the length
        self.std_length = np.sqrt(0.2)*self.length
        #Min length to avoid negative length
        self.min_length = self.length/2.0
        self.branch_angle=0.07
        self.w=0.03
        #Length of the segments (approximately, because the lenght of the branch is random)
        self.l_segment=0.1
        self.Fascicles=True
        ###########################################
        # Fascicles data
        ###########################################
        self.fascicles_angles=[-1.5,.2] #rad
        self.fascicles_length=[.5,.5]
        # Save data?
        self.save=True
        self.save_paraview=True
        '''
        # Sphere example
        self.meshfile='Mesh/sphere.obj'
        self.filename='sphere-line'
        self.init_node=np.array([-1.0 ,0., 0.])
        self.second_node=np.array([-0.964, 0.00, 0.266 ])
        self.init_length=0.5
        #Number of iterations (generations of branches)
        self.N_it=10
        #Median length of the branches
        #self.length=.3
        self.length=2.0
        #Standard deviation of the length
        self.std_length = np.sqrt(0.2)*self.length
        #Min length to avoid negative length
        self.min_length = self.length/10.
        self.branch_angle=0.15
        self.w=0.1
        #Length of the segments (approximately, because the lenght of the branch is random)
        self.l_segment=.01
        self.Fascicles=True
        '''
        '''
        # Block example
        #self.meshfile='Mesh/block_i.obj'
        #self.filename='block-test'
        #self.init_node=np.array([0.14,0.24,0.04])
        #self.second_node=np.array([-0.04,0.06,-0.06])
        #self.init_length=0.5
        #Number of iterations (generations of branches)
        self.N_it=10
        #Median length of the branches
        #self.length=.3
        self.length=2.0
        #Standard deviation of the length
        self.std_length = np.sqrt(0.2)*self.length
        #Min length to avoid negative length
        self.min_length = self.length/10.
        self.branch_angle=0.15
        self.w=0.1
        #Length of the segments (approximately, because the lenght of the branch is random)
        self.l_segment=.01
        self.Fascicles=True
        '''
| [
"berg@ice.ufjf.br"
] | berg@ice.ufjf.br |
8cc06657b6869b3435b2d98c650dc7996905f496 | 8644a2174c3cb7ccfe211a5e49edffbcc3a74a46 | /Learning/Algorithms/DevideAndConquer/longest_com_prefix.py | eafc03411d5fb9a2d33de218b11db52537c54464 | [] | no_license | bhavya2403/Learning-Python | 9e7cc9dee21172321fb217cae27c8072357f71ce | 3898211b357fbab320010a82a4811b68611d0422 | refs/heads/main | 2023-03-24T03:19:49.989965 | 2021-03-22T20:11:04 | 2021-03-22T20:11:04 | 315,962,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | def combineTwo(strA, strB):
i = 0
m, n = len(strA), len(strB)
while i < min(m, n):
if strB[i] != strA[i]:
if not i:
return ''
return strB[:i]
i += 1
return strB[:i]
def longestCommonPrefix(arr, l, r):
    """Longest common prefix of arr[l..r], by divide and conquer."""
    if l == r:
        return arr[l]
    mid = (l + r) // 2
    left_prefix = longestCommonPrefix(arr, l, mid)
    right_prefix = longestCommonPrefix(arr, mid + 1, r)
    return combineTwo(left_prefix, right_prefix)
# Demo: prints "gee", the common prefix of the four words.
print(longestCommonPrefix(["geeksforgeeks", "geeks", "geek", "geezer"], 0, 3))
"noreply@github.com"
] | bhavya2403.noreply@github.com |
0ce215260f26c84f7bd0381d434be95578624498 | e621a2e763709336894bb33623cf6d20d7858c6f | /Stepwise.py | 373650f95dae4d0688a030c7efa06ead06e3cbae | [] | no_license | myliu/document-classification | 49d688fe0b5fdd79a6d2fca36b78277d273762cd | 7b9078912a4fd770ea614660ec770ccbc23bfdd1 | refs/heads/master | 2021-01-01T05:38:25.318323 | 2013-09-16T01:22:53 | 2013-09-16T01:22:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,586 | py | ## author: Mingyu Liu
## author: Shi He
import numpy as np
import string
from Dataset import *
def main():
    # Greedy forward stepwise feature selection for a two-class text
    # classifier trained by ridge regression. NOTE: this file is Python 2
    # (print statements, dict.iteritems, tuple-unpacking lambda).
    d = Dataset("rec.sport.hockey.txt", "rec.sport.baseball.txt", cutoff=200)
    (Xtrain, Ytrain, Xtest, Ytest) = d.getTrainAndTestSets(0.8, seed=100)
    lam = 100
    cols = []
    currentError = 1
    n = Xtrain.shape[1]
    # dic maps feature index -> training-error improvement it contributed.
    dic = {}
    ## i is the number of features to be added to cols
    for i in range(40):
        bestJ = 0
        bestErrorRate = 1
        # Try every candidate feature on top of the current selection and
        # keep the one giving the lowest training error.
        for j in range(n):
            cols.append(j)
            w = trainRidge(Xtrain[:, cols], Ytrain, lam)
            errorRate = computeError(Xtrain[:, cols], Ytrain, w)
            if errorRate < bestErrorRate:
                bestJ = j
                bestErrorRate = errorRate
            ## print 'Best error rate is ' + str(bestErrorRate)
            cols.pop()
        # Stop when no candidate improves the training error.
        if bestErrorRate >= currentError:
            break
        else:
            cols.append(bestJ)
            dic[bestJ] = currentError - bestErrorRate
            currentError = bestErrorRate
            print 'Current error rate is ' + str(currentError)
    # Refit on the selected columns and report train/test error.
    w = trainRidge(Xtrain[:, cols], Ytrain, lam)
    trainError = computeError(Xtrain[:, cols], Ytrain, w)
    print 'Train error rate is ' + str(trainError)
    testError = computeError(Xtest[:, cols], Ytest, w)
    print 'Test error rate is ' + str(testError)
    ## find the top 10 features
    wordList = d.getWordList()
    # Sort features by their error improvement, descending.
    topCols = [(key, value) for key, value in sorted(dic.iteritems(), key = lambda(k, v) : (v, k), reverse = True)]
    topCols = topCols[: 10]
    topFeatures = [wordList[index] for (index, value) in topCols]
    for f in topFeatures:
        print f
def trainRidge(Xtrain, Ytrain, lam):
    """Closed-form ridge regression: w = (X'X + lam*I)^-1 X'y.

    Returns a numpy matrix of weights (one column).
    """
    X = np.asmatrix(Xtrain)
    y = np.asmatrix(Ytrain)
    regularized_gram = X.T * X + lam * np.eye(X.shape[1])
    return np.linalg.inv(regularized_gram) * X.T * y
def computeError(Xtest, Ytest, w):
    """Return the misclassification rate of sign(x.w) against Ytest.

    A row scores positive -> predicted label 1, otherwise -1; the result
    is the fraction of rows whose prediction differs from Ytest.
    """
    wrong = 0
    total = 0
    for row in range(Ytest.size):
        score = Xtest[row] * w
        # sign(score) > 0 means class 1, otherwise class -1.
        predicted = 1 if score > 0 else -1
        if predicted != Ytest[row]:
            wrong += 1
        total += 1
    return wrong * 1.0 / total
# Script entry point.
if __name__ == "__main__":
    main()
| [
"mliu@quantcast.com"
] | mliu@quantcast.com |
92e82e328ade7f03df9e0af8ba121385d3be56e6 | 98dde5ccdb145de9aab3e7233c3ec6c9c13a0649 | /controller/qt_classes/LineEditDelegate.py | eaedc896fc950ae5c89c31e9713ccb06d136e5a0 | [] | no_license | teamlm2/lm2 | f586aaf7af44cbb64964f2c2bfeffa3e902d4752 | 416cc189b6fc16bf61583891783eef7e4a9e1278 | refs/heads/master | 2018-12-22T18:38:19.360889 | 2018-10-04T02:12:11 | 2018-10-04T02:12:11 | 109,807,271 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | __author__ = 'anna'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class LineEditDelegate(QStyledItemDelegate):
    """Item delegate giving one view column a QLineEdit editor with an
    auto-completing, filter-as-you-type popup (PyQt4)."""
    def __init__(self, column, completer_value_list, parent):
        super(LineEditDelegate, self).__init__(parent)
        # Only this column receives the line-edit editor.
        self.line_edit_column = column
        # Completion pipeline: string-list model -> filter proxy -> completer.
        self.completer_model = QStringListModel(completer_value_list)
        self.completer_proxy_model = QSortFilterProxyModel()
        self.completer_proxy_model.setSourceModel(self.completer_model)
        self.completer = QCompleter(self.completer_proxy_model, self, activated=self.on_completer_activated)
        self.completer.setCompletionMode(QCompleter.PopupCompletion)
        # NOTE(review): this shadows QObject.parent() -- self.parent is now an
        # attribute, not the method; confirm nothing relies on the method.
        self.parent = parent
    def createEditor(self, widget, item, index):
        # Return a completing QLineEdit for the configured column; other
        # columns/invalid indexes fall through and implicitly return None.
        if not index is None:
            if index.isValid():
                if index.column() == self.line_edit_column:
                    editor = QLineEdit(widget)
                    editor.setCompleter(self.completer)
                    # Narrow the completion popup as the user types.
                    editor.textEdited.connect(self.completer_proxy_model.setFilterFixedString)
                    return editor
    @pyqtSlot(str)
    def on_completer_activated(self, text):
        # NOTE(review): this slot is connected to completer.activated and
        # re-emits the same signal -- looks like potential re-entry; confirm
        # the intended behaviour.
        if not text:
            return
        self.completer.activated[str].emit(text)
"aagii_csms@yahoo.com"
] | aagii_csms@yahoo.com |
6dbec1025dda199acee235c6639074adb8892917 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/rna-transcription/ea888a3beaac408f9f93b24fd31d9d7a.py | afffcc7b61868c9975cd535c55505f2650b87b53 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 490 | py | class DNA:
    # Class-level default; every instance overwrites it in __init__.
    dna_sequence = ''
    def __init__(self, value):
        # Store the raw DNA string; to_rna() reads it.
        self.dna_sequence = value
def to_rna(self):
new_sequence = ''
for letter in self.dna_sequence:
if letter == 'G':
new_sequence += 'C'
elif letter == 'C':
new_sequence += 'G'
elif letter == 'T':
new_sequence += 'A'
elif letter == 'A':
new_sequence += 'U'
return new_sequence
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
cd676e15a7d03535de5ff911140dc2d8ab4aa212 | ed1a4b2dba31905ccac09136a693c2d5c7697de8 | /helpers/create_user_csv.py | e397736e82540a5b553663cbada881455e949a72 | [] | no_license | twkampala/dhis2-config-exercise | 5580666cebcdf5c65cbb81174670d87266e98c8a | 76f835ea36ec6df64c04d6a207ef09176161843b | refs/heads/master | 2021-01-01T16:06:12.658622 | 2015-07-08T03:49:09 | 2015-07-08T03:49:09 | 38,424,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from faker import Faker
def decide_role(n, roles):
    """Alternate roles by position: roles[1] when (n + 1) is even, else roles[0]."""
    admin_turn = (n + 1) % 2 == 0
    return roles[1] if admin_turn else roles[0]
def create_csv_file(path, number_of_users):
    # Write a CSV of fake users: one header row + number_of_users data rows.
    # Relies on the third-party `faker` package for the personal data.
    f = Faker()
    roles = ['ordinary user', 'admin user']
    default_password = "secretPassword"
    with open(path, "w") as file_handle:
        file_handle.write("First Name,Last Name,Username,Email Address,Role,Password\n")
        for n in range(number_of_users):
            role = decide_role(n, roles)
            password = default_password
            # NOTE(review): values are written raw -- a generated name that
            # contains a comma would corrupt the CSV; the csv module quotes.
            # NOTE(review): recent Faker versions spell this f.user_name();
            # confirm f.username() exists in the pinned faker version.
            file_handle.write("%s,%s,%s,%s,%s,%s\n" % (f.first_name(), f.last_name(), f.username(), f.email(), role, password))
# Demo entry point: generate a small CSV with three fake users.
if __name__ == "__main__":
    create_csv_file("users.csv", 3)
| [
"jmured@gmail.com"
] | jmured@gmail.com |
6aa7b9e0e5cd9dc45f0c134f9da1ce1c4e431b5d | bcdf30ab17d406643fb8ec01bafcd6cbf625bb44 | /product_brand_pricelist/__openerp__.py | 14df672457ad1d4cab78b820f20afd4fcb8344fe | [] | no_license | more2makeTim/odoo-extra-addons | a37915da3407b38cf3fcfbdbecb67435cb7e8f76 | ac81232e4d360d8cd645b2d3471da8779d77a4a5 | refs/heads/8.0 | 2020-04-21T17:56:16.337747 | 2018-07-06T10:42:48 | 2018-07-06T10:42:48 | 94,529,482 | 0 | 0 | null | 2017-06-16T09:39:22 | 2017-06-16T09:39:22 | null | UTF-8 | Python | false | false | 1,303 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2015 ERP|OPEN (www.erpopen.nl).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo 8 module manifest for the "Product Brand Pricelist" addon.
{
    'name': 'Product Brand Pricelist',
    'author': 'ERP|OPEN, André Schenkels',
    'version': '8.0.1.0.0',
    'website': 'www.erpopen.nl',
    'license': 'AGPL-3',
    'category': 'Product',
    # Modules that must be installed before this one.
    'depends': [
        'product',
        'product_brand'
    ],
    # XML data/views loaded on install or update.
    'data': [
        'views/product_pricelist_item.xml',
    ]
}
| [
"a.schenkels@ictstudio.eu"
] | a.schenkels@ictstudio.eu |
afdcc0c01670b6fc5fa0699e55af74a39a6142d1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_148/ch129_2020_04_01_17_10_28_115191.py | 34a06276257b7733d2482ed96600077a526ce61a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | def verifica_quadrado_perfeito(x):
i = 1
n = x-i
while n!=i:
n == x-i
i+=2
if x-i == 0:
return True
elif x-i < 0:
return False | [
"you@example.com"
] | you@example.com |
a7c78601b7a6b035ad14b7ba9cb3399e16ee8108 | 6e1d6d058907e689207ca51525adb0fc11fb5f54 | /Chapter05/Exercise5.03/bookr/reviews/tests.py | 312a8d690b716c2aff67cfdb22e04c6bdf7f9d28 | [
"MIT"
] | permissive | lmoshood/The-Django-Workshop | a083b9f171e7f8388abd51ea82927377721d01a9 | 52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62 | refs/heads/master | 2022-04-20T20:13:59.917020 | 2020-04-19T23:23:19 | 2020-04-19T23:23:19 | 259,112,469 | 0 | 1 | null | 2020-04-26T19:11:57 | 2020-04-26T19:11:56 | null | UTF-8 | Python | false | false | 2,334 | py | import os
from urllib.request import urlopen
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
def read_content(path):
    """Return the full text content of the file at *path*."""
    with open(path) as handle:
        return handle.read()
class Exercise3Test(StaticLiveServerTestCase):
    """
    These tests use `StaticLiveServerTestCase` and `urlopen` since the normal `TestCase` uses a special server that does
    not serve static assets.
    """
    def test_django_conf(self):
        """
        Check that `reviews` is in `settings.INSTALLED_APPS` and that the static dir is set to <projectdir>/static.
        """
        self.assertIn('reviews', settings.INSTALLED_APPS)
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals([settings.BASE_DIR + '/static'], settings.STATICFILES_DIRS)
    def test_main_css_get(self):
        """
        Test that the main.css can be downloaded, and the content matches that on disk. This also checks that main.css
        is in the right location and is being served using the static files finder.
        Since we have the contents of the file we can check it has the right rules too.
        """
        # Fetch over HTTP from the live test server (bytes) and compare with
        # the on-disk file read in binary mode.
        response = urlopen(self.live_server_url + '/static/main.css').read()
        with open(os.path.join(settings.BASE_DIR, 'static', 'main.css'), 'rb') as f:
            self.assertEqual(response, f.read())
        # Spot-check the expected CSS rules (byte literals: response is bytes).
        self.assertIn(b'.navbar', response)
        self.assertIn(b'.navbar-brand', response)
        self.assertIn(b'.navbar-brand > img', response)
        self.assertIn(b'body', response)
        self.assertIn(b'h1, h2, h3, h4, h5, h6', response)
    def test_base_html_content(self):
        """
        In the base HTML we should see: {% load static %}, CSS loaded with {% static %} template tag, fonts load CSS
        tag, and no <style>...</style> tags.
        """
        base_template = read_content(os.path.join(settings.BASE_DIR, 'templates', 'base.html'))
        self.assertIn('{% load static %}', base_template)
        self.assertIn('<link rel="stylesheet" href="{% static \'main.css\' %}">', base_template)
        self.assertIn('<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Libre+Baskerville|'
                      'Source+Sans+Pro&display=swap">', base_template)
        self.assertNotIn('<style>', base_template)
        self.assertNotIn('</style>', base_template)
"ben@beneboy.co.nz"
] | ben@beneboy.co.nz |
134669add83e4232b2570c51e0fed52d4fb43c12 | 3416464630bc3322dd677001811de1a6884c7dd0 | /dynamic_program/q1143_longestCommonSubsequence/__init__.py | adf40b3d5d875edbfd4202d998f732b09bf6200a | [] | no_license | ttomchy/LeetCodeInAction | f10403189faa9fb21e6a952972d291dc04a01ff8 | 14a56b5eca8d292c823a028b196fe0c780a57e10 | refs/heads/master | 2023-03-29T22:10:04.324056 | 2021-03-25T13:37:01 | 2021-03-25T13:37:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
FileName: __init__.py.py
Description:
Author: Barry Chow
Date: 2020/10/15 10:19 AM
Version: 0.1
"""
from .DP_Solution import Solution
# Public API: re-export the dynamic-programming Solution class.
__all__ =[
'Solution'
]
"zhouenguo@163.com"
] | zhouenguo@163.com |
f35bb1db7e398b0c815fab2296103f35be66b629 | f6078890ba792d5734d289d7a0b1d429d945a03a | /hw2/submission/zhubenjamin/zhubenjamin_37891_1274820_Problem 1.py | 579a43701a6466f3d97ec4fe8b7c6cc99cbc2f7b | [] | no_license | huazhige/EART119_Lab | 1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0 | 47931d6f6a2c7bc053cd15cef662eb2f2027712c | refs/heads/master | 2020-05-04T23:40:53.709217 | 2019-06-11T18:30:45 | 2019-06-11T18:30:45 | 179,552,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,301 | py | # -*- coding: utf-8 -*-
"""
Created on Apr 21, 2019
Class = Astro/Eart 119
Homework 2 - Plots and Animations
Student = Benjamin Zhu (1696575)
"""
#============================================
# (a) imports
#============================================
import numpy as np
injWell = np.loadtxt('injWell_OK.txt').T #import the txt file while transposing them
seism = np.loadtxt('seism_OK.txt').T
#============================================
# (b) Convert to decimal years
#============================================
Yr = seism[1:2] #assigning the row of data files to their variables
Mo = seism[2:3]
Dy = seism[3:4]
Hr = seism[4:5]
Mn = seism[5:6]
Sc = seism[6:7]
DecYear = Yr + (Mo-1)/12 + (Dy-1)/365.25 + Hr/(365.25*24) +\
Mn/(365.25*24*60) + Sc/(365.25*24*3600) #calculations
print(DecYear)
#============================================
# (c) calculate earth quake rate (not solved)
#============================================
"""
def comp_rate( at, k_win):
# smoothed rate from overlapping sample windows normalized by delta_t
aS = np.arange( 0, at.shape[0]-k_win, 1)
aBin, aRate = np.zeros(aS.shape[0]), np.zeros(aS.shape[0])
iS = 0
for s in aS:
i1, i2 = s, s+k_win
aBin[iS] = 0.5*( at[i1]+at[i2])
aRate[iS] = k_win/( at[i2]-at[i1])
iS += 1
return aBin, aRate
#===================================================================================
# dir, file, and parameter
#===================================================================================
# for seism rate
k_win = 200
binsize = 10 # for histogram
# variables
t0 = float( ) # starting time of time axis
at = np.array([]) # time of seismicity
aMag = np.array([]) # magnitudes
aT_inj = np.array([]) # time of injections
aV = np.array([]) # injected volume
#aBin,aRate = np.array([]), np.array([]) # bins and seismicity rates
answer = comp_rate(at, k_win)
print (answer)
"""
#============================================
#
#============================================
#============================================
#
#============================================
| [
"hge2@ucsc.edu"
] | hge2@ucsc.edu |
9df1353b0a03a8a08e934b246193d8cde5896f35 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/storagemover/azure-mgmt-storagemover/generated_samples/job_definitions_list.py | 953d0df43bb9313da6edaeaf6657c5ef22dadf96 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,666 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.storagemover import StorageMoverMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-storagemover
# USAGE
python job_definitions_list.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = StorageMoverMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="11111111-2222-3333-4444-555555555555",
)
response = client.job_definitions.list(
resource_group_name="examples-rg",
storage_mover_name="examples-storageMoverName",
project_name="examples-projectName",
)
for item in response:
print(item)
# x-ms-original-file: specification/storagemover/resource-manager/Microsoft.StorageMover/stable/2023-03-01/examples/JobDefinitions_List.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
3788541c03d8e3cdd90d225eba3d8e953c24f588 | f07a42f652f46106dee4749277d41c302e2b7406 | /Test Set/Open Source Projects/tensorlayer/88d239631b9eb49527c21053d79d55e012f11a3c-2-bug.py | a5c542dc0897f55fcb1489019b0b411308f3883d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,490 | py | #!/usr/bin/env python
__doc__ = """
This demo implements FastText[1] for sentence classification. FastText is a
simple model for text classification with performance often close to
state-of-the-art, and is useful as a solid baseline.
There are some important differences between this implementation and what
is described in the paper. Instead of Hogwild! SGD[2], we use Adam optimizer
with mini-batches. Hierarchical softmax is also not supported; if you have
a large label space, consider utilizing candidate sampling methods provided
by TensorFlow[3].
After 5 epochs, you should get test accuracy close to 90.9%.
[1] Joulin, A., Grave, E., Bojanowski, P., & Mikolov, T. (2016).
Bag of Tricks for Efficient Text Classification.
http://arxiv.org/abs/1607.01759
[2] Recht, B., Re, C., Wright, S., & Niu, F. (2011).
Hogwild: A Lock-Free Approach to Parallelizing Stochastic Gradient Descent.
In Advances in Neural Information Processing Systems 24 (pp. 693–701).
[3] https://www.tensorflow.org/api_guides/python/nn#Candidate_Sampling
"""
import array
import hashlib
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
# Hashed n-grams with 1 < n <= N_GRAM are included as features
# in addition to unigrams.
N_GRAM = 2
# Size of vocabulary; less frequent words will be treated as "unknown"
VOCAB_SIZE = 100000
# Number of buckets used for hashing n-grams
N_BUCKETS = 1000000
# Size of the embedding vectors
EMBEDDING_SIZE = 50
# Number of epochs for which the model is trained
N_EPOCH = 5
# Size of training mini-batches
BATCH_SIZE = 32
# Path to which to save the trained model
MODEL_FILE_PATH = 'model.npz'
class FastTextClassifier(object):
"""Simple wrapper class for creating the graph of FastText classifier."""
def __init__(self, vocab_size, embedding_size, n_labels):
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.n_labels = n_labels
self.inputs = tf.placeholder(tf.int32, shape=[None, None], name='inputs')
self.labels = tf.placeholder(tf.int32, shape=[None], name='labels')
# Network structure
network = AverageEmbeddingInputlayer(self.inputs, self.vocab_size, self.embedding_size)
self.network = DenseLayer(network, self.n_labels)
# Training operation
cost = tl.cost.cross_entropy(self.network.outputs, self.labels, name='cost')
self.train_op = tf.train.AdamOptimizer().minimize(cost)
# Predictions
self.prediction_probs = tf.nn.softmax(self.network.outputs)
self.predictions = tf.argmax(self.network.outputs, axis=1, output_type=tf.int32)
# self.predictions = tf.cast(tf.argmax( # for TF < 1.2
# self.network.outputs, axis=1), tf.int32)
# Evaluation
are_predictions_correct = tf.equal(self.predictions, self.labels)
self.accuracy = tf.reduce_mean(tf.cast(are_predictions_correct, tf.float32))
def save(self, sess, filename):
tl.files.save_npz(self.network.all_params, name=filename, sess=sess)
def load(self, sess, filename):
tl.files.load_and_assign_npz(sess, name=filename, network=self.network)
def augment_with_ngrams(unigrams, unigram_vocab_size, n_buckets, n=2):
"""Augment unigram features with hashed n-gram features."""
def get_ngrams(n):
return list(zip(*[unigrams[i:] for i in range(n)]))
def hash_ngram(ngram):
bytes_ = array.array('L', ngram).tobytes()
hash_ = int(hashlib.sha256(bytes_).hexdigest(), 16)
return unigram_vocab_size + hash_ % n_buckets
return unigrams + [hash_ngram(ngram) for i in range(2, n + 1) for ngram in get_ngrams(i)]
def load_and_preprocess_imdb_data(n_gram=None):
"""Load IMDb data and augment with hashed n-gram features."""
X_train, y_train, X_test, y_test = \
tl.files.load_imdb_dataset(nb_words=VOCAB_SIZE)
if n_gram is not None:
X_train = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_train])
X_test = np.array([augment_with_ngrams(x, VOCAB_SIZE, N_BUCKETS, n=n_gram) for x in X_test])
return X_train, y_train, X_test, y_test
def train_test_and_save_model():
X_train, y_train, X_test, y_test = load_and_preprocess_imdb_data(N_GRAM)
classifier = FastTextClassifier(
vocab_size=VOCAB_SIZE + N_BUCKETS,
embedding_size=EMBEDDING_SIZE,
n_labels=2,
)
with tf.Session() as sess:
tl.layers.initialize_global_variables(sess)
for epoch in range(N_EPOCH):
start_time = time.time()
print('Epoch %d/%d' % (epoch + 1, N_EPOCH))
for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size=BATCH_SIZE, shuffle=True):
sess.run(
classifier.train_op, feed_dict={
classifier.inputs: tl.prepro.pad_sequences(X_batch),
classifier.labels: y_batch,
})
print(" took %.5fs" % (time.time() - start_time))
test_accuracy = sess.run(
classifier.accuracy, feed_dict={
classifier.inputs: tl.prepro.pad_sequences(X_test),
classifier.labels: y_test,
})
print('Test accuracy: %.5f' % test_accuracy)
classifier.save(sess, MODEL_FILE_PATH)
if __name__ == '__main__':
train_test_and_save_model()
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
b4d9093c07bfb5ebb268fc66790a9f456208aeda | e3b89fc928ed736b1cdf7067d71c0f5f7d9e3586 | /encodeData.py | 9093b97281d18e236b8cd5e868d896c1ee0384cc | [] | no_license | tgadf/pymva | 0b801277b27eb626ee61424e6ef24716087ba582 | 960127c880e61732db77c1049a5fe5ab9918e534 | refs/heads/master | 2020-04-02T19:27:38.956710 | 2018-10-30T00:10:29 | 2018-10-30T00:10:29 | 154,734,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 23:17:31 2018
@author: tgadfort
"""
#conda install -c conda-forge category_encoders
#https://github.com/scikit-learn-contrib/categorical-encoding
import category_encoders as ce
encoder = ce.BackwardDifferenceEncoder(cols=[...])
encoder = ce.BinaryEncoder(cols=[...])
encoder = ce.HashingEncoder(cols=[...])
encoder = ce.HelmertEncoder(cols=[...])
encoder = ce.OneHotEncoder(cols=[...])
encoder = ce.OrdinalEncoder(cols=[...])
encoder = ce.SumEncoder(cols=[...])
encoder = ce.PolynomialEncoder(cols=[...])
encoder = ce.BaseNEncoder(cols=[...])
encoder = ce.LeaveOneOutEncoder(cols=[...]) | [
"tgadfort@gmail.com"
] | tgadfort@gmail.com |
39f4aec86bf95b756d12cd722cb068c5c35e5824 | 60d737103373825b858e67292865bda8c6f2094f | /active/theses-harvard.py | 52bc1449f4ad0047fdfea0a63b2cea46a68924ce | [] | no_license | fschwenn/ejlmod | fbf4692b857f9f056f9105a7f616a256725f03b6 | ef17512c2e44baa0164fdc6abc997c70ed3d2a74 | refs/heads/master | 2023-01-24T18:56:35.581517 | 2023-01-20T11:18:16 | 2023-01-20T11:18:16 | 91,459,496 | 1 | 1 | null | 2021-10-04T11:58:15 | 2017-05-16T13:06:57 | Python | UTF-8 | Python | false | false | 4,100 | py | # -*- coding: utf-8 -*-
#harvest theses from Harvard
#FS: 2020-01-14
import getopt
import sys
import os
import urllib2
import urlparse
from bs4 import BeautifulSoup
import re
import ejlmod2
import codecs
import datetime
import time
import json
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
xmldir = '/afs/desy.de/user/l/library/inspire/ejl'
retfiles_path = "/afs/desy.de/user/l/library/proc/retinspire/retfiles"#+'_special'
now = datetime.datetime.now()
stampoftoday = '%4d-%02d-%02d' % (now.year, now.month, now.day)
publisher = 'Harvard U. (main)'
rpp = 20
numofpages = 1
departments = [('m', 'Mathematics'), ('', 'Physics'), ('a', 'Astronomy'), ('c', 'Computer+Science')]
driver = webdriver.PhantomJS()
driver.implicitly_wait(30)
hdr = {'User-Agent' : 'Magic Browser'}
recs = []
for (fc, dep) in departments:
for i in range(numofpages):
tocurl = 'https://dash.harvard.edu/handle/1/4927603/browse?type=department&value=%s&rpp=%i&sort_by=2&type=dateissued&offset=%i&etal=-1&order=DESC' % (dep, rpp, i*rpp)
print '---{ %s }---{ %i/%i }---{ %s }------' % (dep, i+1, numofpages, tocurl)
req = urllib2.Request(tocurl, headers=hdr)
tocpage = BeautifulSoup(urllib2.urlopen(req), features="lxml")
time.sleep(10)
for div in tocpage.body.find_all('div', attrs = {'class' : 'artifact-description'}):
for a in div.find_all('a'):
rec = {'tc' : 'T', 'jnl' : 'BOOK', 'oa' : False, 'note' : [ dep ]}
rec['link'] = 'https://dash.harvard.edu' + a['href']
rec['tit'] = a.text.strip()
if fc: rec['fc'] = fc
recs.append(rec)
jnlfilename = 'THESES-HARVARD-%s' % (stampoftoday)
j = 0
for rec in recs:
j += 1
print '---{ %i/%i }---{ %s }------' % (j, len(recs), rec['link'])
try:
driver.get(rec['link'])
artpage = BeautifulSoup(driver.page_source, features="lxml")
except:
time.sleep(60)
print 'wait a minute'
driver.get(rec['link'])
artpage = BeautifulSoup(driver.page_source, features="lxml")
time.sleep(5)
#author
for meta in artpage.find_all('meta', attrs = {'name' : 'citation_author'}):
rec['autaff'] = [[ meta['content'], publisher ]]
for meta in artpage.find_all('meta'):
if meta.has_attr('name'):
#date
if meta['name'] == 'DC.date':
rec['date'] = meta['content']
#abstract
elif meta['name'] == 'DCTERMS.abstract':
#if meta.has_attr('xml:lang') and meta['xml:lang'] in ['en', 'en_US']:
rec['abs'] = meta['content']
#FFT
elif meta['name'] == 'citation_pdf_url':
rec['FFT'] = meta['content']
#URN
elif meta['name'] == 'DC.identifier':
if meta.has_attr('scheme') and re.search('URI', meta['scheme']):
rec['urn'] = re.sub('.*harvard.edu\/', '', meta['content'])
rec['link'] = meta['content']
else:
rec['note'].append(meta['content'])
#keywords
elif meta['name'] == 'citation_keywords':
rec['keyw'] = re.split('[,;] ', meta['content'])
if not 'urn' in rec.keys():
rec['doi'] = '20.2000/Harvard' + re.sub('.*\/', '', rec['link'])
print ' ', rec.keys()
#closing of files and printing
xmlf = os.path.join(xmldir,jnlfilename+'.xml')
xmlfile = codecs.EncodedFile(codecs.open(xmlf,mode='wb'),'utf8')
ejlmod2.writenewXML(recs,xmlfile,publisher, jnlfilename)
xmlfile.close()
#retrival
retfiles_text = open(retfiles_path,"r").read()
line = jnlfilename+'.xml'+ "\n"
if not line in retfiles_text:
retfiles = open(retfiles_path,"a")
retfiles.write(line)
retfiles.close()
| [
"florian.schwennsen@desy.de"
] | florian.schwennsen@desy.de |
466d4d1bb55b8dafeb39b97860256ff284104ef0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_jugulars.py | 7cd2dde2bba0293c62b58fbfd9c1b89af3e71010 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py |
from xai.brain.wordbase.adjectives._jugular import _JUGULAR
#calss header
class _JUGULARS(_JUGULAR, ):
def __init__(self,):
_JUGULAR.__init__(self)
self.name = "JUGULARS"
self.specie = 'adjectives'
self.basic = "jugular"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
eb724ce8161a951868219e4b96af89a03703ee0a | 1e4c3ea6fadfd2bcffe900a784009e59c9e33202 | /AxiomPro/DisplayingMixerComponent.py | f015a44ce89102f7e1b660ca435c361723864fcf | [] | no_license | mjimserra/AbletonLive9_RemoteScripts | e762e0c761f0af88fc1b9a8b42ef4dec2df02f72 | d08eb29fbf1ac4d12f73841023375059de5ba29a | refs/heads/master | 2021-05-27T21:34:04.962364 | 2014-08-03T22:49:15 | 2014-08-03T22:49:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,479 | py | #Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_static/midi-remote-scripts/AxiomPro/DisplayingMixerComponent.py
from _Framework.ButtonElement import ButtonElement
from _Framework.MixerComponent import MixerComponent
from _Framework.PhysicalDisplayElement import PhysicalDisplayElement
class DisplayingMixerComponent(MixerComponent):
""" Special mixer class that displays the Mute/Solo state of the selected track """
def __init__(self, num_tracks):
MixerComponent.__init__(self, num_tracks)
self._selected_tracks = []
self._display = None
self._mute_button = None
self._solo_button = None
self._register_timer_callback(self._on_timer)
def disconnect(self):
self._unregister_timer_callback(self._on_timer)
self._selected_tracks = None
MixerComponent.disconnect(self)
self._display = None
def set_display(self, display):
raise isinstance(display, PhysicalDisplayElement) or AssertionError
self._display = display
def set_solo_button(self, button):
if not (button == None or isinstance(button, ButtonElement) and button.is_momentary()):
raise AssertionError
self.selected_strip().set_solo_button(button)
if self._solo_button != button:
if self._solo_button != None:
self._solo_button.remove_value_listener(self._solo_value)
self._solo_button = button
self._solo_button != None and self._solo_button.add_value_listener(self._solo_value)
self.update()
def set_mute_button(self, button):
if not (button == None or isinstance(button, ButtonElement) and button.is_momentary()):
raise AssertionError
self.selected_strip().set_mute_button(button)
if self._mute_button != button:
if self._mute_button != None:
self._mute_button.remove_value_listener(self._mute_value)
self._mute_button = button
self._mute_button != None and self._mute_button.add_value_listener(self._mute_value)
self.update()
def _on_timer(self):
sel_track = None
while len(self._selected_tracks) > 0:
track = self._selected_tracks[-1]
if track != None and track.has_midi_input and track.can_be_armed and not track.arm:
sel_track = track
break
del self._selected_tracks[-1]
if sel_track != None:
found_recording_clip = False
song = self.song()
tracks = song.tracks
if song.is_playing:
check_arrangement = song.record_mode
for track in tracks:
if track.can_be_armed and track.arm:
if check_arrangement:
found_recording_clip = True
break
else:
playing_slot_index = track.playing_slot_index
if playing_slot_index in range(len(track.clip_slots)):
slot = track.clip_slots[playing_slot_index]
if slot.has_clip and slot.clip.is_recording:
found_recording_clip = True
break
if found_recording_clip or song.exclusive_arm:
for track in tracks:
if track.can_be_armed and track.arm and track != sel_track:
track.arm = False
sel_track.arm = True
sel_track.view.select_instrument()
self._selected_tracks = []
def _solo_value(self, value):
if not self._solo_button != None:
raise AssertionError
if not value in range(128):
raise AssertionError
if self._display != None and self.song().view.selected_track not in (self.song().master_track, None):
track = value != 0 and self.song().view.selected_track
display_string = str(track.name) + ': Solo '
track.solo and display_string += 'On'
else:
display_string += 'Off'
self._display.display_message(display_string)
else:
self._display.update()
def _mute_value(self, value):
if not self._mute_button != None:
raise AssertionError
if not value in range(128):
raise AssertionError
if self._display != None and self.song().view.selected_track not in (self.song().master_track, None):
track = value != 0 and self.song().view.selected_track
display_string = str(track.name) + ': Mute '
track.mute and display_string += 'On'
else:
display_string += 'Off'
self._display.display_message(display_string)
else:
self._display.update()
def _next_track_value(self, value):
MixerComponent._next_track_value(self, value)
self._selected_tracks.append(self.song().view.selected_track)
def _prev_track_value(self, value):
MixerComponent._prev_track_value(self, value)
self._selected_tracks.append(self.song().view.selected_track) | [
"julien@julienbayle.net"
] | julien@julienbayle.net |
4f706e123529a9d70768fd0c674f57ebc67ba8c0 | ed6625148299e759f39359db9f932dd391b8e86f | /personal_env/lib/python3.8/site-packages/django/template/backends/utils.py | a15e96d77a0902ad37e71d6e5aee17539bde31a4 | [
"MIT"
] | permissive | jestinmwilson/personal-website | 128c4717b21fa6fff9df8295b1137f32bbe44b55 | 6e47a7f33ed3b1ca5c1d42c89c5380d22992ed74 | refs/heads/main | 2023-08-28T11:31:07.916714 | 2021-10-14T09:41:13 | 2021-10-14T09:41:13 | 414,847,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | from django.middleware.csrf import get_token
from django.utils.functional import lazy
from django.utils.html import format_html
from django.utils.safestring import SafeString
def csrf_input(request):
return format_html(
'<input type="hidden" name="csrfmiddlewaretoken" value="{}">',
get_token(request))
csrf_input_lazy = lazy(csrf_input, SafeString, str)
csrf_token_lazy = lazy(get_token, str)
| [
"noreply@github.com"
] | jestinmwilson.noreply@github.com |
ad589e0a99c3938a5c763d820fe4999d6891dd38 | 9f59670ff100632e5a5e24d10a698e50c115dc35 | /devise/utils/tasks.py | 9bbec95328e00ed6d2a57630e9136befaacdfad4 | [] | no_license | mehdidc/reproduction | 7927990c94f6ffee92c16fd550ecf44060b5544d | 63add75dbdda0575bbc59b895092146cb92848e0 | refs/heads/master | 2020-04-28T02:25:33.388228 | 2017-04-06T21:11:39 | 2017-04-06T21:11:39 | 174,897,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from invoke import task
import pickle
@task
def word_embedding_to_binary(filename, out_filename):
words = dict()
with open(filename) as fd:
for line in fd.readlines():
components = line.split(" ")
word = components[0]
embedding = map(float, components[1:])
words[word] = embedding
with open(out_filename, "w") as fd:
pickle.dump(words, fd)
| [
"mehdi@cherti.name"
] | mehdi@cherti.name |
ef72922eb4c2256568f87f0af32022faf169f981 | 020489f1519deb3dd6df459d2b4a853bf64c6278 | /triclelite/scramble/tools/common_tools.py | bfa2db731bbbcd0256f0039d1f06ad2318181bc8 | [] | no_license | reritom/Tricle-Lite | 8d59e58299b19ee355a2153def4d72fb890cb6ab | c01065da770e7723bccb55d7f314f8b4164861d6 | refs/heads/master | 2021-06-03T16:18:41.307052 | 2020-11-13T12:02:17 | 2020-11-13T12:02:17 | 116,177,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | def show_request(request):
'''
This method prints the request data and images. Only to be used in local
'''
print("data: " + str(request.POST))
print(request.FILES)
| [
"reikudjinn@gmail.com"
] | reikudjinn@gmail.com |
d029694c6ea99255d45c0c33dda89431de55aa7f | fe26b42d185c531b38a306fec6d35a6b00f03f88 | /multiprocess-queue.py | 8a8ddaa5d3dc4841792bc41e5e009d025cd59d55 | [] | no_license | szhmery/test-python | 067e15d94c2a214868432cbfc934f0d6c07ec711 | 65627c8dd9b13e6ae803e617ba3df5b7d88f9d27 | refs/heads/master | 2020-03-14T07:24:28.070812 | 2018-07-27T14:36:41 | 2018-07-27T14:36:41 | 131,504,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | import time
from multiprocessing import Process, JoinableQueue, Queue
from random import random
tasks_queue = JoinableQueue()
results_queue = Queue()
def double(n):
return n * 2
def producer(in_queue):
while 1:
wt = random()
time.sleep(wt)
in_queue.put((double, wt))
if wt > 0.9:
in_queue.put(None)
print 'stop producer'
break
def consumer(in_queue, out_queue):
while 1:
task = in_queue.get()
if task is None:
break
func, arg = task
result = func(arg)
in_queue.task_done()
out_queue.put(result)
processes = []
p = Process(target=producer, args=(tasks_queue,))
p.start()
processes.append(p)
p = Process(target=consumer, args=(tasks_queue, results_queue))
p.start()
processes.append(p)
tasks_queue.join()
for p in processes:
p.join()
while 1:
if results_queue.empty():
break
result = results_queue.get()
print 'Result:', result
| [
"szhmery@gmail.com"
] | szhmery@gmail.com |
7be6f2344f4f155b1d863065c960619f6ca0a958 | 03cb73ffb69f2caa0f91b62d99d8694d24c6c932 | /arc/__init__.py | 0e94ed53d9bb541061d79220b060b96dd4f1f249 | [
"MIT"
] | permissive | Den4200/arc | 84f1b69beeb2d1aa6be78a740277772d586127a9 | 55d9c43c0db9f3342ef5b5e8fed429e423ad1f3a | refs/heads/master | 2023-05-10T16:04:21.050895 | 2020-05-03T08:22:38 | 2020-05-03T08:22:38 | 260,573,175 | 1 | 0 | MIT | 2021-06-02T01:43:46 | 2020-05-01T22:49:11 | Python | UTF-8 | Python | false | false | 686 | py | from typing import List, Tuple, Union
class _Keycodes:
"""
Returns multiple keycodes from
a dictionary in one request.
"""
def __init__(self) -> None:
self.keys = {
'enter': 13,
'del': 127,
'backspace': 8,
'esc': 27,
'c': 99,
'x': 120,
's': 115,
'=': 61,
'-': 45
}
def __getitem__(self, items: Union[str, Tuple]) -> List[int]:
if isinstance(items, (int, str)):
items = [items]
return [self.keys[item] for item in items]
KEYS = _Keycodes()
| [
"dpham.42@hotmail.com"
] | dpham.42@hotmail.com |
d5f1ccfa5c7346676dcd4a84e16d05fec5e5019b | f4547c0e47f9f4d4d6ba4fe3f2908094dc0ac511 | /first.py | fbab4d252666861d003ffc3dc71ae819e03456d5 | [] | no_license | gautamamber/python_mongodb- | c63e00be2eb19029d593462c65268c31a2733a18 | fa595d667c9820263256cabf9a6deae07ec70df8 | refs/heads/master | 2021-05-07T15:24:53.143954 | 2017-11-08T17:06:51 | 2017-11-08T17:06:51 | 110,005,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from pymongo import MongoClient
MONGODB_URI = "mongodb://amber:amber@ds155325.mlab.com:55325/amberamity"
client = MongoClient(MONGODB_URI, connectTimeoutMS=30000)
db = client.get_database("amberamity")
user_records = db.user_records
def getRECORD(user_id):
records = user_records.find_one({"user_id":user_id})
return records
def pushRECORD(record):
user_records.insert_one(record)
def updateRecord(record, updates):
user_records.update_one({'_id': record['_id']},{
'$set': updates
}, upsert=False)
record = {
"name": "gautam",
"age": "20",
"class" : "twelth"
}
pushRECORD(record)
| [
"ambergautam1@gmail.com"
] | ambergautam1@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.