| Column | Type | Stats |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |

The sample records later in this section list these fields in the same order, separated by `|`, with each record's `content` cell holding a complete source file.
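Before reading the raw records, a quick way to inspect rows with this schema is to stream the dataset with the Hugging Face `datasets` library. This is a minimal sketch only: the hub path `"org/dataset-name"` is a placeholder (this excerpt does not name the actual repository), and the column names used are taken from the table above.

```python
from datasets import load_dataset  # pip install datasets

# Placeholder hub path -- substitute the real repository id for this dataset.
ds = load_dataset("org/dataset-name", split="train", streaming=True)

# Stream a couple of rows instead of downloading the full table;
# `content` holds an entire source file, so full downloads are heavy.
for row in ds.take(2):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:120])  # first 120 characters of the file
```

Streaming keeps memory use flat regardless of dataset size; `take(n)` simply yields the first `n` records from the stream.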
da23b03efa139367d54c2fcb3e57fd0f9d9631df
|
a06586c101a31bf6c9a7dc307cf664120ac092fd
|
/Trakttv.bundle/Contents/Libraries/Shared/shove/stores/bsdb.py
|
0da028a82f4fcbd598ea314ed513ba07efaced6b
|
[] |
no_license
|
HaKDMoDz/Plex-Trakt-Scrobbler
|
22dd1d8275698761cb20a402bce4c5bef6e364f9
|
6d46cdd1bbb99a243b8628d6c3996d66bb427823
|
refs/heads/master
| 2021-01-22T00:10:18.699894
| 2015-05-25T23:52:45
| 2015-05-25T23:52:45
| 37,312,507
| 2
| 0
| null | 2015-06-12T09:00:54
| 2015-06-12T09:00:53
| null |
UTF-8
|
Python
| false
| false
| 1,271
|
py
|
# -*- coding: utf-8 -*-
'''
Berkeley Source Database Store.
shove's URI for BSDDB stores follows the form:
bsddb://<path>
Where the path is a URL path to a Berkeley database. Alternatively, the native
pathname to a Berkeley database can be passed as the 'engine' parameter.
'''
from threading import Condition
from stuf.six import b
try:
from bsddb import hashopen
except ImportError:
try:
from bsddb3 import hashopen
except ImportError:
raise ImportError('requires bsddb library')
from shove.store import SyncStore
from shove._compat import synchronized
__all__ = ['BSDBStore']
class BSDBStore(SyncStore):
'''Berkeley Source Database-based object storage frontend.'''
init = 'bsddb://'
def __init__(self, engine, **kw):
super(BSDBStore, self).__init__(engine, **kw)
self._store = hashopen(self._engine)
self._lock = Condition()
self.sync = self._store.sync
@synchronized
def __getitem__(self, key):
return self.loads(self._store[key], key)
@synchronized
def __setitem__(self, key, value):
self._store[b(key)] = self.dumps(value)
self.sync()
@synchronized
def __delitem__(self, key):
del self._store[b(key)]
self.sync()
|
[
"gardiner91@gmail.com"
] |
gardiner91@gmail.com
|
957745c63aa4acbb57a1750949c8d2115760bb3c
|
d772869033c47a666622e9ee518bb306db5451a5
|
/unified/modules/main/categories/phone_sms/clicksend_sms/util.py
|
45a36a96360e84f47ec87f916ca256fc31d727b4
|
[] |
no_license
|
funny2code/unified_api
|
920f1e19b2304e331b019f8a531d412b8759e725
|
ffa28ba0e5c0bd8ad7dd44a468e3d1e777bba725
|
refs/heads/main
| 2023-08-31T16:00:17.074427
| 2021-10-04T04:09:45
| 2021-10-04T04:09:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
import clicksend_client
from flask import Response, request
def get_clicksend_configuration(clicksend_sms_headers):
'''Authentication for Clicksend API'''
configuration = clicksend_client.Configuration()
configuration.username = clicksend_sms_headers['username']
configuration.password = clicksend_sms_headers['password']
return configuration
|
[
"baidawardipendar@gmail.com"
] |
baidawardipendar@gmail.com
|
a4b7456e3c8f645d193567dd9cb63fb57ebd7f6d
|
729a5032e812ef85fe09d4e9682105288ab915fc
|
/elit/item/seed.py
|
2946e57dd876c784673d1cc23f34f3c5314d8600
|
[] |
no_license
|
junhg0211/elit
|
d2c60f2325efd37c5c20a5160157c296c8c45d49
|
b6c26f655b1d485b47483c6ef184ff6cb0c226f0
|
refs/heads/main
| 2023-07-18T07:20:26.519724
| 2021-08-30T01:57:08
| 2021-08-30T01:57:08
| 397,745,875
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,346
|
py
|
from asyncio import TimeoutError as AsyncioTimeoutError
from typing import Tuple
from discord import Embed
from discord.ext.commands import Bot, Context
from elit.exception import ChannelError
from elit.item import Item
from util import message_author_check, irago
class Seed(Item):
name = '씨앗'
type = 3
description = '밭에 농작물을 심을 수 있습니다.'
buy_prise = 2
async def use(self, amount: int, player, bot: Bot, ctx: Context) -> Tuple[str, Embed]:
farm = player.get_farm()
farm_channel = farm.get_channel(bot)
if ctx.channel != farm_channel:
raise ChannelError(f':x: {ctx.author.mention} **여기서는 농작물을 심을 수 없어요!** '
f'{farm_channel.mention}에서 시도해주세요.')
await ctx.send(f':potted_plant: 심을 농작물의 이름을 입력해주세요. 농작물은 __{amount}개__를 심습니다. '
f'(`취소` 또는 `cancel`이라고 입력하면 심기가 취소됩니다.)')
try:
message = await bot.wait_for('message', check=message_author_check(ctx.author), timeout=15)
except AsyncioTimeoutError:
raise AsyncioTimeoutError(f':x: {ctx.author.mention} **작물 심기가 취소되었습니다.** '
f'작물 이름 입력 시간이 초과되었어요...')
else:
if message.content in ('취소', 'cancel'):
raise ValueError(':x: **심기를 취소했습니다.**')
elif message.content in ('자세히', 'specific'):
raise ValueError(f':x: **작물 이름은 __{message.content}__{irago(message.content)} 지을 수 없습니다.**')
crop_name = message.content
if len(crop_name) > 16:
raise ValueError(f':x: {ctx.author.mention} **작물 이름이 너무 길어요!** 작물 이름은 16글자 이내로 지어주세요!')
amount, planted_at = farm.plant(crop_name, amount)
embed = farm.get_planted_crop_by_name(crop_name).get_embed()
return self.apply_use(amount, f':potted_plant: {farm_channel.mention}에 '
f'`{crop_name}` __{amount}개__를 심었습니다.'), embed
def get_prise_per_piece(self) -> int:
return 1
|
[
"junhg0211@gmail.com"
] |
junhg0211@gmail.com
|
8fdd8520b2283971949c09761c0766c98f0e3724
|
fd10bb1c3e08f429a421ea87e73939aea2187227
|
/src/model/modules/feed_forward_block.py
|
0bcba2db8594bdf7b66e406381a84e60e9e2198d
|
[] |
no_license
|
dertilo/speech-recognition-transformer
|
2b9e6336c57eafdbb0dde34c61166a0364f283f4
|
f3c4ef5bae631ce6bbdd9dcc78c529e4d39994ce
|
refs/heads/master
| 2022-12-13T06:04:56.939095
| 2020-07-07T09:39:12
| 2020-07-07T09:39:12
| 257,938,478
| 4
| 1
| null | 2022-11-28T12:48:00
| 2020-04-22T15:14:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,394
|
py
|
import torch as t
from src.model.modules.gelu import Gelu
import torch
from src.model.modules.low_rank_linear import LowRankLinear
class FeedForwardReZeroBlock(t.nn.Module):
"""
feed forward layer combined with add - layer norm - dropout
"""
def __init__(self, input_size, inner_size, dropout, use_low_rank=False):
super(FeedForwardReZeroBlock, self).__init__()
if not use_low_rank:
self.linear1 = t.nn.Linear(input_size, inner_size, bias=True)
t.nn.init.xavier_normal_(self.linear1.weight)
else:
self.linear1 = LowRankLinear(input_size, inner_size, rank=128)
self.gelu = Gelu()
self.dropout = t.nn.Dropout(dropout, inplace=True)
if not use_low_rank:
self.linear2 = t.nn.Linear(inner_size, input_size, bias=True)
t.nn.init.xavier_normal_(self.linear2.weight)
else:
self.linear2 = LowRankLinear(inner_size, input_size, rank=128)
self.dropout = t.nn.Dropout(dropout, inplace=True)
self.rezero_alpha = t.nn.Parameter(t.Tensor([0]))
def forward(self, net):
residual = net
net = self.linear1(net)
net = self.gelu(net)
net = self.dropout(net)
net = self.linear2(net)
net = self.dropout(net)
net = self.rezero_alpha * net
net += residual
return net
class FeedForwardBlock(t.nn.Module):
"""
feed forward layer combined with add - layer norm - dropout
"""
def __init__(self, input_size, inner_size, dropout, use_low_rank=False):
super(FeedForwardBlock, self).__init__()
if not use_low_rank:
self.linear1 = t.nn.Linear(input_size, inner_size, bias=True)
t.nn.init.xavier_normal_(self.linear1.weight)
else:
self.linear1 = LowRankLinear(input_size, inner_size, rank=128)
self.gelu = Gelu()
self.dropout = t.nn.Dropout(dropout, inplace=True)
if not use_low_rank:
self.linear2 = t.nn.Linear(inner_size, input_size, bias=True)
t.nn.init.xavier_normal_(self.linear2.weight)
else:
self.linear2 = LowRankLinear(inner_size, input_size, rank=128)
self.layer_norm = t.nn.LayerNorm(input_size)
self.dropout = t.nn.Dropout(dropout, inplace=True)
def forward(self, net):
residual = net
net = self.layer_norm(net)
net = self.linear1(net)
net = self.gelu(net)
net = self.dropout(net)
net = self.linear2(net)
net = self.dropout(net)
net += residual
return net
class MultiLayeredConv1d(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
This is a module of multi-layered conv1d designed to replace positionwise feed-forward network
in Transformer block, which is introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize MultiLayeredConv1d module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(MultiLayeredConv1d, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, ..., in_chans).
Returns:
Tensor: Batch of output tensors (B, ..., hidden_chans).
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)
class Conv1dLinear(torch.nn.Module):
"""Conv1D + Linear for Transformer block.
A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize Conv1dLinear module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(Conv1dLinear, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Linear(hidden_chans, in_chans)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, ..., in_chans).
Returns:
Tensor: Batch of output tensors (B, ..., hidden_chans).
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x))
|
[
"lancertong@live.com"
] |
lancertong@live.com
|
39b67bdf6db8b2bd8a32118189359f4ca09763ca
|
0cf9bb9c50c6efc1bc4a7923f42f6fad79039598
|
/Homeworks/HW 11_ Playing the Game Starter Code/testgame.py
|
56ad1ba84fd6cf3e17dcc8bd4907ab171d691030
|
[] |
no_license
|
AlbMej/CSE-2050-Data-Structures-and-Object-Oriented-Design
|
c950bada185823c70370522e0735533b41bd726b
|
bfbe91d698e650d78c20fd535c45108a8dba1030
|
refs/heads/master
| 2020-04-25T13:20:57.537243
| 2019-03-12T19:54:04
| 2019-03-12T19:54:04
| 172,806,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,884
|
py
|
from nim import Nim
from gametree import GameTree
from TicTacToe import TicTacToe
import unittest
class TestGameTree(unittest.TestCase):
"""Tests for the GameTree data structure"""
def testinit(self):
"""Test the initializer"""
n1 = Nim([2,3,4])
g1 = GameTree(n1)
n2 = Nim([3,1,4,2])
g2 = GameTree(n2)
def testcurrentwins(self):
"""Test currentwins()"""
n1 = Nim([2,3,4])
g1 = GameTree(n1)
self.assertEqual(g1.currentwins(), True)
n2 = Nim([1,0,1])
g2 = GameTree(n2)
self.assertEqual(g2.currentwins(), True)
n3 = Nim([1,0,1,1])
g3 = GameTree(n3)
self.assertEqual(g3.currentwins(), False)
def testcurrentloses(self):
"""Test currentloses()"""
n1 = Nim([2,3,4])
g1 = GameTree(n1)
self.assertEqual(g1.currentloses(), False)
n2 = Nim([1,0,1])
g2 = GameTree(n2)
self.assertEqual(g2.currentloses(), False)
n3 = Nim([1,0,1,1])
g3 = GameTree(n3)
self.assertEqual(g3.currentloses(), True)
def testcurrentdraws(self):
"""Test currentdraws()"""
n1 = Nim([1,0,1])
g1 = GameTree(n1)
self.assertEqual(g1.currentdraws(), False)
n2 = Nim([1,0,1,1])
g2 = GameTree(n2)
self.assertEqual(g2.currentdraws(), False)
def testtictactoewins(self):
"""Test currentloses() on TicTacToe"""
t1 = TicTacToe('X', 'OO_XX____')
g1 = GameTree(t1)
self.assertEqual(g1.currentwins(), True)
t3 = TicTacToe('X', 'X__X__O_O')
g3 = GameTree(t3)
self.assertEqual(g3.currentwins(), False)
t3 = TicTacToe('O', '____X____')
g3 = GameTree(t3)
self.assertEqual(g3.currentwins(), False)
def testtictactoeloses(self):
"""Test currentwins() on TicTacToe"""
t1 = TicTacToe('X', 'OO_XX____')
g1 = GameTree(t1)
self.assertEqual(g1.currentloses(), False)
t3 = TicTacToe('X', 'X__X__O_O')
g3 = GameTree(t3)
self.assertEqual(g3.currentloses(), True)
t3 = TicTacToe('O', '____X____')
g3 = GameTree(t3)
self.assertEqual(g3.currentloses(), False)
def testtictactoedraws(self):
t1 = TicTacToe('X', 'OO_XX____')
g1 = GameTree(t1)
self.assertEqual(g1.currentdraws(), False)
t3 = TicTacToe('X', 'X__X__O_O')
g3 = GameTree(t3)
self.assertEqual(g3.currentdraws(), False)
t3 = TicTacToe('O', '____X____')
g3 = GameTree(t3)
self.assertEqual(g3.currentdraws(), True)
t4 = TicTacToe('X', 'XOOOXXX__')
g4 = GameTree(t4)
self.assertEqual(g4.currentwins(), True)
if __name__ == '__main__':
unittest.main()
|
[
"albertomejia295@gmail.com"
] |
albertomejia295@gmail.com
|
eefadf6c7a8ed64f11d537d97fad62d617be43b6
|
6f2b98bcfca2e338dba4b313f5cbca3e5ca2e1f6
|
/backend/api/migrations/0034_add_image_deletehash.py
|
a6ea652b4581bac987436aa749d7c4db20c9a3b4
|
[
"MIT"
] |
permissive
|
Disfactory/Disfactory
|
c9187d73607d359a18206b42b3e7e4727f62e0ae
|
54163d8c14e9b67493a0354175cfe87a0f98d823
|
refs/heads/master
| 2023-07-24T19:22:28.963065
| 2023-07-17T10:54:50
| 2023-07-17T10:54:50
| 209,294,385
| 50
| 22
|
MIT
| 2023-08-26T07:55:06
| 2019-09-18T11:43:33
|
Python
|
UTF-8
|
Python
| false
| false
| 419
|
py
|
# Generated by Django 2.2.13 on 2021-03-17 13:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0033_remove_factory_point'),
]
operations = [
migrations.AddField(
model_name='image',
name='deletehash',
field=models.TextField(blank=True, help_text='delete hash', null=True),
),
]
|
[
"swind@cloudmosa.com"
] |
swind@cloudmosa.com
|
253eb447069949259264040ad4f04fa3c929cca0
|
acff427a36d6340486ff747ae9e52f05a4b027f2
|
/playground/narcisse/vstream-client/actions.py
|
2fcb4cc9c1c59b88cf51530ab7e633160d07a0a4
|
[] |
no_license
|
jeremie1112/pisilinux
|
8f5a03212de0c1b2453132dd879d8c1556bb4ff7
|
d0643b537d78208174a4eeb5effeb9cb63c2ef4f
|
refs/heads/master
| 2020-03-31T10:12:21.253540
| 2018-10-08T18:53:50
| 2018-10-08T18:53:50
| 152,126,584
| 2
| 1
| null | 2018-10-08T18:24:17
| 2018-10-08T18:24:17
| null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
WorkDir="vstream-client-%s" % get.srcVERSION()
def setup():
shelltools.export("OS_CXXFLAGS", "%s -fno-strict-aliasing" % get.CXXFLAGS())
shelltools.export("CFLAGS", "%s" % get.CFLAGS())
shelltools.system("./configure --prefix=/usr")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
|
[
"erkanisik@yahoo.com"
] |
erkanisik@yahoo.com
|
782421c6cfd319b5bc114598b6cfb8469740d481
|
ad2704933de4502ae9de91e6d915f9dbe010b446
|
/kurosawa/chapter02/knock18.py
|
cd6c994ed7a88d77e47aa5bf1eadbd204aeae0e7
|
[] |
no_license
|
tmu-nlp/100knock2017
|
266e68917d8d5a7f5d0c064f1bc2da5fa402a253
|
629bd1155d0fe78cd9302ae9a7cdf0922b778fe7
|
refs/heads/master
| 2021-01-19T17:36:53.328997
| 2017-07-24T07:09:54
| 2017-07-24T07:09:54
| 88,334,932
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
with open('hightemp.txt','r') as f:
col = []
for list1 in f:
list1 = list1.split()
col.append(list1)
for i in sorted(col, key=lambda temp: temp[2]):
print("%s\t%s\t%s\t%s" %(i[0],i[1],i[2],i[3]))
# sort -k3 hightemp.txt
|
[
"michiki@Michiki-no-MacBook-Pro.local"
] |
michiki@Michiki-no-MacBook-Pro.local
|
859c6751bcaac2d3846b424c0d80a24f60795267
|
81f2825e5bc73bcdaadb00570d8a8607974af3af
|
/scratch_42.py
|
efa6e3f7b33d10a97f8b038a9064310cf9de8fbb
|
[] |
no_license
|
PrakharBansal24/Assignment-1
|
26fc316fe4bd8d2482ef34f147ba241fc2b20d80
|
927239e57309ca3ca794631eb61faf16784b9bf2
|
refs/heads/master
| 2022-11-23T20:28:30.628768
| 2020-07-27T05:39:30
| 2020-07-27T05:39:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
def printFrequency(strr):
M = {}
word = ""
for i in range(len(strr)):
if (strr[i] == ' '):
if (word not in M):
M[word] = 1
word = ""
else:
M[word] += 1
word = ""
else:
word += strr[i]
if (word not in M):
M[word] = 1
else:
M[word] += 1
for it in M:
print(it, "-", M[it])
strr = "Apple Apple boy boy boy boy token token frequency"
printFrequency(strr)
|
[
"you@example.com"
] |
you@example.com
|
996906dd39fb3529cc39d2ec310d939fa819d3ed
|
e18222344f78f65e5a52480fa24b4720a1d4e36b
|
/tests/test_appsync.py
|
8281801fa43e093c266aa15a27d2af81b3a9f707
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
vllrsatish/troposphere
|
177c34fac39f668eda8c2baaed19ae1a6a05964b
|
5ec03f329f2a91d3bb970ef0df7cf6232dccde16
|
refs/heads/master
| 2023-03-24T23:18:40.304921
| 2021-03-21T12:06:08
| 2021-03-21T16:31:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,673
|
py
|
import unittest
from troposphere.appsync import Resolver, PipelineConfig
class TestAppsyncResolver(unittest.TestCase):
def test_resolver_kind_bad_value(self):
with self.assertRaisesRegex(ValueError, 'Kind must be one of'):
Resolver(
'MutationField',
DataSourceName='SomeDatasource',
FieldName='Field',
TypeName='Mutation',
ApiId='some_api_id',
Kind='SOME_KIND',
PipelineConfig=PipelineConfig(
Functions=['FunctionId1', 'FunctionId']
),
RequestMappingTemplateS3Location=('s3://bucket/key.req.vtl'),
ResponseMappingTemplateS3Location=('s3://bucket/key.res.vtl')
)
def test_resolver(self):
Resolver(
'MutationField',
DataSourceName='SomeDatasource',
FieldName='Field',
TypeName='Mutation',
ApiId='some_api_id',
Kind='PIPELINE',
PipelineConfig=PipelineConfig(
Functions=['FunctionId1', 'FunctionId']
),
RequestMappingTemplateS3Location=('s3://bucket/key.req.vtl'),
ResponseMappingTemplateS3Location=('s3://bucket/key.res.vtl')
)
Resolver(
'MutationField',
DataSourceName='SomeDatasource',
FieldName='Field',
TypeName='Mutation',
ApiId='some_api_id',
Kind='UNIT',
RequestMappingTemplateS3Location=('s3://bucket/key.req.vtl'),
ResponseMappingTemplateS3Location=('s3://bucket/key.res.vtl')
)
|
[
"mark@peek.org"
] |
mark@peek.org
|
675e595f5196864d3257979b47697dfddbd5e4e4
|
9e1ee20e89229869b42cd5deceeb24ce7790b721
|
/aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/PreCheckCreateOrderForTempUpgradeRequest.py
|
9f2b126d63cd827d8382f32125666a242dfb382c
|
[
"Apache-2.0"
] |
permissive
|
guwenbo/aliyun-openapi-python-sdk
|
7503ed8f50897ea1ad7bdb390e140a2e570e30b8
|
ef4f34e7e703ef2ddfdcb1f57573b9b14be77e0d
|
refs/heads/master
| 2020-09-23T04:44:06.134661
| 2019-12-02T12:52:51
| 2019-12-02T12:52:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,079
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class PreCheckCreateOrderForTempUpgradeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'PreCheckCreateOrderForTempUpgrade','rds')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_DBInstanceStorage(self):
return self.get_query_params().get('DBInstanceStorage')
def set_DBInstanceStorage(self,DBInstanceStorage):
self.add_query_param('DBInstanceStorage',DBInstanceStorage)
def get_NodeType(self):
return self.get_query_params().get('NodeType')
def set_NodeType(self,NodeType):
self.add_query_param('NodeType',NodeType)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_EffectiveTime(self):
return self.get_query_params().get('EffectiveTime')
def set_EffectiveTime(self,EffectiveTime):
self.add_query_param('EffectiveTime',EffectiveTime)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_DBInstanceStorageType(self):
return self.get_query_params().get('DBInstanceStorageType')
def set_DBInstanceStorageType(self,DBInstanceStorageType):
self.add_query_param('DBInstanceStorageType',DBInstanceStorageType)
def get_BusinessInfo(self):
return self.get_query_params().get('BusinessInfo')
def set_BusinessInfo(self,BusinessInfo):
self.add_query_param('BusinessInfo',BusinessInfo)
def get_AutoPay(self):
return self.get_query_params().get('AutoPay')
def set_AutoPay(self,AutoPay):
self.add_query_param('AutoPay',AutoPay)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_Resource(self):
return self.get_query_params().get('Resource')
def set_Resource(self,Resource):
self.add_query_param('Resource',Resource)
def get_CommodityCode(self):
return self.get_query_params().get('CommodityCode')
def set_CommodityCode(self,CommodityCode):
self.add_query_param('CommodityCode',CommodityCode)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_UsedTime(self):
return self.get_query_params().get('UsedTime')
def set_UsedTime(self,UsedTime):
self.add_query_param('UsedTime',UsedTime)
def get_DBInstanceClass(self):
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self,DBInstanceClass):
self.add_query_param('DBInstanceClass',DBInstanceClass)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
e2ec73ce9a92cc11f091aee17aacaa3ceb1eb9c9
|
7b79deca597eee678b521b808674948fc333fd40
|
/Nanodet/client2.py
|
c5e1ccef29b6d952475a220940624ade856836a8
|
[] |
no_license
|
GitZzw/IERCAR
|
bfd4481ce1d1994a36f0587876c970b60f08d1c3
|
cd3115b89f4b69a9adb2c26e412c0659bfa68aa6
|
refs/heads/master
| 2023-02-19T03:09:16.040070
| 2021-01-23T13:44:39
| 2021-01-23T13:44:39
| 332,219,359
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,537
|
py
|
#!/usr/bin/python
# coding: utf-8
import math
import socket
import rospy
import threading
import time
from std_msgs.msg import Float32
from geometry_msgs.msg import PoseStamped
global target_corner_msg
global t2
global flag
def callback(data):
global t2
global flag
global target_corner_msg
target_corner_msg = PoseStamped()
target_corner_msg.header.stamp = rospy.Time.now()
target_corner_msg.pose.position.y = data.data
if(flag == True):
flag = False
t2 = threading.Thread(target=tcpip)
t2.start()
def client():
global flag
flag = True
rospy.init_node('client', anonymous=True)
rospy.Subscriber("trans", Float32, callback)
rospy.spin()
def tcpip():
yolo_target_pub = rospy.Publisher('yolo_target_corner', PoseStamped, queue_size=1)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connect to the server
print ('connect state: ', s.connect_ex(('127.0.0.1', 8000)))
while True:
receive_msg = s.recv(39).decode()
#print(len(receive_msg))
msg = receive_msg.split(',')
if msg[0] == '1':
""" QuaternionStamped.x, y, z, w = xmin, ymin, xmax, ymax, x = time, y= distance """
cx = 317.657
cy = 234.635
f = 610.5250244140625
xmin = float(msg[1])
ymin = float(msg[2])
xmax = float(msg[3])
ymax = float(msg[4])
px = (xmin + xmax)/2
deltax = px-cx
py = (ymin + ymax)/2
deltay = py-cy
dis = target_corner_msg.pose.position.y
disz = dis/math.sqrt((abs(deltax)/f)*(abs(deltax)/f)+(abs(deltay)/f)*(abs(deltay)/f)+1)
disx = disz*deltax/f
disy = disz*deltay/f
target_corner_msg.pose.orientation.x = disx*100
target_corner_msg.pose.orientation.y = disy*100
target_corner_msg.pose.orientation.z = disz*100
target_corner_msg.pose.position.x = float(msg[5]) #time
#print(time.time()-target_corner_msg.pose.position.x)
# else:
# # print (" target not found ... ")
# target_corner_msg.pose.orientation.x = 0
# target_corner_msg.pose.orientation.y = 0
# target_corner_msg.pose.orientation.z = 0
# target_corner_msg.pose.orientation.w = 0
# target_corner_msg.pose.position.x = -1
yolo_target_pub.publish(target_corner_msg)
if __name__ == "__main__":
client()
|
[
"120137016@qq.com"
] |
120137016@qq.com
|
7acee8deb1bf7f07bb324573b18412c4e2c80892
|
cb8c62659f9509bbc01237a09cf8730b57f4a84f
|
/Webopedia/__init__.py
|
e27402d16322110945ef906d20fd6ce678573c79
|
[] |
no_license
|
stepnem/supybot-plugins
|
5bd795319036ab21cd81b00a23e0c1f712876d3e
|
6838f7ae22ad1905272cf7e003fb803e637c87d8
|
refs/heads/master
| 2021-01-01T18:49:44.478383
| 2012-01-05T04:14:24
| 2012-01-05T04:14:24
| 281,407
| 8
| 4
| null | 2016-11-01T20:15:17
| 2009-08-18T21:55:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,965
|
py
|
###
# Copyright (c) 2004, Kevin Murphy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Provides commands and snarfers for the webopedia.com technical term dictionary
site.
"""
import supybot
__author__ = supybot.authors.skorobeus
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
import config
import plugin
reload(plugin) # In case we're being reloaded.
Class = plugin.Class
configure = config.configure
|
[
"stepnem@gmail.com"
] |
stepnem@gmail.com
|
b7080a3388fa6748b93fdbe2e00ad522869923bb
|
5ec48e90f711c9514a6d2ee36dbb46bc1ba71b74
|
/accounts/migrations/0005_alter_user_zipcode.py
|
ce51f2e740d31fc46683c04ef22c1913cba2642e
|
[] |
no_license
|
hanieh-mav/hanieh_shop
|
1ca5042fefb970459d9f48fb716a95fec6a530bb
|
b7cf253e11b6c167e78b245f253a8d057f435026
|
refs/heads/main
| 2023-06-10T16:37:26.385048
| 2021-07-07T14:19:58
| 2021-07-07T14:19:58
| 372,892,835
| 2
| 0
| null | 2021-07-07T14:19:59
| 2021-06-01T16:19:48
|
CSS
|
UTF-8
|
Python
| false
| false
| 437
|
py
|
# Generated by Django 3.2.4 on 2021-06-26 08:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_remove_user_shahr'),
]
operations = [
migrations.AlterField(
model_name='user',
name='zipcode',
field=models.CharField(blank=True, max_length=10, null=True, verbose_name='کدپستی'),
),
]
|
[
"h.mehdiabadi@gmail.com"
] |
h.mehdiabadi@gmail.com
|
feca44382f1c83fcd137aacf6ceaefb3ddc82150
|
9795e787a54d15f2f249a17b616fec3df67d4559
|
/exception/exceptions.py
|
f737f34a31e603fd34902bda5585322969e15d34
|
[] |
no_license
|
gebbz03/PythonProject
|
377b6ccf5eafa37dd157012ce499138370ba882f
|
c12f939cf194a4c541ee77e1f614ba9867ef7090
|
refs/heads/master
| 2020-04-02T22:16:11.082863
| 2018-10-30T05:49:22
| 2018-10-30T05:49:22
| 154,827,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
# try/except block
def div(x,y):
try:
result=x/y
except ZeroDivisionError:
print("Cannot divide by zero")
return None
except Exception as e:
print("Error occured",e)
return None
return result
print(div(4,2))
print(div(4,0))
print(div('1','2'))
|
[
"gebb.freelancer@gmail.com"
] |
gebb.freelancer@gmail.com
|
41fdbe9ba825572f3d44dfbc1f9d27d1ef7a631d
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/verification_ip_flow_parameters_py3.py
|
dd9ba7b70c0b60664f4e70b0301a8b45e220e3c8
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,706
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VerificationIPFlowParameters(Model):
"""Parameters that define the IP flow to be verified.
All required parameters must be populated in order to send to Azure.
:param target_resource_id: Required. The ID of the target resource to
perform next-hop on.
:type target_resource_id: str
:param direction: Required. The direction of the packet represented as a
5-tuple. Possible values include: 'Inbound', 'Outbound'
:type direction: str or ~azure.mgmt.network.v2017_10_01.models.Direction
:param protocol: Required. Protocol to be verified on. Possible values
include: 'TCP', 'UDP'
:type protocol: str or ~azure.mgmt.network.v2017_10_01.models.Protocol
:param local_port: Required. The local port. Acceptable values are a
single integer in the range (0-65535). Support for * for the source port,
which depends on the direction.
:type local_port: str
:param remote_port: Required. The remote port. Acceptable values are a
single integer in the range (0-65535). Support for * for the source port,
which depends on the direction.
:type remote_port: str
:param local_ip_address: Required. The local IP address. Acceptable values
are valid IPv4 addresses.
:type local_ip_address: str
:param remote_ip_address: Required. The remote IP address. Acceptable
values are valid IPv4 addresses.
:type remote_ip_address: str
:param target_nic_resource_id: The NIC ID. (If VM has multiple NICs and IP
forwarding is enabled on any of them, then this parameter must be
specified. Otherwise optional).
:type target_nic_resource_id: str
"""
_validation = {
'target_resource_id': {'required': True},
'direction': {'required': True},
'protocol': {'required': True},
'local_port': {'required': True},
'remote_port': {'required': True},
'local_ip_address': {'required': True},
'remote_ip_address': {'required': True},
}
_attribute_map = {
'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
'direction': {'key': 'direction', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'local_port': {'key': 'localPort', 'type': 'str'},
'remote_port': {'key': 'remotePort', 'type': 'str'},
'local_ip_address': {'key': 'localIPAddress', 'type': 'str'},
'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'},
'target_nic_resource_id': {'key': 'targetNicResourceId', 'type': 'str'},
}
def __init__(self, *, target_resource_id: str, direction, protocol, local_port: str, remote_port: str, local_ip_address: str, remote_ip_address: str, target_nic_resource_id: str=None, **kwargs) -> None:
super(VerificationIPFlowParameters, self).__init__(**kwargs)
self.target_resource_id = target_resource_id
self.direction = direction
self.protocol = protocol
self.local_port = local_port
self.remote_port = remote_port
self.local_ip_address = local_ip_address
self.remote_ip_address = remote_ip_address
self.target_nic_resource_id = target_nic_resource_id
|
[
"noreply@github.com"
] |
lmazuel.noreply@github.com
|
d72a310d68e97683a91711d371b79255141f523c
|
76d4d6f4edb3216ade81ba1d1b98ef17a1b9baa9
|
/transactions/views.py
|
c3470d056a65b14161f48f361cc74ca9705c83a5
|
[] |
no_license
|
phemmylintry/crypto
|
8455c4ed6fda14bf49fdad9527cb6de4134498d6
|
390816f8152514446d063728b7428d6633739855
|
refs/heads/main
| 2023-03-22T07:51:35.454040
| 2021-03-10T04:25:04
| 2021-03-10T04:25:04
| 340,151,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,819
|
py
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.authentication import TokenAuthentication
from django.contrib.auth import get_user_model
from django_q.tasks import async_task, result
from drf_spectacular.utils import extend_schema, OpenApiParameter, OpenApiExample
from drf_spectacular.types import OpenApiTypes
from .serializers import TransactionSerializer, TransactionListSerializer
from .models import Transaction
from .tasks import send_transaction
import uuid
User = get_user_model()
class TransactionView(generics.CreateAPIView):
queryset = Transaction.objects.all()
serializer_class = TransactionSerializer
permission_classes = (IsAuthenticated, )
authentication_classes = (TokenAuthentication, )
@extend_schema(
request=TransactionSerializer,
responses={201: TransactionSerializer},
)
def create(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data, context={'request': request})
serializer.is_valid(raise_exception=True)
transaction = self.perform_create(serializer)
if transaction == "success":
get_transaction_id = serializer.data['transaction_ref']
transact = Transaction.objects.get(transaction_ref=get_transaction_id)
transact.state = "success"
transact.save(update_fields=['state'])
else:
return Response ({
'status' : "Tansaction not succesful",
'data' : {
'transaction_ref' : serializer.data['transaction_ref']
}
})
#update transaction state :(
headers = self.get_success_headers(serializer.data)
return Response({
'status' : "Transaction is successful.",
'data' : {
'transaction_ref' : serializer.data['transaction_ref']
}
}, status=status.HTTP_201_CREATED)
def perform_create(self, serializer):
currency_type = serializer.validated_data['currency_type']
target_user = serializer.validated_data['target_user']
get_target_user = User.objects.get(id=target_user)
serializer.validated_data['target_user'] = get_target_user
# generate random id for the transaction reference
transaction_ref = uuid.uuid4()
serializer.validated_data['transaction_ref'] = transaction_ref
source_user = self.request.user
serializer.validated_data['source_user'] = source_user
serializer.save()
target_user = serializer.data['target_user']
source_user = serializer.data['source_user']
currency_type = serializer.data['currency_type']
transfer_amount = serializer.data['currency_amount']
task = async_task('transactions.tasks.send_transaction', source_user, target_user, currency_type, transfer_amount)
# task = send_transaction.apply_async((source_user, target_user, currency_type, transfer_amount), countdown=2)
results = result(task, 200)
print(results)
return results
class TransactionListView(APIView):
permission_classes = (IsAuthenticated, )
authentication_classes = (TokenAuthentication, )
@extend_schema(
request=TransactionListSerializer,
responses={201: TransactionListSerializer},
)
def get(self, request, format='json'):
user = request.user.id
if not user:
return Response({
"status" : "Error",
"data" : {
"message" : "Invalid user"
}
}, status=status.HTTP_400_BAD_REQUEST)
transactions = Transaction.objects.all()
data = []
for items in transactions:
if items.source_user_id == user or items.target_user_id == user:
data.append({
'transaction_ref' : items.transaction_ref,
'currency_amount' : items.currency_amount,
'currency_type' : items.currency_type,
'source_user_id' : items.source_user_id,
'target_user_id' : items.target_user_id,
'state' : items.state,
'time_of_transaction': items.timestamp_created
})
if data == []:
return Response(
{
"data" : "No transaction history"
}, status=status.HTTP_200_OK)
return Response(data, status=status.HTTP_200_OK)
|
[
"phemmylintry@gmail.com"
] |
phemmylintry@gmail.com
|
017385197b19ce7d53a1c71903d01f34549125f6
|
d532b85841b459c61d88d380e88dd08d29836d43
|
/solutions/922_sort_array_by_parity_ii.py
|
da1ab4097aaced49495bc27465f04ae81e4854cc
|
[
"MIT"
] |
permissive
|
YiqunPeng/leetcode_pro
|
ad942468df5506de9dc48a4019933f658e2a3121
|
4a508a982b125a3a90ea893ae70863df7c99cc70
|
refs/heads/master
| 2022-05-15T09:32:02.699180
| 2022-05-14T16:32:17
| 2022-05-14T16:32:17
| 182,453,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
"""Array.
Running time: O(n) where n is the length of A.
"""
even, odd = [], []
for a in A:
if a % 2 == 1:
odd.append(a)
else:
even.append(a)
res = []
for i in range(len(A)):
if i % 2 == 1:
res.append(odd.pop())
else:
res.append(even.pop())
return res
|
[
"ypeng1@andrew.cmu.edu"
] |
ypeng1@andrew.cmu.edu
|
61698d866bd746910d1e197d4205bbdc4be3429a
|
cd2d3b6be41eb9b96ecc3a22dc730325c21f22e6
|
/charalog/log/woals.cgi
|
00aadbd636a0b7515b9ff6fd8600b2962811ba09
|
[] |
no_license
|
cappuu/TC
|
c61f235349e9a68d472fa85bbea1adbef3ea154a
|
def08d09219e11bee2135f6b796569b769ee21c1
|
refs/heads/master
| 2021-09-10T19:37:33.847161
| 2018-03-31T22:56:05
| 2018-03-31T22:56:05
| 124,523,296
| 0
| 0
| null | null | null | null |
UHC
|
Python
| false
| false
| 1,999
|
cgi
|
12월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(16일0시14분)
11월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일23시17분)
10월 : 사비의 방어시설을 <font color=red>+14</font> 강화했습니다.(15일22시15분)
9월 : 사비의 방어시설을 <font color=red>+10</font> 강화했습니다.(15일21시14분)
8월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일20시14분)
7월 : 사비의 방어시설을 <font color=red>+10</font> 강화했습니다.(15일19시14분)
7월 : 수확으로 <font color=red>3751</font>의 식량을 수확했습니다. [봉토추가봉록:51](15일19시14분)
6월 : <font color=red>[상승] </font>:평강의 통솔력이 1포인트 올랐다.(15일18시15분)
6월 : 사비의 방어시설을 <font color=red>+8</font> 강화했습니다.(15일18시15분)
5월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일17시14분)
4월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일16시14분)
3월 : 사비의 방어시설을 <font color=red>+8</font> 강화했습니다.(15일15시14분)
2월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일14시14분)
1월 : 숙박하여 피로를 대폭 회복하였습니다.(15일13시16분)
1월 : 세금으로 <font color=red>4341</font>의 돈을 징수했습니다. [관직추가봉록:400] [봉토추가봉록:241](15일13시16분)
12월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일12시14분)
11월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일11시16분)
10월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일10시14분)
9월 : 사비의 방어시설을 <font color=red>+11</font> 강화했습니다.(15일9시15분)
8월 : 사비의 방어시설을 <font color=red>+12</font> 강화했습니다.(15일8시14분)
|
[
"lu2447315@gmail.com"
] |
lu2447315@gmail.com
|
3f2822cf8074a1923bebb0ea6f5d14b816b76656
|
dde1cf596cf5969812ecda999828baa9c73e788d
|
/test/test_snapshot_alias_extended.py
|
6bca0e0b2c31ef9b79fad88ed829b0806416cbaa
|
[] |
no_license
|
dctalbot/isilon_sdk_python3.7
|
bea22c91096d80952c932d6bf406b433af7f8e21
|
4d9936cf4b9e6acbc76548167b955a7ba8e9418d
|
refs/heads/master
| 2020-04-25T20:56:45.523351
| 2019-02-28T19:32:11
| 2019-02-28T19:32:11
| 173,065,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.snapshot_alias_extended import SnapshotAliasExtended # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestSnapshotAliasExtended(unittest.TestCase):
"""SnapshotAliasExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSnapshotAliasExtended(self):
"""Test SnapshotAliasExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.snapshot_alias_extended.SnapshotAliasExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"dctalbot@umich.edu"
] |
dctalbot@umich.edu
|
5f92a9568bee1058fc272d28084d6c7ad10f802b
|
9b45d301869631cf464da34eadf5ddb96ce80ae2
|
/annotations/subsample_json_annotations.py
|
92590f3a15f62c2c1ad6db9dce734c2926bcc825
|
[] |
no_license
|
zhanght021/segment-any-moving
|
df6605bfee4bb9c6f76f3e09d38a493914eb5750
|
a72f1afd9f52bc9151221112dbc8a8fc0891807e
|
refs/heads/master
| 2020-12-02T15:30:12.301579
| 2019-12-22T20:24:02
| 2019-12-22T20:24:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
import argparse
import json
import logging
import random
from pathlib import Path
from utils.log import setup_logging
def main():
# Use first line of file docstring as description if it exists.
parser = argparse.ArgumentParser(
description=__doc__.split('\n')[0] if __doc__ else '',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input-json', required=True)
parser.add_argument('--output-json', required=True)
parser.add_argument('--keep-num-images', type=int, required=True)
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
random.seed(args.seed)
input_path = Path(args.input_json)
output_path = Path(args.output_json)
log_path = args.output_json + '.log'
setup_logging(log_path)
logging.info('Args:\n%s' % vars(args))
assert input_path.exists()
assert not output_path.exists()
with open(input_path, 'r') as f:
data = json.load(f)
image_ids = [x['id'] for x in data['images']]
import collections
ids_count = collections.Counter(image_ids)
repeated = {x: y for x, y in ids_count.items() if y > 1}
random.shuffle(image_ids)
kept_image_ids = set(image_ids[:args.keep_num_images])
subsampled_images = [
x for x in data['images'] if x['id'] in kept_image_ids
]
subsampled_annotations = [
x for x in data['annotations'] if x['image_id'] in kept_image_ids
]
logging.info(
'Kept %s/%s images' % (len(subsampled_images), len(data['images'])))
logging.info('Kept %s/%s annotations' % (len(subsampled_annotations),
len(data['annotations'])))
data['images'] = subsampled_images
data['annotations'] = subsampled_annotations
with open(output_path, 'w') as f:
json.dump(data, f)
if __name__ == "__main__":
main()
|
[
"achalddave@live.com"
] |
achalddave@live.com
|
9fc96baae700f71e09894a414eeaf395736030fc
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/765.py
|
e2bb292960f894265919bae5d4515259ff95dcdb
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
__________________________________________________________________________________________________
sample 28 ms submission
class Solution(object):
def minSwapsCouples(self, row):
ans = 0
for i in range(0, len(row), 2):
x = row[i]
if row[i+1] == x^1: continue
ans += 1
for j in range(i+1, len(row)):
if row[j] == x^1:
row[i+1], row[j] = row[j], row[i+1]
break
return ans
__________________________________________________________________________________________________
sample 13208 kb submission
class UnionFind:
def __init__(self, N):
self.parents = [i for i in range(N)]
self.count = 0
def find(self, x):
if self.parents[x] == x:
return x
return self.find(self.parents[x])
def union(self, x, y):
px = self.find(x)
py = self.find(y)
if px != py:
self.count += 1
self.parents[py] = px
class Solution:
def minSwapsCouples(self, row: List[int]) -> int:
N = len(row) // 2
UF = UnionFind(N)
for i in range(N):
x_couple = row[i * 2] // 2
y_couple = row[i * 2 + 1] // 2
if x_couple != y_couple:
UF.union(x_couple, y_couple)
return UF.count
__________________________________________________________________________________________________
|
[
"strengthen@users.noreply.github.com"
] |
strengthen@users.noreply.github.com
|
be8dd059ed81f4842d06142a8a046d206f83a4eb
|
a7b66311c2ce113789933ec3162f1128b2862f13
|
/app/waterQual/basinAll/tsMapSeq.py
|
5bfffb5cc94cf4e8469788d0e47b6a2dd1db36cc
|
[
"MIT"
] |
permissive
|
ChanJeunlam/geolearn
|
214b2c42359ea1164b39117fad2d7470adeb6d35
|
791caa54eb70920823ea7d46714dc8a3e7fa7445
|
refs/heads/master
| 2023-07-16T04:13:15.526364
| 2021-08-16T05:24:18
| 2021-08-16T05:24:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,278
|
py
|
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
dataName = 'basinAll'
wqData = waterQuality.DataModelWQ('basinAll')
outName = 'basinAll-Y8090-opt1'
trainset = 'Y8090'
testset = 'Y0010'
# point test
outFolder = os.path.join(kPath.dirWQ, 'model', outName)
yP1, ycP1 = basins.testModel(outName, trainset, wqData=wqData, ep=200)
errFile1 = os.path.join(outFolder, 'errMat1_ep200.npy')
# errMat1 = wqData.errBySiteC(ycP1, subset=trainset, varC=wqData.varC)
# np.save(errFile1, errMat1)
errMat1 = np.load(errFile1)
errFile2 = os.path.join(outFolder, 'errMat2_ep200.npy')
yP2, ycP2 = basins.testModel(outName, testset, wqData=wqData, ep=200)
# errMat2 = wqData.errBySiteC(ycP2, subset=testset, varC=wqData.varC)
# np.save(errFile2, errMat2)
errMat2 = np.load(errFile2)
# seq test
siteNoLst = wqData.info['siteNo'].unique().tolist()
# basins.testModelSeq(outName, siteNoLst, wqData=wqData, ep=200)
# figure out number of samples
dirInv = os.path.join(kPath.dirData, 'USGS', 'inventory')
fileSiteNo = os.path.join(dirInv, 'siteNoLst-1979')
siteNoLstAll = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
df0 = pd.read_csv(os.path.join(dirInv, 'codeCount.csv'),
dtype={'siteNo': str}).set_index('siteNo')
df1 = pd.read_csv(os.path.join(dirInv, 'codeCount_B2000.csv'),
dtype={'siteNo': str}).set_index('siteNo')
df2 = pd.read_csv(os.path.join(dirInv, 'codeCount_A2000.csv'),
dtype={'siteNo': str}).set_index('siteNo')
matN = df0.loc[siteNoLst].values
matN1 = df1.loc[siteNoLst].values
matN2 = df2.loc[siteNoLst].values
# plot box
codePdf = usgs.codePdf
groupLst = codePdf.group.unique().tolist()
for group in groupLst:
codeLst = codePdf[codePdf.group == group].index.tolist()
indLst = [wqData.varC.index(code) for code in codeLst]
labLst1 = [codePdf.loc[code]['shortName'] +
'\n'+code for code in codeLst]
labLst2 = ['train opt1', 'test opt1', 'train opt2', 'test opt2']
dataBox = list()
for ic in indLst:
temp = list()
for errMat in [errMat1, errMat2]:
ind = np.where((matN1[:, ic] > 50) & (matN2[:, ic] > 50))[0]
temp.append(errMat[ind, ic, 1])
dataBox.append(temp)
title = 'correlation of {} group'.format(group)
fig = figplot.boxPlot(dataBox, label1=labLst1, label2=labLst2)
fig.suptitle(title)
fig.show()
# plot map
siteNoLst = wqData.info['siteNo'].unique().tolist()
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
codePdf = usgs.codePdf
codeLst = ['00940', '00915']
def funcMap():
nM = len(codeLst)
figM, axM = plt.subplots(nM, 1, figsize=(8, 6))
for k in range(0, nM):
code = codeLst[k]
ic = wqData.varC.index(code)
shortName = codePdf.loc[code]['shortName']
title = '{} {}'.format(shortName, code)
axplot.mapPoint(axM[k], lat, lon, errMat2[:, ic, 1], s=12)
axM[k].set_title(title)
figP, axP = plt.subplots(nM+1, 1, figsize=(8, 6))
return figM, axM, figP, axP, lon, lat
def funcPoint(iP, axP):
siteNo = siteNoLst[iP]
dfPred, dfObs = basins.loadSeq(outName, siteNo, ep=200)
dfPred = dfPred[dfPred.index >= np.datetime64('1980-01-01')]
dfObs = dfObs[dfObs.index >= np.datetime64('1980-01-01')]
t = dfPred.index.values.astype(np.datetime64)
tBar = np.datetime64('2000-01-01')
axplot.plotTS(axP[0], t, [dfPred['00060'], dfObs['00060']], tBar=tBar,
legLst=['pred', 'obs'], styLst='--', cLst='br')
axP[0].set_title('streamflow')
for k, var in enumerate(codeLst):
styLst = '-*'
shortName = codePdf.loc[var]['shortName']
title = ' {} {}'.format(shortName, var)
axplot.plotTS(axP[k+1], t, [dfPred[var], dfObs[var]], tBar=tBar,
legLst=['pred', 'obs'], styLst=styLst, cLst='br')
axP[k+1].set_title(title)
figplot.clickMap(funcMap, funcPoint)
|
[
"geofkwai@gmail.com"
] |
geofkwai@gmail.com
|
b0a7e19fb390fa57b3835fc1e4f1ca42566c3f7d
|
efd471380d976614667e56c92f0aed671371fc63
|
/All Programs/Tuples.py
|
7c2c7e039ad6d52062ffa5f8b2189cb68d4273cf
|
[] |
no_license
|
anshumanairy/Hacker-Rank
|
39af46e76182d34637340d1755aff4afd7820083
|
6fef4c6a415422d9379232932358e4ee7430a6af
|
refs/heads/master
| 2021-07-04T07:41:37.769152
| 2020-10-12T05:49:24
| 2020-10-12T05:49:24
| 181,359,750
| 2
| 2
| null | 2020-10-12T05:49:25
| 2019-04-14T19:38:18
|
Python
|
UTF-8
|
Python
| false
| false
| 201
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
def func():
N=int(input())
list1=[""]*N
c=input("")
list1=list(map(int,c.split()))
print(hash(tuple(list1)))
func()
# In[ ]:
|
[
"anshuman.airy04@gmail.com"
] |
anshuman.airy04@gmail.com
|
85a39828733a6f7bfe8b8897c68b984eaf80db3c
|
7d549faf0de691a63acae85e60b081d4b6b7ddc7
|
/slowfast/datasets/__init__.py
|
dee63427c45337ec1e5ac384762abd5fb36e5d31
|
[
"Apache-2.0"
] |
permissive
|
billcai/SlowFast
|
be05f7852810d43211c4e6ab7faef27f86d035af
|
778888e63351e55861801996b37c7ff9a3746587
|
refs/heads/master
| 2021-08-01T17:02:11.539218
| 2021-07-26T22:05:16
| 2021-07-26T22:06:15
| 248,907,066
| 0
| 0
|
Apache-2.0
| 2020-03-21T04:34:41
| 2020-03-21T04:34:40
| null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .ava_dataset import Ava # noqa
from .build import DATASET_REGISTRY, build_dataset # noqa
from .charades import Charades # noqa
from .imagenet import Imagenet # noqa
from .kinetics import Kinetics # noqa
from .ssv2 import Ssv2 # noqa
try:
from .ptv_datasets import Ptvcharades, Ptvkinetics, Ptvssv2 # noqa
except Exception:
print("Please update your PyTorchVideo to latest master")
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
61d9f8923dce64ff0f4801e9d15ae1d5e69c756c
|
71f47bd812a420c9849ecc7609e99f9b969c4d3d
|
/push_endpoint/migrations/0018_pusheddata_datasource.py
|
1f6ba580925a882b85b136592f91b7ce41408b7e
|
[] |
no_license
|
erinspace/shareregistration
|
e2bd0d8086a60eac616057a225bda07a0cd385a9
|
e04bfe443fda49644a12778a4826c9cb04930f5b
|
refs/heads/master
| 2020-05-27T18:25:05.858413
| 2016-02-24T20:17:37
| 2016-02-24T20:17:37
| 30,875,095
| 0
| 1
| null | 2016-02-24T20:17:37
| 2015-02-16T15:47:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 467
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('push_endpoint', '0017_auto_20151113_1532'),
]
operations = [
migrations.AddField(
model_name='pusheddata',
name='datasource',
field=models.CharField(default='test', max_length=100),
preserve_default=False,
),
]
|
[
"erin.braswell@gmail.com"
] |
erin.braswell@gmail.com
|
85bb1583d91110edde4ea3a582c960e697bc9b4e
|
0bf6ecbdebc7424a8946b29127d55c5bc1e7442e
|
/organization/migrations/0062_auto_20170727_2109.py
|
146f45e3415b6fb573a6dc9d046e806ac30d424d
|
[] |
no_license
|
dekkerlab/cLIMS
|
2351a9c81f3e3ba982e073500a4a5cf2fd38ed51
|
e76731032a5707027b53746a8f2cc9b01ab7c04e
|
refs/heads/master
| 2021-03-27T06:28:49.718401
| 2017-10-10T19:22:33
| 2017-10-10T19:22:33
| 71,837,345
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-07-27 21:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wetLab', '0076_auto_20170727_2109'),
('organization', '0061_auto_20170426_1541'),
]
operations = [
migrations.AddField(
model_name='experiment',
name='authentication_docs',
field=models.ManyToManyField(blank=True, help_text='Attach any authentication document for your biosample here. e.g. Fragment Analyzer document, Gel images.', related_name='expAddProto', to='wetLab.Protocol', verbose_name='authentication_docs'),
),
migrations.AlterField(
model_name='experiment',
name='imageObjects',
field=models.ManyToManyField(blank=True, help_text='additional images.', related_name='expImg', to='dryLab.ImageObjects'),
),
]
|
[
"nanda@ankitas-mbp.ad.umassmed.edu"
] |
nanda@ankitas-mbp.ad.umassmed.edu
|
e4771e45015373752e3153f63e5089990296b822
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_312/ch6_2019_02_28_19_18_08_378411.py
|
51733cbb770c32be20d9f73baf7b228425fd72c1
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
def encontra_maximo(lista):
    max = lista[0][0]
    eixox = len(lista)
    eixoy = len(lista[0])
    count = 0
    while count < eixox:
        count2 = 0
        while count2 < eixoy:
            if lista[count][count2] > max:
                max = lista[count][count2]
            count2 += 1
        count += 1
    return max
|
[
"you@example.com"
] |
you@example.com
|
fb3acbea24022a162db419f7994a985a3cf44ed8
|
c331d0f5e3d4ae0c60dd5cc7aa3dc9c76faec88c
|
/WebApp/python/setup_db.py
|
e39937533186ae75b43b74afa11e44c1e6d8c2e7
|
[] |
no_license
|
sahahn/BPt_app
|
744be29dad8710b5113a50cd12d8d250e51587d6
|
f849a8bad43419b334000e57f2ce874d38a6d3d5
|
refs/heads/master
| 2023-03-09T01:50:44.401955
| 2020-10-19T17:59:37
| 2020-10-19T17:59:37
| 280,518,561
| 0
| 0
| null | 2020-10-19T17:59:39
| 2020-07-17T20:25:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,872
|
py
|
import os
import json
import shutil
from Dataset import Dataset
def process_dataset(base_loc, dataset_name):
# Init dataset with locs, etc...
dataset = Dataset(base_loc=base_loc,
dataset_name=dataset_name)
# Process files (skips if not needed)
dataset.process_files()
def process_datasets(base_loc):
    # Make data info directory if it doesn't exist
data_info_loc = os.path.join(base_loc, 'bpt/Data_Info')
os.makedirs(data_info_loc, exist_ok=True)
# Process each dataset
sources_loc = os.path.join(base_loc, 'sources')
datasets = [f for f in os.listdir(sources_loc) if not f.startswith('.')]
for dataset in datasets:
process_dataset(base_loc, dataset)
# Check each dataset for its events
    # Also check to make sure the dataset isn't empty
non_empty_datasets = []
all_events = set()
for dataset in datasets:
event_file = os.path.join(data_info_loc, dataset, 'eventnames.json')
with open(event_file, 'r') as f:
events = set(json.load(f))
all_events.update(events)
        # Only add dataset if at least 1 event (only 0 events when empty)
if len(events) > 0:
non_empty_datasets.append(dataset)
# Save overlapped events
all_events_loc = os.path.join(base_loc, 'bpt/all_events.json')
with open(all_events_loc, 'w') as f:
json.dump(list(all_events), f)
# Save datasets.json w/ non-empty datasets
datasets_loc = os.path.join(base_loc, 'bpt/datasets.json')
with open(datasets_loc, 'w') as f:
json.dump(sorted(non_empty_datasets), f)
# Go through and delete any saved data info if
# not in the compiled datasets
saved_datasets = os.listdir(data_info_loc)
for dataset in saved_datasets:
if dataset not in non_empty_datasets:
shutil.rmtree(os.path.join(data_info_loc, dataset))
def main():
base_loc = '/var/www/html/data'
# Locs + checks
lock_loc = os.path.join(base_loc, 'bpt/lock')
ready_loc = os.path.join(base_loc, 'bpt/ready')
error_loc = os.path.join(base_loc, 'bpt/process_datasets_errors.txt')
# Check for db-add lock
if (os.path.isfile(lock_loc)):
return None
else:
with open(lock_loc, 'w') as f:
f.write('locked')
# If previous error file exists, remove it
if os.path.exists(error_loc):
os.remove(error_loc)
    # Run dataset processing, recording success or failure
    try:
        process_datasets(base_loc)
        # If processing raised no errors, write the ready flag
        with open(ready_loc, 'w') as f:
            f.write('ready')
# If error, save to text file
except Exception as e:
with open(error_loc, 'w') as f:
f.write(repr(e))
# Remove the lock - regardless of if error or success
os.remove(lock_loc)
if __name__ == "__main__":
main()
|
[
"sahahn@uvm.edu"
] |
sahahn@uvm.edu
|
6e82d9acc53a5eabc323bec3d2068e5365b6bdec
|
42f83595d24addd3cf8be828e282b37509825b3e
|
/src/collective/cfgconfig/view.py
|
dd91876c2f239c361f74514a6c6a9a457b10ca9f
|
[] |
no_license
|
datakurre/collective.cfgconfig
|
825a26b7704932b5ea70f688cda8112623b42493
|
3325c6cbd5defd40c40bce7ed43814e9f77263ae
|
refs/heads/master
| 2016-09-06T21:39:03.209274
| 2013-11-24T12:13:50
| 2013-11-24T12:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
# -*- coding: utf-8 -*-
from Products.Five.browser import BrowserView
class HelloWorld(BrowserView):
def __call__(self):
return u"Hello world"
|
[
"asko.soukka@iki.fi"
] |
asko.soukka@iki.fi
|
e45b7ab72944b666ff1639a4f0be0b3e38507d7b
|
9947d1e328a3262a35a61385dc537c3dc557ab7d
|
/机器学习/day05/demo07_ac.py
|
334d5013ba072bb2730ca8167dd3d93753dc93a3
|
[] |
no_license
|
nuass/lzh
|
d0a7c74a3295523d1fe15eeaa73997fc04469f06
|
3cb1cf1e448b88ade226d113a7da4eab7bbb5c09
|
refs/heads/master
| 2021-02-06T06:10:32.772831
| 2019-06-10T08:54:49
| 2019-06-10T08:54:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
# coding=utf-8
"""
Agglomerative hierarchical clustering demo.
"""
import numpy as np
import sklearn.cluster as sc
import matplotlib.pyplot as mp
x = np.loadtxt("../ml_data/multiple3.txt",delimiter=",")
model=sc.AgglomerativeClustering(n_clusters=4)
pred_y = model.fit_predict(x)
mp.figure("AgglomerativeClustering",facecolor="lightgray")
mp.title("AgglomerativeClustering",fontsize=14)
mp.xlabel("x",fontsize=12)
mp.ylabel("y",fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=":")
mp.scatter(x[:,0],x[:,1],s=60,marker='o',c=pred_y,cmap="brg",label="Sample Points")
mp.legend()
mp.show()
|
[
"1581627402@qq.com"
] |
1581627402@qq.com
|
e269cfa019462d7b553ac3efa865d6eca08f96e4
|
19f1dc4e728714e66af8e1e8262f2b7c47d3beb6
|
/Samples/UserSamples/2017/STTConfig.py
|
3e551660149292cc15d24516e961c55717591b35
|
[] |
no_license
|
samhiggie/DatacardCreator
|
74cbbbea928770d3ca5669604d96ffb582416b45
|
8e838816cfb9adee8b2276adf79904da6449ca52
|
refs/heads/master
| 2020-09-11T19:35:48.985441
| 2019-11-18T23:51:18
| 2019-11-18T23:51:18
| 222,169,538
| 0
| 0
| null | 2019-11-16T22:56:02
| 2019-11-16T22:56:02
| null |
UTF-8
|
Python
| false
| false
| 1,350
|
py
|
from Samples.SampleDefinition import Sample
from Samples.Uncertainties.UserUncertainties.TES import TESUncertainty
from Samples.Uncertainties.UserUncertainties.JES import JESUncertainty
from Samples.Uncertainties.UserUncertainties.METUES import METUESUncertainty
from Samples.Uncertainties.UserUncertainties.MuonES import MuonESUncertainty
from Samples.Uncertainties.UserUncertainties.Prefiring import PrefiringUncertainty
from Samples.Uncertainties.UserUncertainties.TauID import TauIDUncertainty
from Samples.Uncertainties.UserUncertainties.Trigger17_18 import Trigger1718Uncertainty
from Samples.EventDefinition.UserEventDictionaries.MuTauEventDictionary import MuTauEventDictionary
STSample = Sample()
STSample.name = 'STT'
STSample.path = '/data/aloeliger/SMHTT_Selected_2017_Deep/'
STSample.files = ['ST_t_top.root',
'ST_t_antitop.root',
'ST_tW_top.root',
'ST_tW_antitop.root']
STSample.definition = '(gen_match_1 == 1 || gen_match_1 == 2) && gen_match_2 == 5'
STSample.uncertainties = [
TESUncertainty(),
JESUncertainty(),
METUESUncertainty(),
MuonESUncertainty(),
PrefiringUncertainty(),
TauIDUncertainty(),
Trigger1718Uncertainty(),
]
STSample.eventDictionaryInstance = MuTauEventDictionary
STSample.CreateEventWeight = STSample.CreateEventWeight_Standard
|
[
"aloelige@cern.ch"
] |
aloelige@cern.ch
|
e32886648d45201263ed378387cc9fab9df32a4e
|
d68be566e1b7dbb9c716b8165e9d546a6e294e5d
|
/course/models.py
|
265df4287ea82f383070bffcb7676c8a7a8d5f77
|
[] |
no_license
|
NeuSovo/Neusoft-ecard
|
0e5d525360522d4abf3a7f39ec4d205ec17d571d
|
41138be9280fc92e98d6dce7394ac66204672b40
|
refs/heads/master
| 2021-03-24T09:32:55.543809
| 2018-09-12T04:48:29
| 2018-09-12T04:48:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,955
|
py
|
from django.db import models
# Create your models here.
class RoomModel(models.Model):
class Meta:
verbose_name = "RoomModel"
verbose_name_plural = "RoomModels"
    def info(self):
        result = {
            'RoomID': self.RoomID,
            'RoomTime': self.RoomTime,
            'RoomWeek': self.RoomWeek,
            'ClassName': self.ClassName,
            'ClassTeacher': self.ClassTeacher,
            'ClassTime': self.ClassTime,
            'RoomCount': self.RoomCount
        }
        return result
RoomFloor = models.CharField(
max_length=10
)
RoomID = models.CharField(
max_length=30
)
RoomTime = models.IntegerField(
default=0
)
RoomWeek = models.IntegerField(
default=0
)
ClassName = models.CharField(
max_length=155,
)
ClassTeacher = models.CharField(
max_length=155,
)
ClassTime = models.CharField(
max_length=100,
)
RoomCount = models.IntegerField(
default=0
)
class RoomTest(models.Model):
class Meta:
verbose_name = "课程信息"
verbose_name_plural = "课程信息"
ordering = ['id']
def info(self, has_grade=False):
result = {
'RoomID': self.RoomID,
'ClassName': self.ClassName,
'ClassTeacher': self.ClassTeacher,
'ClassWeek': self.ClassWeek,
'ClassCount': self.ClassCount,
'ClassTimeWeek': self.ClassTimeWeek,
'ClassTimeTime': self.ClassTimeTime
}
if has_grade:
result['ClassGrade'] = self.ClassGrade
return result
ClassTimeTime_choices = (
('1', '1-2节'),
('2', '3-4节'),
('3', '5-6节'),
('4', '7-8节'),
('5', '9-10节'),
('5', '9-11节'),
('1-2', '1-4节'),
('1-2-3-4', '1-8节'),
('3-4', '5-7节'),
('3-4', '5-8节'),
('1-2-3-4', '1-8节'),
('1-2-3-4-5', '1-10节'),
('1-2-3-4-5', '1-11节'),
)
ClassTimeWeek_choices = (
(1, '周一'),
(2, '周二'),
(3, '周三'),
(4, '周四'),
(5, '周五'),
(6, '周六'),
(7, '周日'),
)
RoomID = models.CharField(
max_length=30,
null=True
)
ClassName = models.CharField(
max_length=155,
null=True
)
ClassTeacher = models.CharField(
max_length=120,
null=True,
default='0'
)
ClassWeek = models.CharField(
max_length=30,
null=True
)
ClassCount = models.IntegerField(default=0)
ClassGrade = models.TextField(
default='0',
null=True
)
ClassTimeWeek = models.IntegerField(
default=0, choices=ClassTimeWeek_choices)
ClassTimeTime = models.CharField(
default='0',
max_length=10,
choices=ClassTimeTime_choices,
null=True
)
|
[
"zhangxh1997@gmail.com"
] |
zhangxh1997@gmail.com
|
04bd05b63a5e2c8534a1ddac43a2d5cafcb436e0
|
133dc799865134325975afeff2d1aa1ed4a1f5ca
|
/migrations/versions/15fd2af90843_users_table.py
|
fd6d349e4f08d2cc27f0e80cb3078dc01b9cf887
|
[] |
no_license
|
Serrones/microblog
|
5eb72baf86ad363e607ac29775f8c1f24234a18d
|
917eec12890c8485d44dbef4742dae268837c15b
|
refs/heads/master
| 2020-03-14T16:48:47.328601
| 2018-05-07T00:01:20
| 2018-05-07T00:01:20
| 131,705,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
"""Users Table
Revision ID: 15fd2af90843
Revises:
Create Date: 2018-05-01 17:23:06.763488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '15fd2af90843'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
|
[
"fabioserrones@gmail.com"
] |
fabioserrones@gmail.com
|
37465d7be87d3f32cd3ed96223af113778b5d931
|
24d6d41989d676f3532013de3a6d847586fa3663
|
/permissions_widget/settings.py
|
adac7edee3ac2392f01a48f2c9f39405a49144f4
|
[] |
no_license
|
diegofer/compu
|
92da75e79a4f286840f127698961bd1f99edf567
|
4407896e899e057a928f63455f29bba370bf5c7a
|
refs/heads/master
| 2021-01-22T19:54:11.588140
| 2014-04-01T05:41:59
| 2014-04-01T05:41:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,739
|
py
|
"""
Settings for permissions_widget.
EXCLUDE_APPS
The permissions widget will exclude any permission for any model in any app
in the EXCLUDE_APPS list. It contains sensible defaults which you can
override: sessions, admin and contenttypes for example, as in most cases
users won't even have the possibility of adding/changing/deleting sessions,
logentries and content types so why even bother proposing permissions for
them ? This would just confuse the admin.
Can be overridden in settings.PERMISSIONS_WIDGET_EXCLUDE_APPS.
EXCLUDE_MODELS
The permissions widget will exclude any permission for any listed model.
Models should be listed in the form of `app.model`.
Can be overridden in settings.PERMISSIONS_WIDGET_EXCLUDE_MODELS.
PATCH_GROUPADMIN
If True, `permissions_widget.admin` will override the registered GroupAdmin
form's user_permission field to use this widget for permissions.
Can be overridden (ie. to False) in
settings.PERMISSIONS_WIDGET_PATCH_GROUPADMIN.
PATCH_USERADMIN
If True, `permissions_widget.admin` will override the registered UserAdmin
form's user_permission field to use this widget for permissions.
Can be overridden (ie. to False) in
settings.PERMISSIONS_WIDGET_PATCH_USERADMIN.
"""
from django.conf import settings
EXCLUDE_APPS = getattr(settings, 'PERMISSIONS_WIDGET_EXCLUDE_APPS', [
'sites', 'reversion', 'contenttypes', 'admin', 'sessions',
'easy_thumbnails',])
EXCLUDE_MODELS = getattr(settings, 'PERMISSIONS_WIDGET_EXCLUDE_MODELS', [
'auth.permission',])
#PATCH_USERADMIN = getattr(settings, 'PERMISSIONS_WIDGET_PATCH_USERADMIN', True)
#PATCH_GROUPADMIN = getattr(settings, 'PERMISSIONS_WIDGET_PATCH_GROUPADMIN', True)
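# A minimal override sketch for a project's settings.py. The two names are
# the setting keys documented above; the values are illustrative assumptions,
# not recommendations from this package:
#
# PERMISSIONS_WIDGET_EXCLUDE_APPS = ['sessions', 'admin', 'contenttypes']
# PERMISSIONS_WIDGET_EXCLUDE_MODELS = ['auth.permission', 'auth.group']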
|
[
"diegofernando83@gmail.com"
] |
diegofernando83@gmail.com
|
84019c47c3970e23b49d08af58fa3ddfb4190e74
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/programming_computer_vision_with_python/cvbook-contrib/ch05_stereo.py
|
8cfe22f5dbe6c0a38f1888e9cb984ffbc05f7bdd
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 465
|
py
|
from PIL import Image
import numpy
import stereo
im_l = numpy.array(Image.open('out_stereo1.ppm').convert('L'), 'f')
im_r = numpy.array(Image.open('out_stereo2.ppm').convert('L'), 'f')
steps = 12
start = 4
wid = 9
res = stereo.plane_sweep_ncc(im_l, im_r, start, steps, wid)
wid = 3
res_gauss = stereo.plane_sweep_gauss(im_l, im_r, start, steps, wid)
import scipy.misc
scipy.misc.imsave('out_depth.png', res)
scipy.misc.imsave('out_depth_gauss.png', res_gauss)
|
[
"bb@b.om"
] |
bb@b.om
|
2d9b1d187eb1175b5bcb291481e18ea6d1dd82b2
|
eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429
|
/data/input/albatrossandco/brubeck_cms/brubeck/common/geography/fields.py
|
a782283b0983066628ff4b3c9c3ac8bb6bd8e614
|
[] |
no_license
|
bopopescu/pythonanalyzer
|
db839453bde13bf9157b76e54735f11c2262593a
|
8390a0139137574ab237b3ff5fe8ea61e8a0b76b
|
refs/heads/master
| 2022-11-22T02:13:52.949119
| 2019-05-07T18:42:52
| 2019-05-07T18:42:52
| 282,079,884
| 0
| 0
| null | 2020-07-23T23:46:09
| 2020-07-23T23:46:08
| null |
UTF-8
|
Python
| false
| false
| 3,849
|
py
|
from django import forms
from django.conf import settings
from django.db import models
from django.utils.safestring import mark_safe
class Coordinates:
def __init__(self, lat, lng):
self.lat = float(lat)
self.lng = float(lng)
def __repr__(self):
return ','.join([str(self.lat), str(self.lng)])
lat = float()
lng = float()
# NOTE: Came from http://www.djangosnippets.org/snippets/615/ (-JCM)
# The development of this code was sponsored by MIG Internacional
# This code is released under the terms of the BSD license
# http://code.djangoproject.com/browser/django/trunk/LICENSE
# Feel free to use it at your whim/will/risk :D
# Contact info: Javier Rojas <jerojasro@gmail.com>
class LocationWidget(forms.widgets.Widget):
def __init__(self, *args, **kw):
super(LocationWidget, self).__init__(*args, **kw)
self.inner_widget = forms.widgets.HiddenInput()
def render(self, name, value, *args, **kwargs):
try:
lat = value.lat
lng = value.lng
except AttributeError:
lat = settings.DEFAULT_LATITUDE
lng = settings.DEFAULT_LONGITUDE
js = '''
</script>
<script type="text/javascript">
//<![CDATA[
var %(name)s_marker ;
$(document).ready(function () {
if (GBrowserIsCompatible()) {
var map = new GMap2(document.getElementById("map_%(name)s"));
map.setCenter(new GLatLng(%(default_lat)s,%(default_lng)s), 13);
%(name)s_marker = new GMarker(new GLatLng(%(default_lat)s,%(default_lng)s), {draggable: true});
map.addOverlay(%(name)s_marker);
map.addControl(new GLargeMapControl());
map.addControl(new GMapTypeControl());
$('#%(name)s_id')[0].value = %(name)s_marker.getLatLng().lat() + "," + %(name)s_marker.getLatLng().lng();
GEvent.addListener(%(name)s_marker, "dragend", function() {
var point = %(name)s_marker.getLatLng();
$('#%(name)s_id')[0].value = point.lat() + "," + point.lng();
});
}});
$(document).unload(function () {GUnload()});
//]]>
</script>
''' % {'name': name, 'default_lat': lat, 'default_lng': lng}
# % dict(name=name)
html = self.inner_widget.render("%s" % name, None, dict(id='%s_id' % name))
html += "<div id=\"map_%s\" style=\"width: 500px; height: 500px\"></div>" % name
return mark_safe(js+html)
class LocationField(forms.Field):
widget = LocationWidget
def clean(self, value):
lat, lng = value.split(',')
return Coordinates(lat, lng)
# My stuff again. (-JCM)
class CoordinatesField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 70
kwargs['default'] = Coordinates(settings.DEFAULT_LATITUDE, settings.DEFAULT_LONGITUDE)
super(CoordinatesField, self).__init__(*args, **kwargs)
def to_python(self, value):
if isinstance(value, Coordinates):
return value
lat, lng = value.split(',')
return Coordinates(lat, lng)
def get_db_prep_value(self, value, connection, prepared=True):
return str(value)
def formfield(self, **kwargs):
defaults = {'form_class': LocationField}
defaults.update(kwargs)
return super(CoordinatesField, self).formfield(**defaults)
def db_type(self, connection):
return 'varchar(70)'
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
lat, lng = str(value).split(',')
return '%s, %s' % (str(lat).strip(), str(lng).strip())
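# A hypothetical usage sketch (the Place model is an assumption, not part of
# this file): CoordinatesField stores "lat,lng" in a varchar(70) column and
# round-trips through Coordinates via to_python()/get_db_prep_value().
#
# class Place(models.Model):
#     location = CoordinatesField()
#
# place = Place(location=Coordinates(40.7, -74.0))
# place.location.lat   # -> 40.7
# str(place.location)  # -> '40.7,-74.0'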
|
[
"rares.begu@gmail.com"
] |
rares.begu@gmail.com
|
46e42a8851daf5a097db3ca58345f605faf477e9
|
67c0d7351c145d756b2a49e048500ff361f7add6
|
/xpresso/ai/admin/infra/packages/package_dependency.py
|
da30e097a2988bd38de9035b38da48c3c253c61f
|
[] |
no_license
|
Krishnaarunangsu/XpressoDataHandling
|
ba339ae85b52e30715f47406ddb74966350848aa
|
0637a465088b468d6fdb6d1bb6f7b087547cec56
|
refs/heads/master
| 2020-06-27T19:58:43.358340
| 2019-08-29T16:59:08
| 2019-08-29T16:59:08
| 200,035,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,870
|
py
|
""" Package Dependency MOdule
"""
from xpresso.ai.admin.controller.exceptions.xpr_exceptions import \
PackageFailedException
__all__ = ["PackageDependency"]
__author__ = "Srijan Sharma"
import json
import os
import networkx as nx
import matplotlib.pyplot as plt
from xpresso.ai.core.utils.xpr_config_parser import XprConfigParser
from xpresso.ai.core.logging.xpr_log import XprLogger
class PackageDependency:
"""
    Creates a directed acyclic package-dependency graph
    from a given dependency JSON file.
"""
NONE_PACKAGE = "None"
DEPENDENCY_SECTION = "pkg_dependency"
DEPENDENCY_CONFIG_FILE = "dependency_config_file"
def __init__(self, config_path=XprConfigParser.DEFAULT_CONFIG_PATH):
super().__init__()
self.config = XprConfigParser(config_path)["packages_setup"]
self.logger = XprLogger()
dependency_config_file = self.config[self.DEPENDENCY_SECTION][
self.DEPENDENCY_CONFIG_FILE]
if not os.path.exists(dependency_config_file):
self.logger.error(("Unable to find the dependency js"
"file at the mentioned path"))
raise PackageFailedException("Invalid dependency config file")
try:
with open(dependency_config_file) as config_fs:
dependency_config = json.load(config_fs)
except EnvironmentError as err:
self.logger.fatal(err)
raise PackageFailedException("Invalid config file")
self.graph = nx.DiGraph()
edges = list()
for key in dependency_config:
for value in dependency_config[key]:
edges.append((key, value))
self.graph.add_edges_from(edges)
if not nx.is_directed_acyclic_graph(self.graph):
self.logger.fatal(("Unable to handle dependencies due to cyclic "
"loop"))
self.graph = None
raise PackageFailedException("Cyclic Dependency Found")
def visualize_dependency_graph(self):
"""
        Plots the directed dependency graph.
"""
if self.graph is None:
self.logger.error("Graph value none cannot be plotted")
return
nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)
plt.show()
def check_if_supported(self, package_name: str):
"""
Args:
package_name(str)
:return:
bool: Return True if supported. False, otherwise
"""
return bool(self.graph.has_node(package_name))
def list_all(self):
"""
Extracts the value of all nodes(packages) present in graph
Returns:
list: Array consisting of all node(packages) value
"""
if self.graph is None:
self.logger.error("Graph value none cannot be iterated")
return list()
nodes = list()
for node in self.graph.nodes():
if node == self.NONE_PACKAGE:
continue
nodes.append(node)
return nodes
def get_dependency(self, package_name: str) -> list:
"""
List of dependencies
Args:
package_name(str): Name of the package
Returns:
list: List of dependencies required for the package_name
installation
"""
if not self.check_if_supported(package_name=package_name):
self.logger.error("{} package not present in config"
.format(package_name))
return list()
self.logger.info(("Running Topological sorting on "
"Package Dependency Graph"))
try:
topological_sort_list = list(reversed(list(
nx.topological_sort(self.graph))))
except nx.NetworkXError as error:
self.logger.error(error)
raise PackageFailedException("Topological sort is defined for "
"directed graphs only")
except nx.NetworkXUnfeasible as error:
self.logger.error(error)
raise PackageFailedException("Not a directed acyclic graph (DAG) "
"and hence no topological sort exists")
descendants = nx.descendants(self.graph, package_name)
dependent_packages = []
for pkg in topological_sort_list:
if pkg in descendants and pkg != self.NONE_PACKAGE:
dependent_packages.append(pkg)
if package_name != self.NONE_PACKAGE:
dependent_packages.append(package_name)
return dependent_packages
if __name__ == "__main__":
pkg_dep = PackageDependency()
pkg_dep.visualize_dependency_graph()
print(pkg_dep.list_all())
print(pkg_dep.get_dependency("PythonPackage"))
|
[
"arunangsutech@gmail.com"
] |
arunangsutech@gmail.com
|
aed4b7492ab51ac5f0be52b4803a554e1a88e1a5
|
d33d25c752aa9604ccbd3ce75a26d31e8a12151a
|
/models/spational_transformer_sampler_interp.py
|
0e4a348dcc3db3c7a4160ae048eb387f2e289d4f
|
[] |
no_license
|
yasunorikudo/sfm-learner-chainer
|
78bbb080c54e6af4278f31448d7b4067492b2dce
|
06d722a2a71ea9c51c4755862be7b211c35ac2b1
|
refs/heads/master
| 2022-07-07T12:03:10.076957
| 2019-08-07T07:39:59
| 2019-08-07T07:39:59
| 200,998,542
| 0
| 0
| null | 2022-06-21T22:28:07
| 2019-08-07T07:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,722
|
py
|
import numpy
import chainer
from chainer import function
from chainer.utils import argument
from chainer.utils import type_check
from chainer import cuda
class SpatialTransformerSamplerInterp(function.Function):
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 == n_in)
x_type = in_types[0]
grid_type = in_types[1]
type_check.expect(
x_type.dtype.char == 'f',
grid_type.dtype.char == 'f',
x_type.ndim == 4,
grid_type.ndim == 4,
grid_type.shape[1] == 2,
x_type.shape[0] == grid_type.shape[0],
)
def forward_cpu(self, inputs):
return self._forward(inputs)
def forward_gpu(self, inputs):
return self._forward(inputs)
def _forward(self, inputs):
x, grid = inputs
xp = cuda.get_array_module(x)
B, C, H, W = x.shape
_, _, out_H, out_W = grid.shape
u = grid[:, 0].reshape(-1)
v = grid[:, 1].reshape(-1)
u0 = xp.floor(u)
u1 = u0 + 1
v0 = xp.floor(v)
v1 = v0 + 1
u0 = u0.clip(0, W - 1)
v0 = v0.clip(0, H - 1)
u1 = u1.clip(0, W - 1)
v1 = v1.clip(0, H - 1)
# weights
wt_x0 = u1 - u
wt_x1 = u - u0
wt_y0 = v1 - v
wt_y1 = v - v0
w1 = wt_x0 * wt_y0
w2 = wt_x1 * wt_y0
w3 = wt_x0 * wt_y1
w4 = wt_x1 * wt_y1
w1 = w1.astype(x.dtype)
w2 = w2.astype(x.dtype)
w3 = w3.astype(x.dtype)
w4 = w4.astype(x.dtype)
u0 = u0.astype(numpy.int32)
v0 = v0.astype(numpy.int32)
u1 = u1.astype(numpy.int32)
v1 = v1.astype(numpy.int32)
batch_index = xp.repeat(xp.arange(B), out_H * out_W)
y = w1[:, None] * x[batch_index, :, v0, u0]
y += w2[:, None] * x[batch_index, :, v0, u1]
y += w3[:, None] * x[batch_index, :, v1, u0]
y += w4[:, None] * x[batch_index, :, v1, u1]
y = y.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
return y,
def backward_cpu(self, inputs, grad_outputs):
return self._backward(inputs, grad_outputs)
def backward_gpu(self, inputs, grad_outputs):
return self._backward(inputs, grad_outputs)
def _backward(self, inputs, grad_outputs):
x, grid = inputs
xp = cuda.get_array_module(x)
gy, = grad_outputs
B, C, H, W = x.shape
_, _, out_H, out_W = grid.shape
u = grid[:, 0].reshape(-1)
v = grid[:, 1].reshape(-1)
# indices of the 2x2 pixel neighborhood surrounding the coordinates
u0 = xp.floor(u)
u1 = u0 + 1
v0 = xp.floor(v)
v1 = v0 + 1
u0 = u0.clip(0, W - 1)
v0 = v0.clip(0, H - 1)
u1 = u1.clip(0, W - 1)
v1 = v1.clip(0, H - 1)
# weights
wt_x0 = u1 - u
wt_x1 = u - u0
wt_y0 = v1 - v
wt_y1 = v - v0
wt_x0 = wt_x0.astype(gy.dtype)
wt_x1 = wt_x1.astype(gy.dtype)
wt_y0 = wt_y0.astype(gy.dtype)
wt_y1 = wt_y1.astype(gy.dtype)
u0 = u0.astype(numpy.int32)
v0 = v0.astype(numpy.int32)
u1 = u1.astype(numpy.int32)
v1 = v1.astype(numpy.int32)
batch_index = xp.repeat(xp.arange(B), out_H * out_W)
x_indexed_1 = x[batch_index, :, v0, u0]
x_indexed_2 = x[batch_index, :, v0, u1]
x_indexed_3 = x[batch_index, :, v1, u0]
x_indexed_4 = x[batch_index, :, v1, u1]
gu = -wt_y0[:, None] * x_indexed_1
gu += wt_y0[:, None] * x_indexed_2
gu -= wt_y1[:, None] * x_indexed_3
gu += wt_y1[:, None] * x_indexed_4
gv = -wt_x0[:, None] * x_indexed_1
gv -= wt_x1[:, None] * x_indexed_2
gv += wt_x0[:, None] * x_indexed_3
gv += wt_x1[:, None] * x_indexed_4
gu = gu.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
gv = gv.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
gu *= gy
gv *= gy
gu = xp.sum(gu, axis=1)
gv = xp.sum(gv, axis=1)
        # Stack the u- and v-gradients into the grid gradient.
        ggrid = xp.concatenate((gu[:, None], gv[:, None]), axis=1)
        gx = xp.zeros_like(x)  # gradient w.r.t. the input image is not propagated
return gx, ggrid
def spatial_transformer_sampler_interp(x, grid, **kwargs):
argument.check_unexpected_kwargs(
kwargs, use_cudnn="The argument \"use_cudnn\" is not "
"supported anymore. "
"Use chainer.using_config('use_cudnn', value) "
"context where value can be `always`, `never`, or `auto`.")
argument.assert_kwargs_empty(kwargs)
return SpatialTransformerSamplerInterp()(x, grid)
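# A minimal shape-level sketch of the sampler (illustrative values; assumes
# chainer and numpy are installed, and that numpy arrays are passed directly):
#
# import numpy as np
# x = np.zeros((1, 3, 8, 8), dtype=np.float32)     # (B, C, H, W) feature map
# grid = np.zeros((1, 2, 4, 4), dtype=np.float32)  # (B, 2, out_H, out_W) sampling grid
# y = spatial_transformer_sampler_interp(x, grid)
# y.shape  # -> (1, 3, 4, 4)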
|
[
"yukitsuji020832@gmail.com"
] |
yukitsuji020832@gmail.com
|
02cc57abadc1b35abd7611414ff7e4803bf5be52
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/minCost_20200826170809.py
|
416df3034399b192e979acb250e8e4c2b5d35bb5
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
def minCost(days, costs):
    # Minimum cost to cover all travel days with 1-, 7-, and 30-day passes,
    # via dynamic programming over calendar days. The original draft indexed
    # a 15-day window for the third pass and wrote one slot past the end of
    # `ways`; both are corrected here.
    last_day = days[-1]
    ways = [0] * (last_day + 1)
    travel_days = set(days)
    for i in range(1, last_day + 1):
        if i not in travel_days:
            ways[i] = ways[i - 1]
            continue
        total = ways[i - 1] + costs[0]
        total1 = ways[max(i - 7, 0)] + costs[1]
        total2 = ways[max(i - 30, 0)] + costs[2]
        ways[i] = min(total, total1, total2)
    print(ways)  # ways[20] -> 11 for the sample below
minCost([1,4,6,7,8,20],[2,7,15])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
a0b93c64d418d2f9953bebcebd810c6e68451a2e
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_088/ch20_2020_09_11_22_25_42_229462.py
|
9fda66b3766ee69c033f0fc481cd067588ecd032
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
distância = float(input("Digite a distância a percorrer: "))
if (distância <= 200):
total = 0.5 * distância
print("%.2f" %total)
else:
total = 0.45 * (distância)
print("%.2f" %total)
|
[
"you@example.com"
] |
you@example.com
|
16a1f15487d6a04ef8e315b7e87984f406ce40f4
|
f4b60f5e49baf60976987946c20a8ebca4880602
|
/lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/vns/rtconntoaconninst.py
|
0a35dac1b96046c398e2ffe6d152e8610f74460e
|
[] |
no_license
|
cqbomb/qytang_aci
|
12e508d54d9f774b537c33563762e694783d6ba8
|
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
|
refs/heads/master
| 2022-12-21T13:30:05.240231
| 2018-12-04T01:46:53
| 2018-12-04T01:46:53
| 159,911,666
| 0
| 0
| null | 2022-12-07T23:53:02
| 2018-12-01T05:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 5,644
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtConnToAConnInst(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = TargetRelationMeta("cobra.model.vns.RtConnToAConnInst", "cobra.model.vns.FuncConnInst")
meta.moClassName = "vnsRtConnToAConnInst"
meta.rnFormat = "rtconnToAConnInst-[%(tDn)s]"
meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
meta.label = "Connector Instance"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x6000000000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.vns.AbsFuncConn")
meta.parentClasses.add("cobra.model.vns.FuncConnInst")
meta.parentClasses.add("cobra.model.vns.AbsTermConn")
meta.parentClasses.add("cobra.model.vns.TermConnInst")
meta.superClasses.add("cobra.model.reln.From")
meta.superClasses.add("cobra.model.reln.Inst")
meta.rnPrefixes = [
('rtconnToAConnInst-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 20739, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 4830
prop.defaultValueStr = "vnsFuncConnInst"
prop._addConstant("unspecified", "unspecified", 0)
prop._addConstant("vnsFuncConnInst", None, 4830)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 20738, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("tDn", prop)
meta.namingProps.append(getattr(meta.props, "tDn"))
getattr(meta.props, "tDn").needDelimiter = True
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsGraphToNwIf", "Physical Interfaces", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsNodeToNwIf", "Physical Interfaces", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("NodeInstToNwIf", "Physical Interfaces", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsGraphToCompVNic", "Virtual Nics", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsNodeToCompVNic", "Virtual Nics", "cobra.model.comp.VNic"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("NodeInstToCompVNic", "Virtual Nics", "cobra.model.comp.VNic"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AbsGraphToGraphInst", "Graph Instances", "cobra.model.vns.GraphInst"))
def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
namingVals = [tDn]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"collinsctk@qytang.com"
] |
collinsctk@qytang.com
|
845bd92c060e393e1feb07efce537f9a3b65d67b
|
f3cd7727bb731e359e93e86771ed66ccc4587937
|
/generic_images/managers.py
|
d462ed0e927ec90f5d3d9b728c53544c74c016ad
|
[
"MIT"
] |
permissive
|
kmike/django-generic-images
|
bb8344751c27056c88abedb6a3669204f0b5b25b
|
4e45068ed219ac35396758eb6b6e1fe5306147df
|
refs/heads/origin/master
| 2023-08-18T04:12:04.668596
| 2009-12-25T15:45:13
| 2009-12-25T15:45:13
| 2,316,219
| 5
| 3
| null | 2017-11-10T15:16:30
| 2011-09-02T20:16:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,306
|
py
|
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.db.models import get_model
from generic_utils.managers import GenericModelManager
def get_model_class_by_name(name):
app_label, model_name = name.split(".")
model = get_model(app_label, model_name, False)
return model
class ImagesAndUserManager(models.Manager):
""" Useful manager for models that have AttachedImage (or subclass) field
and 'injector=GenericIngector()' manager.
"""
def __init__(self, *args, **kwargs):
try:
image_model_class = kwargs.pop('image_model_class')
except KeyError:
image_model_class = 'generic_images.AttachedImage'
self.image_model_class = get_model_class_by_name(image_model_class)
super(ImagesAndUserManager, self).__init__(*args, **kwargs)
def select_with_main_images(self, limit=None, **kwargs):
''' Select all objects with filters passed as kwargs.
For each object it's main image instance is accessible as ``object.main_image``.
Results can be limited using ``limit`` parameter.
Selection is performed using only 2 or 3 sql queries.
'''
objects = self.get_query_set().filter(**kwargs)[:limit]
self.image_model_class.injector.inject_to(objects,'main_image', is_main=True)
return objects
def for_user_with_main_images(self, user, limit=None):
return self.select_with_main_images(user=user, limit=limit)
def get_for_user(self, user):
objects = self.get_query_set().filter(user=user)
return objects
class AttachedImageManager(GenericModelManager):
''' Manager with helpful functions for attached images
'''
def get_for_model(self, model):
''' Returns all images that are attached to given model.
Deprecated. Use `for_model` instead.
'''
return self.for_model(model)
def get_main_for(self, model):
'''
Returns main image for given model
'''
try:
return self.for_model(model).get(is_main=True)
except models.ObjectDoesNotExist:
return None
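# A hypothetical usage sketch (Article and the manager wiring on AttachedImage
# are assumptions, not defined in this file):
#
# main_image = AttachedImage.objects.get_main_for(article)
# articles = Article.objects.select_with_main_images(limit=10)
# articles[0].main_image  # injected by select_with_main_images()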
|
[
"kmike84@gmail.com"
] |
kmike84@gmail.com
|
63456deeb37fe3d0953db49310e7b28446f990fe
|
f4924a0a6d1eb17f3b7dca035f7dedfe0231254a
|
/src/dsgrn_net_query/queries/CountStableFC_large_networks.py
|
be619af9d650d362d75488d495c1be52ad016a78
|
[
"MIT"
] |
permissive
|
julianfox8/dsgrn_net_query
|
b22f4ac3f75a6d0d21fc7b3a703389486b7a27f6
|
89df8bded9d60384864b04703ef52dfbd52632d9
|
refs/heads/master
| 2023-08-22T08:36:01.137658
| 2021-10-01T17:25:50
| 2021-10-01T17:25:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,304
|
py
|
import DSGRN
import os, json, sys,subprocess,ast,shutil
def query(network_file,params_file,resultsdir=""):
'''
:param network_file: a .txt file containing either a single DSGRN network specification or a list of network specification strings in DSGRN format
:param params_file: A json file with the keys
"num_proc" = number of processes to use for each database creation
"count" = True or False (true or false in .json format)
whether or not to return the number of matches (True) or just whether or not there is at least one match (False)
"datetime" : optional datetime string to append to subdirectories in resultsdir, default = system time
:param resultsdir: optional path to directory where results will be written, default is current directory
:return: Writes a .json file containing a dictionary keyed by DSGRN network specification with a list of results.
The results are DSGRN parameter count that have at least one Morse set that is a stable full cycle,
or True (existence of at least one stable full cycle) or False (none exist), depending on the value of the parameter "count".
The size of the DSGRN parameter graph for the network is also recorded.
{ networkspec : [result, DSGRN param graph size] }.
'''
networks = read_networks(network_file)
params = json.load(open(params_file))
datetime = None if "datetime" not in params else params["datetime"]
if not networks:
raise ValueError("No networks available for analysis. Make sure network file is in the correct format.")
else:
num_proc, count = sanity_check(params)
results = {}
for k,netspec in enumerate(networks):
netfile = "temp{}.txt".format(k)
dbfile = "temp{}.db".format(k)
if os.path.exists(dbfile):
os.remove(dbfile)
with open(netfile,"w") as f:
f.write(netspec)
subprocess.check_call("mpiexec -n {} Signatures {} {}".format(num_proc,netfile,dbfile),shell=True)
db = DSGRN.Database(dbfile)
N = db.parametergraph.size()
matches = len(DSGRN.StableFCQuery(db).matches())
if count:
results[netspec] = (matches,N)
else:
results[netspec] = (matches > 0, N)
subprocess.call(["rm",netfile])
subprocess.call(["rm",dbfile])
print("Network {} of {} complete".format(k + 1, len(networks)))
sys.stdout.flush()
record_results(network_file,params_file,results,resultsdir,datetime)
def sanity_check(params):
'''
Checks to be sure the correct keys are in the dictionary params.
:param params: dictionary
:return: Either the values of the keys "num_proc" and "count" in the parameter dictionary, or an error is raised.
'''
if "num_proc" not in params or "count" not in params:
raise ValueError("The keys 'num_proc' and 'count' must be specified in the parameter file.")
return params["num_proc"],params["count"]
def record_results(network_file,params_file,results,resultsdir,datetime):
'''
Record results in a .json file.
:param network_file: The input .txt file containing the list of DSGRN network specifications.
:param params_file: The input .json parameter file.
:param results: The dictionary of results.
:param resultsdir: The location to save the dictionary of results.
:param datetime: None or string with datetime
:return: None. File is written.
'''
resultsdir = create_results_folder(network_file, params_file, resultsdir,datetime)
rname = os.path.join(resultsdir,"query_results.json")
if os.path.exists(rname):
os.rename(rname,rname+".old")
json.dump(results,open(rname,'w'))
print(resultsdir)
def read_networks(network_file):
'''
NOTE: Forced to copy from file_utilities due to collision between import of MPI and the mpiexec call inside this file.
Read a .txt network file that has either a single DSGRN network specification or a list of them
:param networks: A .txt file containing a single DSGRN network specification or a list of network specifications,
:return: list of DSGRN network specifications
'''
network_str = open(network_file).read()
if not network_str:
networks = []
elif network_str[0] == "[":
networks = ast.literal_eval(network_str)
else:
while network_str[-1] == '\n':
network_str = network_str[:-1]
networks = [network_str]
return networks
def create_results_folder(network_file, params_file, resultsdir,datetime):
'''
NOTE: Forced to copy from file_utilities due to collision between import of MPI and the mpiexec call inside this file.
Create a date-time stamped folder to save results. Copy over input files.
:param network_file: A .txt file
:param params_file: A .json file
:param resultsdir: optional path to directory where results will be written
:return: string containing path to date-time stamped directory to save results file
'''
if datetime is None:
datetime = subprocess.check_output(['date +%Y_%m_%d_%H_%M_%S'], shell=True).decode(sys.stdout.encoding).strip()
dirname = os.path.join(os.path.expanduser(resultsdir), "dsgrn_net_query_results" + datetime)
queriesdir = os.path.join(dirname, "queries" + datetime)
os.makedirs(queriesdir)
sys.stdout.flush()
inputfilesdir = os.path.join(dirname, "inputs" + datetime)
os.makedirs(inputfilesdir)
# save input files to computations folder
shutil.copy(network_file, inputfilesdir)
if params_file:
shutil.copy(params_file, inputfilesdir)
return queriesdir
if __name__ == "__main__":
if len(sys.argv) < 3:
print(
"Calling signature has two required arguments \n " \
"python CountStableFC_large_networks.py <path_to_network_file> <path_to_parameter_file>"
)
exit(1)
network_file = sys.argv[1]
params_file = sys.argv[2]
if len(sys.argv) > 3:
resultsdir = sys.argv[3]
query(network_file, params_file, resultsdir)
else:
query(network_file, params_file)
|
[
"breecummins@gmail.com"
] |
breecummins@gmail.com
|
ad41c5695cf98fe7852c8050c4ce5462a713dacf
|
de0d5fafb49f603ca4979d6f4c8eba52888714c2
|
/applied_social_network_analysis/network_connectivity/visualizing_networks.py
|
fac080b713f74e529d55299dc831ae26587a0fd8
|
[] |
no_license
|
sivaneshl/python_data_analysis
|
1ab42569d5cc843f79765332a30769588447d6f6
|
36af66ae9e03827f5dfe3cc64d993b84b1b31e9b
|
refs/heads/master
| 2020-09-11T17:28:51.459573
| 2020-07-05T18:43:59
| 2020-07-05T18:43:59
| 222,137,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,977
|
py
|
import networkx as nx
import matplotlib.pyplot as plt
G = nx.read_gpickle('resources/major_us_cities')
fig = plt.figure(figsize=(10, 9))
nx.draw_networkx(G) # uses default spring layout
# using random layout
plt.figure(figsize=(10, 9))
pos = nx.random_layout(G)
nx.draw_networkx(G, pos)
# circular layout
plt.figure(figsize=(10, 9))
pos = nx.circular_layout(G)
nx.draw_networkx(G, pos)
# using own layout by passing positions as the 'location' attribute
plt.figure(figsize=(10, 9))
pos = nx.get_node_attributes(G, 'location')
nx.draw_networkx(G, pos)
# change attributes
plt.figure(figsize=(10, 9))
nx.draw_networkx(G, pos, alpha=0.7, # transparency
with_labels=False, # remove labels
edge_color='0.4') # make edges grey
plt.axis('off') # remove the axis
plt.tight_layout() # reduce padding
# change node color, size and edge width
plt.figure(figsize=(10, 7))
node_color = [G.degree(v) for v in G] # set the node color based on the degree of the node
node_size = [0.0005*nx.get_node_attributes(G, 'population')[v] for v in G] # set the node size based on the population attribute
edge_width = [0.0005*G[u][v]['weight'] for u, v in G.edges()] # set the edge width based on weight of the edge
nx.draw_networkx(G, pos, node_size=node_size, node_color=node_color, width=edge_width,
                 alpha=0.7, with_labels=False, edge_color='0.4', cmap=plt.cm.Blues)
plt.axis('off') # remove the axis
plt.tight_layout() # reduce padding
# draw specific edges and add labels to specific nodes
greater_than_770 = [x for x in G.edges(data=True) if x[2]['weight'] > 770]
nx.draw_networkx_edges(G, pos, edgelist=greater_than_770, edge_color='r', alpha=0.7, width=6)
nx.draw_networkx_labels(G, pos, labels={'Los Angeles, CA': 'LA', 'New York, NY': 'NYC'},
font_size=18, font_color='white')
plt.axis('off') # remove the axis
plt.tight_layout() # reduce padding
plt.show()
|
[
"sivaneshl@virtusa.com"
] |
sivaneshl@virtusa.com
|
496912f6a5efc1cfacb3505a445c8d08b57768e8
|
601a5ac66309608772db5a9fa65faca4a0acad4f
|
/spyder/plugins/completion/providers/snippets/widgets/__init__.py
|
6a3215fe6b5108d8e1e1b68a4a100bb027af8530
|
[
"LGPL-2.0-or-later",
"BSD-3-Clause",
"LGPL-3.0-only",
"LicenseRef-scancode-free-unknown",
"LGPL-3.0-or-later",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-or-later",
"CC-BY-2.5",
"CC-BY-4.0",
"MIT",
"LGPL-2.1-only",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"OFL-1.1",
"Python-2.0",
"GPL-2.0-only",
"Apache-2.0",
"GPL-3.0-only",
"GPL-1.0-or-later"
] |
permissive
|
juanis2112/spyder
|
ea5e5727d4dbec5c3e40cb87aad644cc722ff27e
|
0b4929cef420ba6c625566e52200e959f3566f33
|
refs/heads/master
| 2021-08-09T15:14:49.011489
| 2021-04-28T20:18:06
| 2021-04-28T20:18:06
| 158,863,080
| 1
| 1
|
MIT
| 2018-11-23T17:50:04
| 2018-11-23T17:50:04
| null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
# -*- coding: utf-8 -*-
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Snippets related widgets."""
from .snippetsconfig import (
SnippetModelsProxy, SnippetTable, SUPPORTED_LANGUAGES_PY, PYTHON_POS)
|
[
"ccordoba12@gmail.com"
] |
ccordoba12@gmail.com
|
075faaca072840771480c8dad744b6400d118856
|
6268655719a46c9d2b6b38ea21babd8b877724dd
|
/ecom/urls.py
|
c96fa0cafc244270befb0361102e589c71c8180a
|
[] |
no_license
|
MahmudulHassan5809/Ecommerce-Django
|
f84b968621eed61fdf08c55cd43c7a09d8bc8ba7
|
f416536a6b5ce583283139e7271f3fcd1da49739
|
refs/heads/master
| 2022-12-31T15:39:34.405140
| 2020-10-24T18:15:38
| 2020-10-24T18:15:38
| 292,297,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
from django.urls import reverse_lazy
from django.views.generic.base import TemplateView
app_name = "ecom"
urlpatterns = [
path('', views.HomeView.as_view(), name="home"),
path('category/<str:category_slug>/<int:category_id>/',
views.CategoryView.as_view(), name="category_view"),
path('category/product/filter/<int:category_id>/',
views.CategoryFilterView.as_view(), name='category_filter'),
path('search/product/',
views.CategoryFilterView.as_view(), name='search_product'),
path('sub-category/product/<int:category_id>/<int:subcat_id>/',
views.CategoryFilterView.as_view(), name='subcategory_product'),
path('product/detail/<str:product_slug>/<int:pk>/',
views.ProductDetailView.as_view(), name='product_detail'),
path('add-wishlist/<int:product_id>/',
views.AddWishListView.as_view(), name='add_wishlist'),
path('remove-wishlist/<int:product_id>/',
views.RemoveWishListView.as_view(), name='remove_wishlist'),
path('add-compare/<int:product_id>/',
views.AddCompareView.as_view(), name='add_compare'),
path('remove-compare/<int:product_id>/',
views.RemoveCompareView.as_view(), name='remove_compare'),
path('product/rating/<int:product_id>/',
views.ProductReviewView.as_view(), name='product_review')
]
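# A hypothetical reverse() sketch for the named routes above (the final URL
# prefix depends on how this app's urlconf is included by the project):
#
# from django.urls import reverse
# reverse('ecom:product_detail', args=['some-product', 42])
# # -> '/product/detail/some-product/42/'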
|
[
"mahmudul.hassan240@gmail.com"
] |
mahmudul.hassan240@gmail.com
|
775f151f9bac97b1672a3701d47cd1066bbde102
|
b23d627c04402ffaafdf6bf3af4e40ee027d015b
|
/viscum/scripting/exception.py
|
f85e17d80f46d223cc08a7da940581e59b8d6986
|
[
"MIT"
] |
permissive
|
brunosmmm/viscum
|
fad2e26f33eab74165633905144d6e8ccd205fb9
|
a6b90ae6203998fc016ef89972a3b5d6cf441eb0
|
refs/heads/master
| 2021-01-17T11:16:08.725747
| 2018-03-23T13:59:44
| 2018-03-23T13:59:44
| 55,721,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
"""Scripting Exceptions."""
class InvalidModuleError(Exception):
"""Invalid module."""
pass
class DeferScriptLoading(Exception):
"""Defer script loading."""
pass
class ScriptSyntaxError(Exception):
"""Script syntax error."""
pass
class CancelScriptLoading(Exception):
"""Cancel script loading process."""
pass
|
[
"brunosmmm@gmail.com"
] |
brunosmmm@gmail.com
|
ddc9ee2417f9490178e8cb2ea3a9cf5a360d9328
|
53e2254b83ac5ac71ff390a7c77070ff97b31c0b
|
/max_subarray.py
|
234927450cf501f40718aefbeab37a510db496a4
|
[] |
no_license
|
Ponkiruthika112/codekataset1
|
83a02b96a6b35c33ae7c5a6d6b21c34e63a7eef4
|
4f164864a59e22122b647dd62d36d24e7ace7dac
|
refs/heads/master
| 2020-04-15T04:58:44.427824
| 2019-09-06T10:10:12
| 2019-09-06T10:10:12
| 164,404,367
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
def subarray(s):
    # Return every contiguous substring of s.
    subs = []
    for i in range(len(s)):
        for j in range(i + 1, len(s) + 1):
            subs.append(s[i:j])
    return subs
# Longest common substring of two input strings, by brute force.
# Assumes the inputs share at least one common substring.
a = input()
b = input()
x = subarray(a)
y = set(subarray(b))  # set membership instead of O(n) list.count
d = []
for i in x:
    if i in y:
        d.append([len(i), i])
d.sort(reverse=True)
print(d[0][1])  # e.g. "abcde" and "cdef" -> "cde"
|
[
"noreply@github.com"
] |
Ponkiruthika112.noreply@github.com
|
3b2c7bf4b9c033f46fa9264303c0326bb30d648c
|
12c2168d1b2db8de3246f59e8f911a0a40ec0512
|
/Produto/forms.py
|
b8a944f8220f46ce172ea1d92eea43233709754f
|
[] |
no_license
|
carlafcf/BD_TADS
|
1bc145aa8668f994ec45fb8dc20c0505a86cbbc5
|
72e835a281dade32072c4715d91825ed8b7483ca
|
refs/heads/master
| 2023-04-03T07:54:48.646902
| 2021-03-30T00:06:36
| 2021-03-30T00:06:36
| 341,566,522
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
from django import forms
from django.db import connection
from django.core.exceptions import ValidationError
from .models import Produto
class ProdutoForm(forms.ModelForm):
class Meta:
model = Produto
fields = ['nome', 'descricao', 'fornecedor', 'quantidade_maxima',
'valor_unitario', 'licitacao', 'no_item']
def clean(self):
cleaned_data = super().clean()
        # Get the name that was submitted in the form
nome = cleaned_data.get("nome")
        # Query for products that already use this name
with connection.cursor() as cursor:
cursor.execute("SELECT * FROM Produto_produto WHERE nome=%s", [nome])
resultado_produto = cursor.fetchall()
        # If the list is not empty, a product with this name already exists
if (len(resultado_produto) != 0):
raise ValidationError("Já foi criado um produto com este nome. Escolha outro nome.")
|
[
"carlafcf@gmail.com"
] |
carlafcf@gmail.com
|
2c4578d7aad69ef2eb58b0b9ef7d419426c3e8b0
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_138/1393.py
|
7306be9c406116a4b86b2512dbd82fc3a7f4b436
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
def compare_list(l1, l2):
l = []
for i,j in zip(l1,l2):
if i > j:
l.append(1)
else:
l.append(-1)
return l
def dwar(w1, w2):
w1.sort()
w2.sort()
while len(w1) > 0:
l = compare_list(w1, w2)
lset = list(set(l))
if len(lset) == 1 and lset[0] == 1:
return len(w1)
w1.pop(0)
w2.pop(-1)
return 0
# def dchoose_block(w1, w2):
# # naomi cheats, arranges ken's block from big to small and let him win initially
# # we expect w1 and w2 to be sorted
# if
#
def war(w1, w2):
score = 0
w2.sort()
for weight1 in w1:
optimal_weight = choose_block(w2, weight1)
if weight1 > optimal_weight:
score += 1
w2.pop(w2.index(optimal_weight))
return score
def choose_block(w, b):
# we expect w to be sorted
if b > w[-1]:
# use the minimum
return w[0]
# use the minimum that's higher than b
l = [x if x > b else 100 for x in w]
    l.sort()
return l[0]
def main():
T = int(raw_input())
for i in range(T):
n = int(raw_input())
w1 = [float(a) for a in raw_input().split(" ")]
w2 = [float(a) for a in raw_input().split(" ")]
ww1 = w1[:]
ww2 = w2[:]
w1.sort()
w2.sort()
print("Case #%d: %d %d" % (i+1, dwar(w1, w2), war(ww1, ww2)))
if __name__ == "__main__":
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
143ded849c4e7c0e8ca61a4374f43a742eb7fd22
|
9c84378e88df12a83d3ca6dde5d16b76e3778a1b
|
/appengine/gce-backend/handlers_queues.py
|
a4b113c55398b459ac8e4955fe494dcd81c42646
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
eakuefner/luci-py
|
681364457a43724965ee70168354e1c097e4d3df
|
d9a337e2fd5151eac24b3164963e086091d769a3
|
refs/heads/master
| 2021-01-15T14:58:37.310142
| 2015-10-06T19:08:08
| 2015-10-06T19:08:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,628
|
py
|
# Copyright 2015 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Task queues for the GCE Backend."""
import json
import logging
from google.appengine.ext import ndb
import webapp2
from components import decorators
from components import machine_provider
from components import net
import models
@ndb.transactional
def uncatalog_instances(instances):
"""Uncatalogs cataloged instances.
Args:
instances: List of instance names to uncatalog.
"""
put_futures = []
get_futures = [
models.Instance.generate_key(instance_name).get_async()
for instance_name in instances
]
while get_futures:
ndb.Future.wait_any(get_futures)
instances = [future.get_result() for future in get_futures if future.done()]
get_futures = [future for future in get_futures if not future.done()]
for instance in instances:
if instance.state == models.InstanceStates.CATALOGED:
# handlers_cron.py sets each Instance's state to
# CATALOGED before triggering InstanceGroupCataloger.
logging.info('Uncataloging instance: %s', instance.name)
instance.state = models.InstanceStates.UNCATALOGED
put_futures.append(instance.put_async())
else:
logging.info('Ignoring already uncataloged instance: %s', instance.name)
if put_futures:
ndb.Future.wait_all(put_futures)
else:
logging.info('Nothing to uncatalog')
class InstanceGroupCataloger(webapp2.RequestHandler):
"""Worker for cataloging instance groups."""
@decorators.require_taskqueue('catalog-instance-group')
def post(self):
"""Reclaim a machine.
Params:
dimensions: JSON-encoded string representation of
machine_provider.Dimensions describing the members of the instance
group.
instances: JSON-encoded list of instances in the instance group to
        catalog.
policies: JSON-encoded string representation of machine_provider.Policies
governing the members of the instance group.
"""
dimensions = json.loads(self.request.get('dimensions'))
instances = json.loads(self.request.get('instances'))
policies = json.loads(self.request.get('policies'))
requests = []
instances_to_uncatalog = set()
for instance_name in instances:
instances_to_uncatalog.add(instance_name)
requests.append({
'dimensions': dimensions.copy(), 'policies': policies})
requests[-1]['dimensions']['hostname'] = instance_name
try:
responses = machine_provider.add_machines(requests).get('responses', {})
except net.Error as e:
logging.warning(e)
responses = {}
for response in responses:
request = response.get('machine_addition_request', {})
error = response.get('error')
instance_name = request.get('dimensions', {}).get('hostname')
if instance_name in instances:
if not error:
logging.info('Instance added to Catalog: %s', instance_name)
instances_to_uncatalog.discard(instance_name)
elif error == 'HOSTNAME_REUSE':
logging.warning('Hostname reuse in Catalog: %s', instance_name)
instances_to_uncatalog.discard(instance_name)
else:
logging.warning('Instance not added to Catalog: %s', instance_name)
else:
logging.info('Unknown instance: %s', instance_name)
uncatalog_instances(instances_to_uncatalog)
def create_queues_app():
return webapp2.WSGIApplication([
('/internal/queues/catalog-instance-group', InstanceGroupCataloger),
])
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
a9af732e804394d5d2b35f5a479a30695122d13a
|
46ef191ca0c170ca1d8afc5eb5134de52eba15f1
|
/abc167/venv/D.py
|
52607613479c21ed6d9f20b94042a0ff10aeb2a9
|
[] |
no_license
|
anthonyouch/Competitive-Programming-
|
9a84cd7ff4b816d2e7ece4e4d6438dbeb23f5795
|
39109a7be1cd007bd0080a9694ac256efc10eab9
|
refs/heads/master
| 2023-03-04T00:49:00.688118
| 2021-02-05T13:19:46
| 2021-02-05T13:19:46
| 334,131,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
# ABC167 D: follow the teleporter k times from town 1. Walk until a town
# repeats, split the walk into a non-repeating prefix and a cycle, then
# index into the cycle instead of simulating all k steps.
import sys
n, k = [int(i) for i in input().split()]
lst = [int(i) for i in input().split()]
lst.insert(0, 0)
path = [1]
curr = 1
visited = set()
while True:
if lst[curr] in visited:
path.append(lst[curr])
#print(path)
index_val = path.index(lst[curr])
pre_path = path[:index_val + 1]
path = path[index_val + 1:]
break
visited.add(lst[curr])
path.append(lst[curr])
curr = lst[curr]
if k <= len(pre_path):
print(pre_path[k])
sys.exit()
else:
k-= (len(pre_path) - 1)
remainder = k % len(path)
#print(pre_path)
#print(path)
print(path[remainder - 1 ])
|
[
"anthonyouch.programmer@gmail.com"
] |
anthonyouch.programmer@gmail.com
|
96db5839bb144bc4546626c155142610d2a4061a
|
87d13c3c1e4d37909a584ae5be5abd5576dafb9b
|
/backend/todos/migrations/0001_initial.py
|
34753a9ef9bd73a7895aee22b6ad24d76e98806f
|
[] |
no_license
|
Tanmoy-Sarkar/Todo-App-with-Django-Rest-Framework
|
8c5a6fcf2e5d6d15bcb8acbc421aefb0b9e5519d
|
d8dc88968a94c74b6d3dab008abdab68088aacb6
|
refs/heads/master
| 2023-07-29T00:28:51.198787
| 2020-08-12T05:51:24
| 2020-08-12T05:51:24
| 278,842,084
| 0
| 0
| null | 2021-09-22T19:30:38
| 2020-07-11T10:48:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 531
|
py
|
# Generated by Django 3.0.8 on 2020-07-11 10:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=300)),
('body', models.TextField()),
],
),
]
|
[
"tanmoy9920@gmail.com"
] |
tanmoy9920@gmail.com
|
fb845fdfab1ea433b53665abb0f88557207567d7
|
6ff4671a00db5b5b97eea71f80b30dd4ff3ca020
|
/Notebooks/Stride_testing.py
|
1076f13f26c8dce5fb0bff66b691279c729f93ca
|
[
"MIT"
] |
permissive
|
jason-neal/equanimous-octo-tribble
|
36cbe912282bb9210a8fc4e959795bbda1a5f1e4
|
a8788909331034725afe38ae96c83584b17c9fbd
|
refs/heads/master
| 2021-01-23T19:57:05.022746
| 2018-07-18T21:37:27
| 2018-07-18T21:37:27
| 46,361,899
| 1
| 1
|
MIT
| 2020-06-11T09:35:48
| 2015-11-17T17:00:51
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,037
|
py
|
# coding: utf-8
# # Testing numpy Stride
# For snr calculation windowing
# In[21]:
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from astropy.io import fits
from numpy.lib import stride_tricks
get_ipython().magic('matplotlib inline')
# In[22]:
fname = "Test_spectra.fits"
data = fits.getdata(fname)
hdr = fits.getheader(fname)
wl = data["Wavelength"]
I = data["Extracted_DRACS"]
# print(type(I))
print(I.dtype)
wl = np.array(wl, dtype="float64") # Turn >f4 into float64
I = np.array(I, dtype="float64") # Turn >f4 into float64
print(I.dtype)
print(I)
# In[ ]:
binsize = 100
# Try using stride on np.array
# striding
nums = np.arange(len(I), dtype="int")
print("itemsize", nums.itemsize, "dtype", nums.dtype)
hop_length = 1
# stride_tests with numbers
frame_length = binsize
num_frames = 1 + (len(nums) - frame_length) // hop_length  # integer division: as_strided needs an int shape
row_stride = nums.itemsize * hop_length # *hopesize
print(frame_length)
print(num_frames)
print(row_stride)
col_stride = nums.itemsize
nums_strided = stride_tricks.as_strided(nums, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
print("nums", nums)
print("nums_strided =", nums_strided)
# row wise transform
row_sum = np.sum(nums_strided, axis=1)
# print(row_sum)
snr = 1 / np.std(nums_strided, axis=1)
print(snr)
# In[ ]:
# with I
frame_length = binsize
num_frames = 1 + (len(I) - frame_length) // hop_length  # integer division for the strided shape
row_stride = I.itemsize * hop_length # *hopesize
print(frame_length)
print(num_frames)
print(row_stride)
col_stride = I.itemsize
I_strided = stride_tricks.as_strided(I, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
# print("nums", I)
# print("nums_strided =", I_strided)
snr = 1 / np.std(I_strided, axis=1)
print(snr)
# In[ ]:
plt.plot(snr)
plt.show()
# In[23]:
def strided_snr(data, frame_length, hop_length=1):
num_frames = 1 + (len(data) - frame_length) // hop_length  # integer division for the strided shape
row_stride = data.itemsize * hop_length # *hopesize
col_stride = data.itemsize
data_strided = stride_tricks.as_strided(data, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
print("length of data_strided", len(data_strided))
snr = 1/np.std( data_strided, axis=1)
# print("frame_length", frame_length)
# print("num_frames", num_frames)
# print("len(snr)", len(snr))
# print(snr)
# zeropad to make uniform length of spectra
missing_size = len(data) - len(snr)
print("missing size", missing_size)
before = missing_size // 2
end = missing_size // 2
if missing_size % 2 != 0:
print("missing size is not even")
padded_snr = np.pad(snr, (before, end), "constant")
# print("padded length", len(padded_snr))
# print(padded_snr)
return padded_snr
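# Illustration (a hand-checked sketch, not part of the original notebook):
# with data = np.arange(5), frame_length = 3 and hop_length = 1, the
# as_strided call above yields the overlapping windows
#   [[0 1 2]
#    [1 2 3]
#    [2 3 4]]
# and each row (one window) is then reduced by np.std for the SNR.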
def strided_sum(data, frame_length, hop_length=1):
num_frames = 1 + (len(data) - frame_length) // hop_length  # integer division for the strided shape
row_stride = data.itemsize * hop_length # *hopesize
col_stride = data.itemsize
data_strided = stride_tricks.as_strided(data, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
print("length of data_strided", len(data_strided))
print("binsize", frame_length)
print("hop_length", hop_length)
print(data_strided)
total = np.sum(data_strided, axis=1)
# print("frame_length", frame_length)
# print("num_frames", num_frames)
# print("len(snr)", len(snr))
# print(snr)
# zeropad to make uniform length of spectra
missing_size = len(data) - len(total)
pad_size = (len(data) - len(total)) // 2
# print("missing size", missing_size)
before = missing_size // 2
end = missing_size // 2
if missing_size % 2 != 0:
print("missing size is not even")
padded_total = np.pad(total, (pad_size, pad_size), "constant")
# print("padded length", len(padded_snr))
# print(padded_snr)
return padded_total
# This doesn't seem to work that well with pandas not sure why
# store_array = np.empty((1024, len(bins)), dtype=data.dtype)
# for i, bin in enumerate(bins):
# store_array[:, i] = strided_snr(I, bin)
# In[30]:
# loop over the different bin sizes
bins = np.arange(3, 51, 2)
hopper = 1
store_list = []
for i, b in enumerate(bins):
store_list.append(strided_snr(I, b, hop_length=hopper))
print("done")
# In[31]:
# print(store_array)
print(store_list)
# In[32]:
# turn into a pandas dataframe
# dataframe = pd.DataFrame(data=store_array, columns=range(1024), index=bins)
# dataframe = pd.DataFrame(store_array, index=bins, columns=list(range(1024)))
# print(dataframe)
# print(dataframe.dtypes)
# In[33]:
df_list = pd.DataFrame(store_list, index=bins, columns=np.round(wl, 2))
print(df_list)
# In[36]:
sns.set()
cmap = sns.diverging_palette(220, 10, as_cmap=True)
ax = sns.heatmap(store_list, cmap=cmap, xticklabels=200, vmax=300, vmin=10)
# ax = sns.heatmap(df_list)
# plt.xticks(np.arange(int(np.min(wl)), int(np.max(wl) + 1), 1.0))
ax.set(ylabel="Binsize", xlabel="Wavelenght")
# In[37]:
# seaborn heatmap plot
sns.set()
cmap = sns.diverging_palette(220, 10, as_cmap=True)
ax = sns.heatmap(df_list, xticklabels=200, vmax=300, vmin=10)
# ax = sns.heatmap(df_list)
# plt.xticks(np.arange(int(np.min(wl)), int(np.max(wl) + 1), 1.0))
ax.set(ylabel="Binsize",
xlabel="Wavelenght")
# In[35]:
# ax = sns.heatmap(store_list)
wl[50]-wl[0]
# In[ ]:
# # test on known data
# In[17]:
data = np.arange(20)
binsizes = range(1, 6, 2)
store = []
# opt = np.get_printoptions()
# np.set_printoptions(threshold='nan')
for b in binsizes:
store.append(strided_sum(data, b))
# np.set_printoptions(**opt)
# In[18]:
SNRrand = pd.DataFrame(store, index=binsizes)
print(SNRrand)
# In[19]:
sns.set()
# cmap = sns.diverging_palette(220, 10, as_cmap=True)
ax = sns.heatmap(SNRrand, xticklabels=20)
# ax = sns.heatmap(df_list)
# plt.xticks(np.arange(int(np.min(wl)), int(np.max(wl) + 1), 1.0))
ax.set(ylabel="Binsize",
xlabel="Wavelenght")
# In[ ]:
# In[ ]:
|
[
"jason.neal@astro.up.pt"
] |
jason.neal@astro.up.pt
|
37712452ff5adaa4113178fb9c5623c3e941fee9
|
d67bd00f8fe819bd3011ce154c19cbc765d59f1d
|
/branches/4.0_buildout/openlegis/sagl/skins/consultas/parlamentar/vereadores_atuais_json.py
|
aac059d617ea5f4ce9e532f0b3e6b9cfb8903a88
|
[] |
no_license
|
openlegis-br/sagl
|
90f87bdbbaa8a6efe0ccb5691ea8424575288c46
|
eabf7529eefe13a53ed088250d179a92218af1ed
|
refs/heads/master
| 2023-08-31T12:29:39.382474
| 2023-08-29T16:12:01
| 2023-08-29T16:12:01
| 32,593,838
| 17
| 1
| null | 2023-08-29T06:16:55
| 2015-03-20T16:11:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,827
|
py
|
## Script (Python) "vereadores_atuais"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=
##title=
##
import simplejson as json
context.REQUEST.RESPONSE.setHeader("Access-Control-Allow-Origin", "*")
request=context.REQUEST
for item in context.zsql.legislatura_atual_obter_zsql():
num_legislatura = item.num_legislatura
data_atual = DateTime().strftime("%d/%m/%Y")
lista_exercicio = []
exercicio = []
for item in context.zsql.autores_obter_zsql(txt_dat_apresentacao=data_atual):
dic = {}
dic['cod_parlamentar'] = item.cod_parlamentar
dic['nom_parlamentar'] = item.nom_parlamentar
dic['nom_completo'] = item.nom_completo
foto = str(item.cod_parlamentar) + "_foto_parlamentar"
if hasattr(context.sapl_documentos.parlamentar.fotos, foto):
dic['foto'] = request.SERVER_URL + '/sapl_documentos/parlamentar/fotos/' + foto
else:
dic['foto'] = request.SERVER_URL + '/imagens/avatar.png'
dic['link'] = request.SERVER_URL + '/consultas/parlamentar/parlamentar_mostrar_proc?cod_parlamentar=' + item.cod_parlamentar + '%26iframe=1'
dic['partido'] = ''
for filiacao in context.zsql.parlamentar_data_filiacao_obter_zsql(num_legislatura=num_legislatura, cod_parlamentar=item.cod_parlamentar):
if filiacao.dat_filiacao != '0' and filiacao.dat_filiacao != None:
for partido in context.zsql.parlamentar_partido_obter_zsql(dat_filiacao=filiacao.dat_filiacao, cod_parlamentar=item.cod_parlamentar):
dic['partido'] = partido.sgl_partido
lista_exercicio.append(dic)
lista_exercicio.sort(key=lambda dic: dic['nom_completo'])
#listaVereador={}
#listaVereador.update({'vereadores': lista_exercicio})
return json.dumps(lista_exercicio)
|
[
"contato@openlegis.com.br"
] |
contato@openlegis.com.br
|
bf6f76dc39e4234d6a8f1eaad9548249a8dc530d
|
a7947a129fa5318517f35f17163840f24178d6aa
|
/examples/core_geometry_bspline.py
|
c254afdd34a3fe70426aefe3c789019e5e6cad4d
|
[] |
no_license
|
fboussuge/pythonocc-demos
|
993abe7634ab74fc3619fea12519c176b4e26658
|
8f9756653eaecc505238d43fa22a0057bbd14b56
|
refs/heads/master
| 2021-06-23T01:29:29.611505
| 2020-12-08T13:49:04
| 2020-12-08T13:49:04
| 156,134,578
| 0
| 0
| null | 2018-11-04T23:17:31
| 2018-11-04T23:17:30
| null |
UTF-8
|
Python
| false
| false
| 2,743
|
py
|
#!/usr/bin/env python
##Copyright 2009-2014 Jelle Feringa (jelleferinga@gmail.com)
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from OCC.Core.gp import gp_Pnt2d
from OCC.Core.Geom2dAPI import Geom2dAPI_Interpolate, Geom2dAPI_PointsToBSpline
from OCC.Core.TColgp import TColgp_HArray1OfPnt2d, TColgp_Array1OfPnt2d
from OCC.Display.SimpleGui import init_display
display, start_display, add_menu, add_function_to_menu = init_display()
def bspline():
# the first bspline
array = TColgp_Array1OfPnt2d(1, 5)
array.SetValue(1, gp_Pnt2d(0, 0))
array.SetValue(2, gp_Pnt2d(1, 2))
array.SetValue(3, gp_Pnt2d(2, 3))
array.SetValue(4, gp_Pnt2d(4, 3))
array.SetValue(5, gp_Pnt2d(5, 5))
bspline_1 = Geom2dAPI_PointsToBSpline(array).Curve()
# the second one
harray = TColgp_HArray1OfPnt2d(1, 5)
harray.SetValue(1, gp_Pnt2d(0, 0))
harray.SetValue(2, gp_Pnt2d(1, 2))
harray.SetValue(3, gp_Pnt2d(2, 3))
harray.SetValue(4, gp_Pnt2d(4, 3))
harray.SetValue(5, gp_Pnt2d(5, 5))
anInterpolation = Geom2dAPI_Interpolate(harray.GetHandle(), False, 0.01)
anInterpolation.Perform()
bspline_2 = anInterpolation.Curve()
harray2 = TColgp_HArray1OfPnt2d(1, 5)
harray2.SetValue(1, gp_Pnt2d(11, 0))
harray2.SetValue(2, gp_Pnt2d(12, 2))
harray2.SetValue(3, gp_Pnt2d(13, 3))
harray2.SetValue(4, gp_Pnt2d(15, 3))
harray2.SetValue(5, gp_Pnt2d(16, 5))
anInterpolation2 = Geom2dAPI_Interpolate(harray2.GetHandle(), True, 0.01)  # use harray2, the offset point set defined above
anInterpolation2.Perform()
bspline_3 = anInterpolation2.Curve()
for j in range(array.Lower(), array.Upper()+1):
p = array.Value(j)
display.DisplayShape(p, update=False)
for j in range(harray.Lower(), harray.Upper()+1):
p = harray.Value(j)
display.DisplayShape(p, update=False)
display.DisplayShape(bspline_1, update=False)
display.DisplayShape(bspline_2, update=False, color='GREEN')
display.DisplayShape(bspline_3, update=True, color='BLUE')
if __name__ == '__main__':
bspline()
start_display()
|
[
"tpaviot@gmail.com"
] |
tpaviot@gmail.com
|
e08f90549f8a32c66d9622898dd5fc889d376b1d
|
57c570d1b5a621158d8763f935e2069be6b8c90a
|
/tykj-operation/tykj-operation/MarketSearchCrawler/services/db.py
|
48e2998f975d9b156c3edad34d84c9fd20d44542
|
[] |
no_license
|
liuliainio/liuli
|
e011decf45f7eca7009a12ad4a96f33a17055945
|
203fbf4f135efb6432c77b937633003ce2f2c9a2
|
refs/heads/master
| 2021-01-10T20:35:08.070770
| 2018-08-21T05:52:59
| 2018-08-21T05:52:59
| 25,625,853
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
#-*- coding: utf-8 -*-
'''
Created on Sep 12, 2013
@author: gmliao
'''
from crawler import settings
import MySQLdb
class MySQLdbWrapper:
conn = None
def connect(self):
self.conn = MySQLdb.connect(settings.DATABASES['default'][0], settings.DATABASES['default'][1],
settings.DATABASES['default'][2], settings.DATABASES['default'][3],
charset='utf8', use_unicode=True)
#self.conn = MySQLdb.connect('localhost', 'root', '1111', 'market')
self.conn.set_character_set('utf8')
def reconnect(self):
self.conn = None
def cursor(self):
try:
if not self.conn:
self.connect()
return self.conn.cursor()
except MySQLdb.OperationalError:
self.connect()
return self.conn.cursor()
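# Usage sketch (hypothetical; assumes the crawler settings module is
# importable and the database is reachable):
#   db = MySQLdbWrapper()
#   cur = db.cursor()        # connects lazily; retries once on OperationalError
#   cur.execute('SELECT 1')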
|
[
"liuliainio@163.com"
] |
liuliainio@163.com
|
2d39f028eeabb883b01ffc250ab1059e0f677292
|
0aa2db201678205e9eccd3f4f2dcb6f95a97b5f6
|
/tut_42.py
|
22a41be2b48cd8deb77c4a835b8a9d4c9ca6fee1
|
[] |
no_license
|
udoy382/PyTutorial_telusko
|
ffa76b4b6772d289c787e4b682df2d0965a2bf62
|
5dc5f3fc331605310f7c3923d7865f55a4592e28
|
refs/heads/main
| 2023-06-09T11:00:41.915456
| 2021-06-30T14:29:56
| 2021-06-30T14:29:56
| 381,730,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
# this is our simple functions
def square(a):
return a*a
result = square(5)
print(result)
# this is our lambda functions
f = lambda j,k : j*k
print(f(4, 5))
|
[
"srudoy436@gmail.com"
] |
srudoy436@gmail.com
|
5ce02b22a691c4e7bbbb7c9b5b276d863edba49d
|
486fa0a987ab1648de91efeb4b7ba8be3dd6b016
|
/TermTk/TTkCore/TTkTerm/__init__.py
|
22928704063777cde14e1466e9c3a9c63600d837
|
[
"MIT"
] |
permissive
|
ceccopierangiolieugenio/pyTermTk
|
9f5103d6af9e93fe2572b61486919020d2007550
|
f9c2a4d97f2cd04f0b86cf10661f63a61edae48e
|
refs/heads/main
| 2023-08-30T20:58:39.239718
| 2023-08-02T22:51:02
| 2023-08-02T22:51:02
| 339,475,110
| 414
| 16
|
MIT
| 2023-08-31T23:16:10
| 2021-02-16T17:23:36
|
Python
|
UTF-8
|
Python
| false
| false
| 176
|
py
|
from .inputkey import TTkKeyEvent
from .inputmouse import TTkMouseEvent
from .colors import TTkTermColor
from .term import TTkTerm
from .input import TTkInput
|
[
"ceccopierangiolieugenio@googlemail.com"
] |
ceccopierangiolieugenio@googlemail.com
|
2be7fcff78fe289f0631a2d1fdca66d77e9dac22
|
fe19d2fac4580d463132e61509bd6e3cc2cf958d
|
/toontown/safezone/PicnicBasket.py
|
30d2bfbd1a1e067efa18df7add4622d6558f99b5
|
[] |
no_license
|
t00nt0wn1dk/c0d3
|
3e6db6dd42c3aa36ad77709cf9016176a3f3a44f
|
7de105d7f3de0f8704b020e32fd063ee2fad8d0d
|
refs/heads/master
| 2021-01-01T16:00:15.367822
| 2015-03-21T21:25:52
| 2015-03-21T21:25:55
| 32,647,654
| 3
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,319
|
py
|
# 2013.08.22 22:24:42 Pacific Daylight Time
# Embedded file name: toontown.safezone.PicnicBasket
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.fsm import StateData
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from direct.showbase import PythonUtil
class PicnicBasket(StateData.StateData):
__module__ = __name__
def __init__(self, safeZone, parentFSM, doneEvent, tableNumber, seatNumber):
StateData.StateData.__init__(self, doneEvent)
self.tableNumber = tableNumber
self.seatNumber = seatNumber
self.fsm = ClassicFSM.ClassicFSM('PicnicBasket', [State.State('start', self.enterStart, self.exitStart, ['requestBoard', 'trolleyHFA', 'trolleyTFA']),
State.State('trolleyHFA', self.enterTrolleyHFA, self.exitTrolleyHFA, ['final']),
State.State('trolleyTFA', self.enterTrolleyTFA, self.exitTrolleyTFA, ['final']),
State.State('requestBoard', self.enterRequestBoard, self.exitRequestBoard, ['boarding']),
State.State('boarding', self.enterBoarding, self.exitBoarding, ['boarded']),
State.State('boarded', self.enterBoarded, self.exitBoarded, ['requestExit',
'trolleyLeaving',
'final',
'exiting']),
State.State('requestExit', self.enterRequestExit, self.exitRequestExit, ['exiting', 'trolleyLeaving']),
State.State('trolleyLeaving', self.enterTrolleyLeaving, self.exitTrolleyLeaving, ['final']),
State.State('exiting', self.enterExiting, self.exitExiting, ['final']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
self.parentFSM = parentFSM
return None
def load(self):
self.parentFSM.getStateNamed('picnicBasketBlock').addChild(self.fsm)
self.buttonModels = loader.loadModel('phase_3.5/models/gui/inventory_gui')
self.upButton = self.buttonModels.find('**/InventoryButtonUp')
self.downButton = self.buttonModels.find('**/InventoryButtonDown')
self.rolloverButton = self.buttonModels.find('**/InventoryButtonRollover')
def unload(self):
self.parentFSM.getStateNamed('trolley').removeChild(self.fsm)
del self.fsm
del self.parentFSM
self.buttonModels.removeNode()
del self.buttonModels
del self.upButton
del self.downButton
del self.rolloverButton
def enter(self):
self.fsm.enterInitialState()
if base.localAvatar.hp > 0:
messenger.send('enterPicnicTableOK_%d_%d' % (self.tableNumber, self.seatNumber))
self.fsm.request('requestBoard')
else:
self.fsm.request('trolleyHFA')
return None
def exit(self):
self.ignoreAll()
return None
def enterStart(self):
return None
def exitStart(self):
return None
def enterTrolleyHFA(self):
self.noTrolleyBox = TTDialog.TTGlobalDialog(message=TTLocalizer.TrolleyHFAMessage, doneEvent='noTrolleyAck', style=TTDialog.Acknowledge)
self.noTrolleyBox.show()
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('noTrolleyAck', self.__handleNoTrolleyAck)
def exitTrolleyHFA(self):
self.ignore('noTrolleyAck')
self.noTrolleyBox.cleanup()
del self.noTrolleyBox
def enterTrolleyTFA(self):
self.noTrolleyBox = TTDialog.TTGlobalDialog(message=TTLocalizer.TrolleyTFAMessage, doneEvent='noTrolleyAck', style=TTDialog.Acknowledge)
self.noTrolleyBox.show()
base.localAvatar.b_setAnimState('neutral', 1)
self.accept('noTrolleyAck', self.__handleNoTrolleyAck)
def exitTrolleyTFA(self):
self.ignore('noTrolleyAck')
self.noTrolleyBox.cleanup()
del self.noTrolleyBox
def __handleNoTrolleyAck(self):
ntbDoneStatus = self.noTrolleyBox.doneStatus
if ntbDoneStatus == 'ok':
doneStatus = {}
doneStatus['mode'] = 'reject'
messenger.send(self.doneEvent, [doneStatus])
else:
self.notify.error('Unrecognized doneStatus: ' + str(ntbDoneStatus))
def enterRequestBoard(self):
return None
def handleRejectBoard(self):
doneStatus = {}
doneStatus['mode'] = 'reject'
messenger.send(self.doneEvent, [doneStatus])
def exitRequestBoard(self):
return None
def enterBoarding(self, nodePath, side):
camera.wrtReparentTo(nodePath)
heading = PythonUtil.fitDestAngle2Src(camera.getH(nodePath), 90 * side)
self.cameraBoardTrack = LerpPosHprInterval(camera, 1.5, Point3(14.4072 * side, 0, 3.8667), Point3(heading, -15, 0))
self.cameraBoardTrack.start()
return None
def exitBoarding(self):
self.ignore('boardedTrolley')
return None
def enterBoarded(self):
self.enableExitButton()
return None
def exitBoarded(self):
self.cameraBoardTrack.finish()
self.disableExitButton()
return None
def enableExitButton(self):
self.exitButton = DirectButton(relief=None, text=TTLocalizer.TrolleyHopOff, text_fg=(1, 1, 0.65, 1), text_pos=(0, -0.23), text_scale=0.8, image=(self.upButton, self.downButton, self.rolloverButton), image_color=(1, 0, 0, 1), image_scale=(20, 1, 11), pos=(0, 0, 0.8), scale=0.15, command=lambda self = self: self.fsm.request('requestExit'))
return
def disableExitButton(self):
self.exitButton.destroy()
def enterRequestExit(self):
messenger.send('trolleyExitButton')
return None
def exitRequestExit(self):
return None
def enterTrolleyLeaving(self):
self.acceptOnce('playMinigame', self.handlePlayMinigame)
self.acceptOnce('picnicDone', self.handlePicnicDone)
return None
def handlePlayMinigame(self, zoneId, minigameId):
base.localAvatar.b_setParent(ToontownGlobals.SPHidden)
doneStatus = {}
doneStatus['mode'] = 'minigame'
doneStatus['zoneId'] = zoneId
doneStatus['minigameId'] = minigameId
messenger.send(self.doneEvent, [doneStatus])
def handlePicnicDone(self):
doneStatus = {}
doneStatus['mode'] = 'exit'
messenger.send(self.doneEvent, [doneStatus])
def exitTrolleyLeaving(self):
self.ignore('playMinigame')
taskMgr.remove('leavingCamera')
return self.notify.debug('handling golf kart done event')
def enterExiting(self):
return None
def handleOffTrolley(self):
doneStatus = {}
doneStatus['mode'] = 'exit'
messenger.send(self.doneEvent, [doneStatus])
return None
def exitExiting(self):
return None
def enterFinal(self):
return None
def exitFinal(self):
return None
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\safezone\PicnicBasket.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:24:43 Pacific Daylight Time
|
[
"anonymoustoontown@gmail.com"
] |
anonymoustoontown@gmail.com
|
01dabdc5ab680e0a4c9bb2dcef1040d08b4915eb
|
f56a00622ea3799f25d52138ffaafc6dcad46574
|
/aggtrend/aggtrends/migrations/0003_code_post_code_post.py
|
45ea624a140d69851dd7bf0cd4e284f631260bcf
|
[] |
no_license
|
SardarDawar/aggregate
|
063b384421ef1f3b5c8d1eb1975cd8396d38f553
|
b062023bc2d3e6fdeb1c17743345cb8b70f90b1c
|
refs/heads/master
| 2022-12-29T05:09:16.438663
| 2019-12-30T22:49:56
| 2019-12-30T22:49:56
| 230,547,601
| 1
| 0
| null | 2019-12-30T10:25:30
| 2019-12-28T02:31:17
|
HTML
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
# Generated by Django 2.2.6 on 2019-12-30 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aggtrends', '0002_auto_20191227_0614'),
]
operations = [
migrations.AddField(
model_name='code_post',
name='code_post',
field=models.BooleanField(default=False),
),
]
|
[
"dawarsardar786@gmail.com"
] |
dawarsardar786@gmail.com
|
1c51a3fc01837f6b1233b786c73615dcb572c8c7
|
624ccdaf85ebebf0a03636fbd1ff234bd89c7972
|
/product/serializers.py
|
75f8d0097fb3b1ce82d5f1f29e448da23a3cf786
|
[] |
no_license
|
kairat3/bella_basket
|
613294985c1f71efdee9d0f43fa2557a412ca9b8
|
e76f3f950957ae4051472d374ccee9350def6cd1
|
refs/heads/master
| 2023-07-13T00:13:18.475695
| 2021-08-25T09:51:17
| 2021-08-25T09:51:17
| 396,841,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,016
|
py
|
from rest_framework import serializers
from account.serializers import ProfileSerializer, UserSerializer
from .models import Product, Category, Favorite, Color, Size, Additional, Image
class AdditionalSerializer(serializers.ModelSerializer):
class Meta:
model = Additional
fields = ('key', 'value', )
class ColorSerializer(serializers.ModelSerializer):
class Meta:
model = Color
fields = ('id', 'color', )
class SizeSerializer(serializers.ModelSerializer):
class Meta:
model = Size
fields = ('size', )
class ImageSerializer(serializers.ModelSerializer):
class Meta:
model = Image
fields = ('title', 'image', )
class IsHitSerializer(serializers.ModelSerializer):
images = ImageSerializer(many=True, read_only=True)
color = ColorSerializer(many=True)
size = SizeSerializer(many=True)
class Meta:
model = Product
fields = (
'id', 'is_hit', 'title', 'description', 'old_price', 'price', 'discount', 'additional', 'color', 'size',
'images', 'category')
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('id', 'title', 'slug',)
def to_representation(self, instance):
representation = super().to_representation(instance)
if instance.children.exists():
representation['children'] = CategorySerializer(instance=instance.children.all(), many=True).data
return representation
class ProductSerializer(serializers.ModelSerializer):
additional = AdditionalSerializer(many=True)
color = ColorSerializer(many=True)
size = SizeSerializer(many=True)
images = ImageSerializer(many=True, read_only=True)
category = CategorySerializer()
class Meta:
model = Product
fields = ('id', 'title', 'description', 'old_price', 'price', 'discount', 'additional', 'color', 'size', 'images', 'category', 'is_hit')
class FavoriteSerializer(serializers.ModelSerializer):
product = ProductSerializer(read_only=True)
class Meta:
model = Favorite
fields = ('id', 'favorite', 'user', 'product')
def create(self, validated_data):
request = self.context.get('request')
user = request.user
favorite = Favorite.objects.create(user=user, **validated_data)
return favorite
def to_representation(self, instance):
representation = super(FavoriteSerializer, self).to_representation(instance)
representation['user'] = instance.user.phone_number
return representation
class CartSerializer(serializers.Serializer):
user = ProfileSerializer()
products = ProductSerializer(many=True)
created_at = serializers.DateTimeField()
class AddToCartSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
class Meta:
model = Product
fields = ['id']
|
[
"jvckmiller@gmail.com"
] |
jvckmiller@gmail.com
|
d027b4e8adc33d04712f2639893d0e1b309d38c0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02603/s199763378.py
|
915ed29aa0e93fd701e5b05d825c6c1857d7be8c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
import sys
input = sys.stdin.readline
N = int(input())
A = list(map(int, input().split()))
dp = [0]*(N+1)
dp[0] = 1000  # initial cash
for i in range(1, N+1):
    dp[i] = dp[i-1]  # option: carry yesterday's cash forward unchanged
    for j in range(i):
        # buy as many shares as possible at price A[j], sell them all at
        # price A[i-1], and keep the leftover change dp[j] % A[j]
        dp[i] = max(dp[i], dp[j]//A[j]*A[i-1]+dp[j]%A[j])
print(dp[N])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f2059742af36092696997c24446f840d262c752a
|
081ea255a45d2e0f255ebab00aea487c1bc01da2
|
/OCP/tasks/task_motion.py
|
01f077468501d4bcbe408f077aad8405ce9c9082
|
[] |
no_license
|
ggory15/HQP-cogimon
|
8b2d906d179864c613d8741fb1997c650feedf3c
|
e809fcc2a421066b7c0c02ce70898ec96ba584af
|
refs/heads/master
| 2022-07-03T01:21:47.831298
| 2020-05-10T22:21:13
| 2020-05-10T22:21:13
| 262,884,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
# __author__ = "Sanghyun Kim"
# __copyright__ = "Copyright (C) 2020 Sanghyun Kim"
import numpy as np
import copy
from .task_abstract import *
class TaskMotion(TaskBase):
def __init__(self, name, robot):
TaskBase.__init__(self, name, robot)
self.mask = 0
def setMask(self, mask):
self.mask = copy.deepcopy(mask)
def hasMask(self):
    return self.mask != 0
|
[
"ggory15@snu.ac.kr"
] |
ggory15@snu.ac.kr
|
e50c7c7cb40e44a0cdcd058a2b011502e8d7cb21
|
93a4edf14cd2284d58fe0218cdce2eac00db66c6
|
/tests/sdfg_validate_names_test.py
|
51490383701f01b1caa015a7e2bb7c6e1b8da622
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
gronerl/dace
|
f50dbeb70feb35c2afb4ee92b2dd4a9613a024ea
|
886e14cfec5df4aa28ff9a5e6c0fe8150570b8c7
|
refs/heads/master
| 2023-07-23T12:30:20.561679
| 2020-02-24T07:25:34
| 2020-02-24T07:25:34
| 242,930,054
| 0
| 0
|
BSD-3-Clause
| 2020-02-25T06:45:23
| 2020-02-25T06:45:22
| null |
UTF-8
|
Python
| false
| false
| 4,584
|
py
|
import unittest
import dace
# Try to detect invalid names in SDFG
class NameValidationTests(unittest.TestCase):
# SDFG label
def test_sdfg_name1(self):
try:
sdfg = dace.SDFG(' ')
sdfg.validate()
self.fail('Failed to detect invalid SDFG')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
def test_sdfg_name2(self):
try:
sdfg = dace.SDFG('3sat')
sdfg.validate()
self.fail('Failed to detect invalid SDFG')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
# State
def test_state_duplication(self):
try:
sdfg = dace.SDFG('ok')
sdfg.add_state('also_ok')
s2 = sdfg.add_state('also_ok')
s2.set_label('also_ok')
sdfg.validate()
self.fail('Failed to detect duplicate state')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
def test_state_name1(self):
try:
sdfg = dace.SDFG('ok')
sdfg.add_state('not ok')
sdfg.validate()
self.fail('Failed to detect invalid state')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
def test_state_name2(self):
try:
sdfg = dace.SDFG('ok')
sdfg.add_state('$5')
sdfg.validate()
self.fail('Failed to detect invalid state')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
# Array
def test_array(self):
try:
sdfg = dace.SDFG('ok')
state = sdfg.add_state('also_ok')
_8 = state.add_array('8', [1], dace.float32)
t = state.add_tasklet('tasklet', {'a'}, {}, 'print(a)')
state.add_edge(_8, None, t, 'a',
dace.Memlet.from_array(_8.data, _8.desc(sdfg)))
sdfg.validate()
self.fail('Failed to detect invalid array name')
except (dace.sdfg.InvalidSDFGError, NameError) as ex:
print('Exception caught:', ex)
# Tasklet
def test_tasklet(self):
try:
sdfg = dace.SDFG('ok')
state = sdfg.add_state('also_ok')
A = state.add_array('A', [1], dace.float32)
B = state.add_array('B', [1], dace.float32)
t = state.add_tasklet(' tasklet', {'a'}, {'b'}, 'b = a')
state.add_edge(A, None, t, 'a',
dace.Memlet.from_array(A.data, A.desc(sdfg)))
state.add_edge(t, 'b', B, None,
dace.Memlet.from_array(B.data, B.desc(sdfg)))
sdfg.validate()
self.fail('Failed to detect invalid tasklet name')
except dace.sdfg.InvalidSDFGNodeError as ex:
print('Exception caught:', ex)
# Connector
def test_connector(self):
try:
sdfg = dace.SDFG('ok')
state = sdfg.add_state('also_ok')
A = state.add_array('A', [1], dace.float32)
B = state.add_array('B', [1], dace.float32)
t = state.add_tasklet('tasklet', {'$a'}, {' b'}, '')
state.add_edge(A, None, t, '$a',
dace.Memlet.from_array(A.data, A.desc(sdfg)))
state.add_edge(t, ' b', B, None,
dace.Memlet.from_array(B.data, B.desc(sdfg)))
sdfg.validate()
self.fail('Failed to detect invalid connectors')
except dace.sdfg.InvalidSDFGError as ex:
print('Exception caught:', ex)
# Interstate edge
def test_interstate_edge(self):
try:
sdfg = dace.SDFG('ok')
state = sdfg.add_state('also_ok')
A = state.add_array('A', [1], dace.float32)
B = state.add_array('B', [1], dace.float32)
t = state.add_tasklet('tasklet', {'a'}, {'b'}, 'b = a')
state.add_edge(A, None, t, 'a',
dace.Memlet.from_array(A.data, A.desc(sdfg)))
state.add_edge(t, 'b', B, None,
dace.Memlet.from_array(B.data, B.desc(sdfg)))
sdfg.add_edge(
state, state, dace.InterstateEdge(assignments={'%5': '1'}))
sdfg.validate()
self.fail('Failed to detect invalid interstate edge')
except dace.sdfg.InvalidSDFGInterstateEdgeError as ex:
print('Exception caught:', ex)
if __name__ == '__main__':
unittest.main()
|
[
"talbn@inf.ethz.ch"
] |
talbn@inf.ethz.ch
|
94125c9112a64584e17d1955f1b02efaedd6fcd0
|
2da8bcfb9a72e507812a8723e38ad6d030c300f1
|
/simplify_path_71.py
|
ac6e8a021f9a97b8369035d457680de94b0e3a0f
|
[] |
no_license
|
aditya-doshatti/Leetcode
|
1a4e0f391a7d6ca2d7f8fdc35e535f4ec10fb634
|
eed20da07896db471ea6582785335e52d4f04f85
|
refs/heads/master
| 2023-04-06T02:18:57.287263
| 2023-03-17T03:08:42
| 2023-03-17T03:08:42
| 218,408,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
'''
71. Simplify Path
Medium
Given a string path, which is an absolute path (starting with a slash '/') to a file or directory in a Unix-style file system, convert it to the simplified canonical path.
In a Unix-style file system, a period '.' refers to the current directory, a double period '..' refers to the directory up a level, and any multiple consecutive slashes (i.e. '//') are treated as a single slash '/'. For this problem, any other format of periods such as '...' are treated as file/directory names.
The canonical path should have the following format:
The path starts with a single slash '/'.
Any two directories are separated by a single slash '/'.
The path does not end with a trailing '/'.
The path only contains the directories on the path from the root directory to the target file or directory (i.e., no period '.' or double period '..')
Return the simplified canonical path.
Example 1:
Input: path = "/home/"
Output: "/home"
Explanation: Note that there is no trailing slash after the last directory name.
https://leetcode.com/problems/simplify-path/
'''
class Solution:
def simplifyPath(self, path: str) -> str:
stack = []
for val in path[1:].split('/'):
if val == '..':
if stack:
stack.pop()
elif val and val !='.':
stack.append(val)
return '/' + '/'.join(stack)
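# Worked examples, traced through the stack logic above (illustrative only):
#   Solution().simplifyPath("/home/")          -> "/home"
#   Solution().simplifyPath("/a/./b/../../c")  -> "/c"
# '.' and empty components are skipped; '..' pops the last directory if any.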
|
[
"aditya.doshatti@sjsu.edu"
] |
aditya.doshatti@sjsu.edu
|
390175f4e92c1ae351811ad85ecce0a6c1de7920
|
241cebd26fbcbd20bae804fd868722b2673328fc
|
/histogram_2002_r75.py
|
64b8fb63166cbdb365a64badb8c95216fdfd1d5c
|
[] |
no_license
|
shouldsee/golly_utils
|
b3339e9ba4e5213e98ec1b35755cd605e3f93df8
|
03959f0c593d4a811ba20f2372d6663d126dbab2
|
refs/heads/master
| 2021-01-19T11:04:25.661858
| 2018-04-01T13:19:51
| 2018-04-01T13:19:51
| 82,230,847
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,914
|
py
|
# Creates a histogram plot showing the frequencies of all cell states
# in the current selection (if one exists) or the entire pattern.
# Author: Andrew Trevorrow (andrew@trevorrow.com), September 2009.
import golly as g
import math
from glife import getminbox, rect, rccw, pattern
from glife.text import make_text
from time import time
# --------------------------------------------------------------------
barwd = 40 # width of each bar
# length of axes
xlen = g.numstates() * barwd
ylen = 500
totalcells = 0
# --------------------------------------------------------------------
def draw_line(x1, y1, x2, y2, state = 1):
# draw a line of cells in given state from x1,y1 to x2,y2
# using Bresenham's algorithm
g.setcell(x1, y1, state)
if x1 == x2 and y1 == y2: return
dx = x2 - x1
ax = abs(dx) * 2
sx = 1
if dx < 0: sx = -1
dy = y2 - y1
ay = abs(dy) * 2
sy = 1
if dy < 0: sy = -1
if ax > ay:
d = ay - (ax / 2)
while x1 != x2:
g.setcell(x1, y1, state)
if d >= 0:
y1 += sy
d -= ax
x1 += sx
d += ay
else:
d = ax - (ay / 2)
while y1 != y2:
g.setcell(x1, y1, state)
if d >= 0:
x1 += sx
d -= ay
y1 += sy
d += ax
g.setcell(x2, y2, state)
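# Worked example (hand-traced through the Bresenham loop above, for
# illustration): draw_line(0, 0, 3, 1) sets the cells
# (0,0), (1,0), (2,1), (3,1).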
# --------------------------------------------------------------------
def color_text(string, extrastate):
t = make_text(string, "mono")
bbox = getminbox(t)
# convert two-state pattern to multi-state and set state to extrastate
mlist = []
tlist = list(t)
for i in xrange(0, len(tlist), 2):
mlist.append(tlist[i])
mlist.append(tlist[i+1])
mlist.append(extrastate)
if len(mlist) % 2 == 0: mlist.append(0)
p = pattern(mlist)
return p, bbox.wd, bbox.ht
# --------------------------------------------------------------------
def draw_bar(state, extrastate):
barht = int( float(ylen) * float(statecount[state]) / float(totalcells) )
x = barwd * state
draw_line(x, 0, x, -barht, extrastate)
draw_line(x, -barht, x+barwd, -barht, extrastate)
draw_line(x+barwd, 0, x+barwd, -barht, extrastate)
if barht > 1:
# fill bar with corresponding color
x1 = x + 1
x2 = x + barwd - 1
for y in xrange(barht - 1):
draw_line(x1, -(y+1), x2, -(y+1), state)
if statecount[state] > 0:
# show count on top of bar
t, twd, tht = color_text(str(statecount[state]), extrastate)
t.put(barwd * (state+1) - barwd/2 - twd/2, -barht - tht - 3)
# --------------------------------------------------------------------
if g.empty(): g.exit("There is no pattern.")
if g.numstates() == 256: g.exit("No room for extra state.")
# check that a layer is available for the histogram
histname = "histogram"
histlayer = -1
for i in xrange(g.numlayers()):
if g.getname(i) == histname:
histlayer = i
break
if histlayer == -1 and g.numlayers() == g.maxlayers():
g.exit("You need to delete a layer.")
# use selection rect if it exists, otherwise use pattern bounds
label = "Selection"
r = rect( g.getselrect() )
if r.empty:
label = "Pattern"
r = rect( g.getrect() )
# count all cell states in r
g.show("Counting cell states...")
counted = 0
totalcells = r.wd * r.ht
statecount = [0] * g.numstates()
oldsecs = time()
for row in xrange(r.top, r.top + r.height):
for col in xrange(r.left, r.left + r.width):
counted += 1
statecount[g.getcell(col,row)] += 1
newsecs = time()
if newsecs - oldsecs >= 1.0: # show % done every sec
oldsecs = newsecs
done = 100.0 * float(counted) / float(totalcells)
g.show("Counting cell states... %.2f%%" % done)
g.dokey( g.getkey() )
statecount = [int(10 * math.log(x + 1, 2)) for x in statecount]
totalcells = sum(statecount)
if statecount[0] == counted: g.exit("Selection is empty.")
# save current layer's info before we switch layers
currname = g.getname()
currcursor = g.getcursor()
currcolors = g.getcolors()
currstates = g.numstates()
deads, deadr, deadg, deadb = g.getcolors(0)
# create histogram in separate layer
g.setoption("stacklayers", 0)
g.setoption("tilelayers", 0)
g.setoption("showlayerbar", 1)
if histlayer == -1:
histlayer = g.addlayer()
else:
g.setlayer(histlayer)
g.new(histname)
g.setcursor(currcursor)
# use a Generations rule so we can append extra state for drawing text & lines
g.setrule("//" + str(currstates+1))
extrastate = currstates
currcolors.append(extrastate)
if (deadr + deadg + deadb) / 3 > 128:
# use black if light background
currcolors.append(0)
currcolors.append(0)
currcolors.append(0)
else:
# use white if dark background
currcolors.append(255)
currcolors.append(255)
currcolors.append(255)
g.setcolors(currcolors)
# draw axes with origin at 0,0
draw_line(0, 0, xlen, 0, extrastate)
draw_line(0, 0, 0, -ylen, extrastate)
# add annotation using mono-spaced ASCII font
t, twd, tht = color_text("Pattern name: "+currname, extrastate)
t.put(0, -ylen - 30 - tht)
t, twd, tht = color_text("%s size: %d x %d (%d cells)" %
(label, r.wd, r.ht, totalcells), extrastate)
t.put(0, -ylen - 15 - tht)
t, twd, tht = color_text("% FREQUENCY", extrastate)
t.put(-35 - tht, -(ylen - twd)/2, rccw)
for perc in xrange(0, 101, 10):
t, twd, tht = color_text(str(perc), extrastate)
y = -perc * (ylen/100)
t.put(-twd - 10, y - tht/2)
### draw_line(-3, y, 0, y, extrastate)
# draw dotted horizontal line from 0 to xlen
for x in xrange(0, xlen, 2): g.setcell(x, y, extrastate)
t, twd, tht = color_text("STATE", extrastate)
t.put((xlen - twd)/2, 30)
for state in xrange(extrastate):
t, twd, tht = color_text(str(state), extrastate)
t.put(barwd * (state+1) - barwd/2 - twd/2, 10)
draw_bar(state, extrastate)
# display result at scale 1:1
g.fit()
g.setmag(0)
g.show("")
|
[
"shouldsee.gem@gmail.com"
] |
shouldsee.gem@gmail.com
|
19605ba78e49b3853aa764606e01d48cf28335f0
|
dc940e2aa628eff693af36584cfad935990ebe7d
|
/v3.1.0/getBookTXT.py
|
4274ac9cf37fbcba256bc4cd576bfa8e5e20c9b8
|
[] |
no_license
|
520wsl/getXs8Novels
|
865572ea488e0bf3d4e21664eb576237b6dd18be
|
ecf6d0bc5dfdbe4b5c3e8a9aac313bf7abce614b
|
refs/heads/master
| 2020-04-18T00:59:56.777416
| 2019-02-15T08:52:11
| 2019-02-15T08:52:11
| 167,101,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,358
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = 'Article scraper'
__author__ = 'Mad Dragon'
__mtime__ = '2019/1/12'
# I don't know what it means to be young and reckless; I only know that the winner takes all
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ Divine beast, ┣┓
┃ no bugs ever! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import moment
import time
from tool.GetBookInfoTool import GetBookInfoTool
from tool.SaveBookInfoToMySqlTool import SaveBookInfoToMySqlToo
from public.MySqlTool import MySqlTool
from public.Logger import Logger
from public.DataTool import DataTool
from public.RedisTool import RedisTool
class GetBookTXT(object):
def __init__(self, getBookIdsListSize, rdsKeyName):
self.b_getBookIdsListSize = int(getBookIdsListSize)
self.b_rdsKeyName = rdsKeyName
self.b_title = 'getBookTXT'
self.b_second = 1
self.b_timeStr = moment.now().format('YYYY-MM-DD-HH-mm-ss')
self.dataToo = DataTool(logName=self.b_title, second=self.b_second, timeStr=self.b_timeStr)
self.mySql = MySqlTool(logName=self.dataToo.initLogName())
self.logger = Logger(logname=self.dataToo.initLogName(), loglevel=1, logger=self.b_title).getlog()
self.rds = RedisTool()
self.getBookInfoToo = GetBookInfoTool(second=self.b_second, dataToo=self.dataToo, logger=self.logger)
self.saveBookInfoToMySqlToo = SaveBookInfoToMySqlToo(second=self.b_second, logger=self.logger,
getBookInfoToo=self.getBookInfoToo,
mySql=self.mySql, dataToo=self.dataToo)
def target(self):
links = []
for i in range(self.b_getBookIdsListSize):
link = self.rds.r.lpop(self.b_rdsKeyName)
if link != None:
link = link.decode(encoding='utf-8')
links.append(link)
return links
def contentsLoad(self):
links = self.target()
if len(links) <= 0:
self.logger.debug('bookTxtLoad: no data\n')
return
for item in links:
self.logger.debug(item)
self.saveBookInfoToMySqlToo.saveText(link=item)
self.isOk()
def isOk(self):
self.contentsLoad()
if __name__ == '__main__':
rdsKeyName = 'bookIdsList3'
getBookIdsListSize = input("How many links to fetch per batch (max 1000): >>")
maxCatalogNex = 1
print(
'\n\nConfirm parameters: rdsKeyName : %s | getBookIdsListSize : %s \n\n' % (rdsKeyName, getBookIdsListSize))
time.sleep(1)
isStart = input("Start? (y/n): >>")
if (isStart == 'y'):
book = GetBookTXT(getBookIdsListSize=getBookIdsListSize, rdsKeyName=rdsKeyName)
book.contentsLoad()
else:
print('Scraping cancelled')
|
[
"395548460@qq.com"
] |
395548460@qq.com
|
8cef065bb4c5e40d9a10b44e754dc7f3bd86eee2
|
e5e9ee9e4db2e400e7f87647501ee412c13d76e5
|
/python/open cv/magic_cloth.py
|
36c4deda71edb9cb0519277e6e8dc032facd3f67
|
[] |
no_license
|
beingveera/whole-python
|
524441eec44379c36cb1cfeccdbc65bf1c15d2f6
|
3f2b3cb7528afb9605ab6f9d4d2efc856a247af5
|
refs/heads/main
| 2023-05-15T06:28:03.058105
| 2021-06-05T09:37:47
| 2021-06-05T09:37:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,162
|
py
|
'''
import cv2
import time
import numpy as np
cap = cv2.VideoCapture(0)
time.sleep(2)
background=0
#capturing the background
for i in range(30):
ret,background=cap.read()
while(cap.isOpened()):
ret,img = cap.read()
if not ret:
break
#if the image is flipped, use image = np.flip(image, axis = 1)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_red=np.array([0,120,70])
upper_red=np.array([10,255,255])
mask1=cv2.inRange(hsv,lower_red,upper_red)
lower_red=np.array([170,120,70])
upper_red=np.array([180,255,255])
mask2=cv2.inRange(hsv,lower_red,upper_red)
mask1 = mask1 + mask2
mask1=cv2.morphologyEx(mask1,cv2.MORPH_OPEN,np.ones((3,3),np.uint8),iterations=2)
mask1=cv2.morphologyEx(mask1,cv2.MORPH_DILATE,np.ones((3,3),np.uint8),iterations=1)
mask2=cv2.bitwise_not(mask1)
res1=cv2.bitwise_and(background,background,mask=mask1)
res2=cv2.bitwise_and(img,img,mask=mask2)
final_output=cv2.addWeighted(res1,1,res2,1,0)
cv2.imshow('Eureka !!', final_output)
k=cv2.waitKey(10)
if k==27:
break
cap.release()
cv2.destroyAllWindows()
'''
import cv2
import numpy as np
import time
cap = cv2.VideoCapture(0)
time.sleep(2)
background = 0
for i in range(30):
ret, background = cap.read()
while (cap.isOpened()):
ret, img = cap.read()
if not ret:
break
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_red = np.array([80, 125, 50])
upper_red = np.array([90, 255,255])
mask1 = cv2.inRange(hsv, lower_red, upper_red)
lower_red = np.array([110, 120, 70])
upper_red = np.array([120, 255, 255])
mask2 = cv2.inRange(hsv, lower_red, upper_red)
mask1 = mask1 + mask2
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))
mask2 = cv2.bitwise_not(mask1)
res1 = cv2.bitwise_and(img, img, mask=mask2)
res2 = cv2.bitwise_and(background, background, mask=mask1)
final_output=cv2.addWeighted(res1, 1, res2, 1, 0)
cv2.imshow('Magical Cloak', final_output)
k=cv2.waitKey(1)
if k==27:
break
cap.release()
cv2.destroyAllWindows()
exit()
|
[
"sharma.lokesh.222001@gmail.com"
] |
sharma.lokesh.222001@gmail.com
|
5b1cc4a6717f7b454656c18eceed3a052a2f5586
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/311.py
|
b83b53e1e53057ef2a8ec9616114038309e9174b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
import sys
input = sys.stdin
def solve(S,k):
S = list(S)
count = 0
for i in range(len(S)-k+1):
if S[i]=='-':
for j in range(k):
S[i+j] = '-' if S[i+j]=='+' else '+'
count+=1
for j in range(k):
if S[-j]=='-':
return 'IMPOSSIBLE'
return count
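# Hand-traced examples for illustration (these match the greedy flip above):
#   solve("---+-++-", 3) -> 3
#   solve("-+-+-", 4)    -> 'IMPOSSIBLE'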
for case in range(int(input.readline())):
values = input.readline().split()
print("Case #"+ str(case+1) +":", solve(values[0],int(values[1])))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2db407d8e2e6ffc8407c3b7340d1f4b8c3f2563d
|
b99bbc50ab1d039948ccf853963ae044a97498fb
|
/src/api/com_interface/urls/live.py
|
5c48f7ee8cbec7860dd179d7c5a533b6aabf7838
|
[] |
no_license
|
fan1018wen/Alpha
|
26899cc0eb6761bf6bd8089e7d12716c9e7ae01e
|
c50def8cde58fd4663032b860eb058302cbac6da
|
refs/heads/master
| 2021-05-12T12:54:15.747220
| 2017-10-11T10:58:51
| 2017-10-11T10:58:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@date: 2016-06-13
@author: Devin
"""
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from ..views import live
urlpatterns = [
url(r'^$', live.LiveViewList.as_view()),
url(r'^(?P<pk>\S+)$', live.LiveView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
[
"mengqiang@1b2b.cn"
] |
mengqiang@1b2b.cn
|
e908fff3d5d94413a53b9568c9463d0369bdf469
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/ec_11423-2311/sdB_EC_11423-2311_coadd.py
|
236e5bdcb2b2d2516b7158fc1ff8a94b2e130934
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[176.209125,-23.471569], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_EC_11423-2311/sdB_EC_11423-2311_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_EC_11423-2311/sdB_EC_11423-2311_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
f5a7b3700c827074a7351ba3ab12a4f25393f769
|
f8a317ace8e91d5b962586953bc38ef6ff2d3a20
|
/src/finanzas/authentication/serializers.py
|
a7ed97b3d5c459ca39609412c46d4b1f1b884296
|
[
"Apache-2.0"
] |
permissive
|
jualjiman/finanzas
|
e63cae335d33b773874c913d23fc54a21a7ea5e9
|
a1af6f1a212a3cf172bf84eb668245dbffeb33a9
|
refs/heads/master
| 2020-05-18T17:15:48.246703
| 2015-08-10T02:38:08
| 2015-08-10T02:38:08
| 40,459,464
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
from django.contrib.auth import update_session_auth_hash
from rest_framework import serializers
from finanzas.authentication.models import Account
class AccountSerializer(serializers.ModelSerializer):
password = serializers.CharField(
write_only=True,
required=False
)
confirm_password = serializers.CharField(
write_only=True,
required=False
)
class Meta:
model = Account
fields = (
'id', 'email', 'username',
'created_at', 'updated_at',
'first_name', 'last_name', 'password',
'confirm_password'
)
read_only_fields = (
'created_at',
'updated_at'
)
def create(self, validated_data):
return Account.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.username = validated_data.get(
'username',
instance.username
)
password = validated_data.get(
'password',
None
)
confirm_password = validated_data.get(
'confirm_password',
None
)
if password and confirm_password and password == confirm_password:
instance.set_password(password)
instance.save()
update_session_auth_hash(
self.context.get('request'),
instance
)
return instance
|
[
"jualjiman@gmail.com"
] |
jualjiman@gmail.com
|
546b04313b4d6e5b2d9dd2d7e686e24a9dbd28b8
|
cda34a391e1d3fd96cdff8ea64d5dd73dc0e83e4
|
/educa/courses/models.py
|
63db5d5651ab6ae7c028bcc918bcd834624cf9b5
|
[
"MIT"
] |
permissive
|
prakharchoudhary/DjangoSpree
|
ee824dd44c015984a85f68105e40e1202093f757
|
20c5a1d9eb5d00288ebe16d238525ba8cc5fad09
|
refs/heads/master
| 2021-01-02T09:46:43.599914
| 2018-06-26T08:14:02
| 2018-06-26T08:14:02
| 99,300,583
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,440
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from .fields import OrderField
# Create your models here.
class Subject(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
class Course(models.Model):
owner = models.ForeignKey(User,
related_name='courses_created')
subject = models.ForeignKey(Subject,
related_name='courses')
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200, unique=True)
overview = models.TextField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-created',)
def __str__(self):
return self.title
class Module(models.Model):
course = models.ForeignKey(Course,
related_name='modules')
title = models.CharField(max_length=200)
description = models.TextField(blank=True)
order = OrderField(blank=True, for_fields=['course'])
class Meta:
ordering = ['order']
def __str__(self):
return '{}. {}'.format(self.order, self.title)
class Content(models.Model):
"""
- content_type : A ForeignKey field to the ContentType model
- object_id : This is PositiveIntegerField to store the primary key of the related object
- item : A GenericForeignKey field to the related object by combining the two previous fields
"""
module = models.ForeignKey(Module,
related_name='contents')
content_type = models.ForeignKey(ContentType,
limit_choices_to={'model__in': ('text',
'video', 'image', 'file')})
object_id = models.PositiveIntegerField()
item = GenericForeignKey('content_type', 'object_id')
order = OrderField(blank=True, for_fields=['module'])
class Meta:
ordering = ['order']
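# Usage sketch for the generic relation above (hypothetical objects, shown
# here only for illustration):
#   text = Text.objects.create(owner=user, title='Intro', content='...')
#   Content.objects.create(module=module, item=text)
# Assigning `item` lets the GenericForeignKey populate content_type and
# object_id automatically.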
class ItemBase(models.Model):
owner = models.ForeignKey(User,
related_name='%(class)s_related')
title = models.CharField(max_length=250)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
def __str__(self):
return self.title
class Text(ItemBase):
content = models.TextField()
class File(ItemBase):
file = models.FileField(upload_to='files')
class Image(ItemBase):
file = models.FileField(upload_to='images')
class Video(ItemBase):
url = models.URLField()
|
[
"prakhar2397@gmail.com"
] |
prakhar2397@gmail.com
|
f637b0557e6e594194bba1fd65a263d48d42cad6
|
6a95b330e1beec08b917ff45eccfd6be3fd4629f
|
/kubernetes/test/test_v1_config_map_projection.py
|
daaae0c01f9732a6e5e977ef6d7d6d9335d2fbe3
|
[
"Apache-2.0"
] |
permissive
|
TokkoLabs/client-python
|
f4a83d6540e64861b59e322c951380a670578d7f
|
f1ad9c6889105d8510472606c98f8d3807f82020
|
refs/heads/master
| 2023-07-14T01:36:46.152341
| 2017-12-21T21:32:11
| 2017-12-21T21:32:11
| 115,042,671
| 0
| 0
|
Apache-2.0
| 2021-08-06T03:29:17
| 2017-12-21T20:05:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_config_map_projection import V1ConfigMapProjection
class TestV1ConfigMapProjection(unittest.TestCase):
""" V1ConfigMapProjection unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ConfigMapProjection(self):
"""
Test V1ConfigMapProjection
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_config_map_projection.V1ConfigMapProjection()
pass
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
e856163f7b9d175e64f44a92b7a8655da0287f5a
|
cf7b827958166c8569eb58deb511cc3f07567741
|
/in_Python/0832 Flipping an Image.py
|
78099861650cb1394f6cac05f0f76c2d739941cc
|
[] |
no_license
|
YangLiyli131/Leetcode2020
|
e4e36eb36b1983f73b0e733455b4a7953dfebe6d
|
20623defecf65cbc35b194d8b60d8b211816ee4f
|
refs/heads/master
| 2023-08-22T06:00:55.924112
| 2021-09-18T19:04:15
| 2021-09-18T19:04:15
| 251,426,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
class Solution(object):
def iv(self,t):
if t == 1:
return 0
else:
return 1
def flipAndInvertImage(self, A):
"""
:type A: List[List[int]]
:rtype: List[List[int]]
"""
row = len(A)
col = len(A[0])
for r in range(row):
cur_row = A[r]
i = 0
j = col-1
while i <= j:
temp = cur_row[i]
cur_row[i] = self.iv(cur_row[j])
cur_row[j] = self.iv(temp)
i += 1
j -= 1
A[r] = cur_row
return A
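# Illustration (LeetCode 832's sample, traced through the two-pointer loop above):
#   flipAndInvertImage([[1,1,0],[1,0,1],[0,0,0]])
#   -> [[1,0,0],[0,1,0],[1,1,1]]   (each row reversed, then bits inverted)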
|
[
"noreply@github.com"
] |
YangLiyli131.noreply@github.com
|
b22d08a95310a44da7bc43077102ca35e025dda5
|
dbf48e804e1792999854832e64a7dae9f42f71e2
|
/Spikes/spikedetekt2/spikedetekt2/core/tests/test_script.py
|
4a48caf55fc470a9bf6bde40d574f657ecf725dc
|
[] |
no_license
|
sapphire008/Python
|
15d3d7885ac82333654b6729c2a57ed760e796a8
|
b2783eabb1987091051614b8f12a4778e158a90b
|
refs/heads/master
| 2023-08-09T04:38:43.077285
| 2023-07-28T18:36:03
| 2023-07-28T18:36:03
| 9,880,648
| 15
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,079
|
py
|
"""Main module tests."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import os.path as op
import numpy as np
import tempfile
from kwiklib import (excerpts, get_params, pydict_to_python, get_filenames,
itervalues, create_trace, Experiment)
from spikedetekt2.core.script import run_spikedetekt
# -----------------------------------------------------------------------------
# Fixtures
# -----------------------------------------------------------------------------
DIRPATH = None
prm_filename = 'myexperiment.prm'
prb_filename = 'myprobe.prb'
dat_filename = 'myexperiment.dat'
name = 'myexperiment'
sample_rate = 20000.
duration = 1.
nchannels = 8
nsamples = int(sample_rate * duration)
def setup():
global DIRPATH
DIRPATH = tempfile.mkdtemp()
# Create DAT file.
raw_data = create_trace(nsamples, nchannels)
for start, end in excerpts(nsamples, nexcerpts=10, excerpt_size=10):
raw_data[start:end] += np.random.randint(low=-10000, high=10000,
size=(10, nchannels))
raw_data.tofile(op.join(DIRPATH, dat_filename))
# Create PRM file.
prm = get_params(**{
'raw_data_files': dat_filename,
'experiment_name': name,
'nchannels': nchannels,
'sample_rate': sample_rate,
'detect_spikes': 'positive',
'prb_file': prb_filename,
})
prm_contents = pydict_to_python(prm)
with open(op.join(DIRPATH, prm_filename), 'w') as f:
f.write(prm_contents)
# Create PRB file.
prb_contents = """
nchannels = %NCHANNELS%
channel_groups = {0:
{
'channels': list(range(nchannels)),
'graph': [(i, i + 1) for i in range(nchannels - 1)],
}
}""".replace('%NCHANNELS%', str(nchannels)).replace(' ', '')
with open(op.join(DIRPATH, prb_filename), 'w') as f:
f.write(prb_contents)
def teardown():
os.remove(op.join(DIRPATH, prm_filename))
os.remove(op.join(DIRPATH, prb_filename))
files = get_filenames(name, dir=DIRPATH)
    for path in itervalues(files):
        os.remove(path)
# -----------------------------------------------------------------------------
# Main tests
# -----------------------------------------------------------------------------
def test_main_1():
run_spikedetekt(op.join(DIRPATH, prm_filename))
# Open the data files.
with Experiment(name, dir=DIRPATH) as exp:
nspikes = len(exp.channel_groups[0].spikes)
assert exp.channel_groups[0].spikes.clusters.main.shape[0] == nspikes
assert exp.channel_groups[0].spikes.features_masks.shape[0] == nspikes
assert exp.channel_groups[0].spikes.waveforms_filtered.shape[0] == nspikes
fm = exp.channel_groups[0].spikes.features_masks
assert fm[:,:,0].min() < fm[:,:,0].max()
# Make sure the masks are not all null.
assert fm[:,:,1].max() > 0
|
[
"cui23327@gmail.com"
] |
cui23327@gmail.com
|
c4560ba0e5f05479e057ca93209cfac3c81a3528
|
c3ace26cd05f3dc2097b2302ff9f5078468df8b3
|
/flask-app/models.py
|
1edc8fe6f05a14a0b72284518d949d87002760f1
|
[] |
no_license
|
rid47/lecture4
|
ccdf5ff49c99eb28c098c9169648cbcbee66207e
|
398b1f1d94ad19bf9abce843e1750621297f6e4e
|
refs/heads/master
| 2022-04-24T16:56:42.697938
| 2020-04-26T08:35:03
| 2020-04-26T08:35:03
| 258,978,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Flight(db.Model):
__tablename__ = "flights"
id = db.Column(db.Integer, primary_key=True)
origin = db.Column(db.String, nullable=False)
destination = db.Column(db.String, nullable=False)
duration = db.Column(db.Integer, nullable=False)
passengers = db.relationship("Passenger", backref="flight", lazy=True)
def add_passenger(self, name):
p = Passenger(name=name, flight_id=self.id)
db.session.add(p)
db.session.commit()
class Passenger(db.Model):
__tablename__ = "passengers"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=True)
flight_id = db.Column(db.Integer, db.ForeignKey("flights.id"), nullable=False)
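# A minimal usage sketch (hypothetical values; assumes an app context and
# db.create_all() have already been set up):
#   f = Flight(origin="VNO", destination="LHR", duration=180)
#   db.session.add(f); db.session.commit()
#   f.add_passenger("Alice")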
|
[
"ridwanmizan@gmail.com"
] |
ridwanmizan@gmail.com
|
1d121b81f5661077acf9d3396c2d18b2a8aafe47
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5753053697277952_1/Python/Caust1c/a1.py
|
bf1d9a842fdbb5f014850c2d3225eece06188450
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,426
|
py
|
### Set the input and output file names
import time
import datetime
import string
import operator
filename = 'A-large'
input_filename = filename + '.in'
output_filename = filename + '.out.' + datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d-%H%M%S') + '.txt'
def findmax(arr_int):
m = max(arr_int)
return [i for i, j in enumerate(arr_int) if j == m]
### Open input file for reading
with open(input_filename) as f:
lines = f.read().splitlines()
### Open output file for writing
with open(output_filename, 'w') as output:
######################################################
### Initialise variables from first line of the file
######################################################
vars = lines[0].split(' ')
cases = int(vars[0]) # number of cases
print(str(cases) + ' cases detected.') # [soft validation]
lineNum = 1 # first case starts here
caseNum = 0 # for counting the num of cases
caseSize_r = 1 # number of rows in each case; default = 1
caseSize_c = 1 # number of columns in each case; default = 1
infoLines = True # Toggle according to question
#infoLines = False # Toggle according to question
### i.e. infoLines == True
if infoLines:
while lineNum < len(lines):
### A new case! Initialize some variables
caseNum += 1 # case number count
party_count = int(lines[lineNum].split(' ')[0])
party_sizes = map(int, lines[lineNum + 1].split(' '))
party_names = string.uppercase[:party_count]
room_total = sum(party_sizes)
print('--------')
print('Case #%d: %s' % (caseNum, " ".join(str(x) for x in party_names)))
print('Case #%d: %s' % (caseNum, " ".join(str(x) for x in party_sizes)))
print('Case #%d: %d total people' % (caseNum, room_total))
print('Case #%d: maxcases in index: %s' % (caseNum, " ".join(str(x) for x in findmax(party_sizes))))
print('Case #%d: %d maxcases' % (caseNum, len(findmax(party_sizes))))
### Do the Work!
### TODO!
myAns = ''
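            # Greedy evacuation: while 4+ people remain, free a seat from the
            # largest party (or one from each of two parties when they tie for
            # largest); with exactly 3 left take one from the largest, and with
            # 2 left take one from each of the two largest parties.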
while room_total > 0:
#if room_total == 1:
# myAns = join(myAns, ' ',
if room_total >= 4:
party_maxes = findmax(party_sizes)
if len(party_maxes) == 1:
print('step. 4+ remain (1 max)')
party_sizes[party_maxes[0]] += -1
myAns = myAns + (' %s' % (party_names[party_maxes[0]]))
print('%s' % (party_names[party_maxes[0]]))
print('%s' % (" ".join(str(x) for x in party_sizes)))
room_total += -1
else:
print('step. 4+ remain (2+ max)')
party_sizes[party_maxes[0]] += -1
party_sizes[party_maxes[1]] += -1
myAns = myAns + (' %s%s' % (party_names[party_maxes[0]],party_names[party_maxes[1]]))
print('%s%s' % (party_names[party_maxes[0]],party_names[party_maxes[1]]))
print('%s' % (" ".join(str(x) for x in party_sizes)))
room_total += -2
elif room_total == 3:
print('step. 3 remain')
party_maxes = findmax(party_sizes)
party_sizes[party_maxes[0]] += -1
myAns = myAns + (' %s' % (party_names[party_maxes[0]]))
print('%s' % (party_names[party_maxes[0]]))
print('%s' % (" ".join(str(x) for x in party_sizes)))
room_total += -1
elif room_total == 2:
print('step. 2 remain')
party_maxes = findmax(party_sizes)
party_sizes[party_maxes[0]] += -1
party_sizes[party_maxes[1]] += -1
myAns = myAns + (' %s%s' % (party_names[party_maxes[0]],party_names[party_maxes[1]]))
print('%s%s' % (party_names[party_maxes[0]],party_names[party_maxes[1]]))
print('%s' % (" ".join(str(x) for x in party_sizes)))
room_total += -2
else:
print('###################ERROR')
room_total = -1
### Output myArr
print('Case #%d:%s' % (caseNum, myAns))
output.write('Case #%d:%s\n' % (caseNum, myAns))
### Step
lineNum += 2
### i.e. infoLines == False
else:
print('deadend')
### END
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
665fbacd18a16c4a0381bbb91ebfed9745bf12a4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02927/s950956417.py
|
a97f1206dbc46df6ba04a9ae44abc68733ed10d6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
m,d = map(int,input().split())
cnt = 0
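# Count the (month i, day j) pairs where both digits of j are >= 2 and their
# product d1 * d2 equals the month number i.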
for i in range(1,m+1):
for j in range(1,d+1):
#print(i,j)
num = 1
d2 = (j-j%10)//10
d1 = j%10
if d2 >=2 and d1 >=2:
num =d1*d2
#print(i,j,num,d1,d2)
#print(i,j,num,(j-j%10),j%10)
if num == i:
cnt +=1
print(cnt)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
24febda16aaf84019c85d59f061b3d6ff3898bc0
|
43ab33b2f50e47f5dbe322daa03c86a99e5ee77c
|
/test/test_od_mcomplex_type_definition_picture.py
|
493605438a6e7cfdeebbd08be2e3907161a9123f
|
[] |
no_license
|
Sage-Bionetworks/rcc-client
|
c770432de2d2950e00f7c7bd2bac22f3a81c2061
|
57c4a621aecd3a2f3f9faaa94f53b2727992a01a
|
refs/heads/main
| 2023-02-23T05:55:39.279352
| 2021-01-21T02:06:08
| 2021-01-21T02:06:08
| 331,486,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,525
|
py
|
# coding: utf-8
"""
nPhase REST Resource
REDCap REST API v.2 # noqa: E501
The version of the OpenAPI document: 2.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import rcc
from rcc.models.od_mcomplex_type_definition_picture import ODMcomplexTypeDefinitionPicture # noqa: E501
from rcc.rest import ApiException
class TestODMcomplexTypeDefinitionPicture(unittest.TestCase):
"""ODMcomplexTypeDefinitionPicture unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ODMcomplexTypeDefinitionPicture
            include_optional is a boolean: when False only required
params are included, when True both required and
optional params are included """
# model = rcc.models.od_mcomplex_type_definition_picture.ODMcomplexTypeDefinitionPicture() # noqa: E501
if include_optional :
return ODMcomplexTypeDefinitionPicture(
picture_file_name = '0',
image_type = '0'
)
else :
return ODMcomplexTypeDefinitionPicture(
)
def testODMcomplexTypeDefinitionPicture(self):
"""Test ODMcomplexTypeDefinitionPicture"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"thomas.yu@sagebase.org"
] |
thomas.yu@sagebase.org
|
8bf53f137afc4728262b2316165a54280baa1e66
|
acc244c97a943d8e2074339afa1bff1274ae4cfc
|
/scripts/cgat_build_report_page.py
|
7cd893e0d601bb43ea071b7da33f5b20a3caf22d
|
[] |
no_license
|
eromasko/cgat
|
00114f4c95b439ba6595ddf2092d1a3307347401
|
d82d197f3913b8d65b656c0b205ca48854fdb2a6
|
refs/heads/master
| 2021-01-17T09:37:17.168278
| 2015-02-20T09:03:31
| 2015-02-20T09:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,581
|
py
|
'''
cgat_build_report_page.py - build report page for all projects
=======================================================================
:Author:
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
This script scans all of :file:`/ifs/projects/sftp` for :file:`index.html` files
and outputs an html formatted summary table into :file:`/ifs/projects/overview`.
Usage
-----
Example::
python cgat_build_report_page.py
Type::
python cgat_build_report_page.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import re
import optparse
import subprocess
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id: cgat_script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage=globals()["__doc__"])
parser.add_option("-p", "--path", dest="path", type="string",
help="path to scan for files [%default]")
parser.add_option("-d", "--destination", dest="destination", type="string",
help="path to deposit files into [%defaul]")
parser.set_defaults(path='/ifs/projects/sftp',
url='http://www.cgat.org/downloads/',
dest='/ifs/projects/overview')
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
statement = "find %s -name 'index.html'" % options.path
process = subprocess.Popen(statement,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
files = stdout.split('\n')
files.sort()
outfile = IOTools.openFile(os.path.join(options.dest, "index.html"), "w")
outfile.write( '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>CGAT project reports</title>
<link rel="stylesheet" href="cgat.css" type="text/css" />
<link rel="stylesheet" href="pygments.css" type="text/css" />
<link rel="shortcut icon" href="http://cgatwiki.anat.ox.ac.uk/favicon.ico">
<script type="text/javascript" src="sorttable.js"></script>
</head>
<body>
<div class="related">
<h3>Navigation</h3>
<ul>
<li><a href="index.html">CGAT Projects Overview</a> »</li>
</ul>
</div>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body">
<div class="section" id="cgat-pipelines">
<H1>CGAT exported project pages</H1>
<p>
This page is for internal use only. Do not distribute outside of CGAT and
do not make this page available on the world wide web.
</p>
<table class="sortable">\n''' )
outfile.write(
'''<tr><th>Project</th><th>Report</th><th>Title</th></tr>\n''' )
for f in files:
if f == '':
continue
proj = re.search('(proj\d+)', f).groups()[0]
relpath = re.sub('.*proj\d+/', '', f)
report = re.sub('^[^/]*/', '', os.path.dirname(relpath))
lines = IOTools.openFile(f).readlines()
titles = [x for x in lines if "<title>" in x]
if titles:
title = re.search("<title>(.*)</title>", titles[0]).groups()[0]
else:
title = "NA"
if title.endswith("documentation"):
title = title[:-len("documentation")]
url = os.path.join(options.url, relpath)
outfile.write(
'<tr><td>%(proj)s</td><td><a HREF="%(url)s">%(report)s</td><td>%(title)s</td></tr>\n' % locals())
outfile.write( '''
</table>
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
<p class="logo"><a href="contents.html">
<img class="logo" src="cgat_logo.png" alt="Logo"/>
</a></p>
</body>
</html>\n''' )
outfile.close()
E.info('created output file %s' % outfile.name)
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[
"andreas.heger@gmail.com"
] |
andreas.heger@gmail.com
|
787e85a801a1a850d53cb69481a3ab6f9107e2b8
|
c86e31e8e67ccb9a164903e394df7a444b5406de
|
/avg_word2vec.py
|
73495851d695da43e749fa28b2040e508a22b1e8
|
[] |
no_license
|
candlewill/short_texts_sentiment_analysis
|
fb0e329c4c1ad6f8a72e6c2858a921913dde38b2
|
760e60e1cf430a8d0b1a313523a0c6f773a9c4c1
|
refs/heads/master
| 2020-04-24T15:36:51.258749
| 2015-07-14T08:28:54
| 2015-07-14T08:28:54
| 38,301,099
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,052
|
py
|
from sklearn.cross_validation import train_test_split
from gensim.models.word2vec import Word2Vec
from load_data import load_train_data, load_processed_data
import numpy as np
from sklearn.preprocessing import MinMaxScaler
# The following helper is useful:
# train_test_split(np.array(texts), np.array(sentiment), test_size=0.2)
x_train, y_train = load_processed_data(data_type='train', stem=False)
x_test, y_test = load_processed_data(data_type='test', stem=False)
from preprocess import preprocessor as preprocess
n_dim = 100
scaling = False
# Initialize model and build vocab
imdb_w2v = Word2Vec(size=n_dim, min_count=10)
imdb_w2v.build_vocab(x_train)
# Train the model over train_reviews (this may take several minutes)
imdb_w2v.train(x_train)
# Build word vector for training set by using the average value of all word vectors in the tweet, then scale
# from load_data import load_word_embedding
# imdb_w2v = load_word_embedding()
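# Average the vectors of all in-vocabulary words of a tweet into one
# fixed-size document vector; out-of-vocabulary words are skipped.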
def buildWordVector(text, size):
vec = np.zeros(size).reshape((1, size))
count = 0.
for word in text:
try:
vec += imdb_w2v[word].reshape((1, size))
count += 1.
except KeyError:
continue
if count != 0:
vec /= count
return vec
from sklearn.preprocessing import scale
train_vecs = np.concatenate([buildWordVector(z, n_dim) for z in x_train])
if scaling:
train_vecs = scale(train_vecs)
# Train word2vec on test tweets
# imdb_w2v.train(x_test)
# Build test tweet vectors then scale
test_vecs = np.concatenate([buildWordVector(z, n_dim) for z in x_test])
if scaling:
test_vecs = scale(test_vecs)
min_max_scaler = MinMaxScaler()
train_vecs = min_max_scaler.fit_transform(train_vecs)
test_vecs = min_max_scaler.fit_transform(test_vecs)
# Use classification algorithm (i.e. Stochastic Logistic Regression) on training set, then assess model performance on test set
from classifiers import gNB, mNB
from analysis import analysis_result
pre = mNB(train_vecs, y_train, test_vecs)
analysis_result(pre, y_test)
|
[
"yunchaohe@gmail.com"
] |
yunchaohe@gmail.com
|
b5f8821fc9ecb1f613a863bf6fbc7f174e5ca53a
|
905750d3f6bf6232ffefd00ce74b4c7684d7f27e
|
/lmp_lib.py
|
b03eae6590766e65883ac744785cfa5ba362e2f7
|
[] |
no_license
|
petervanya/GeneralScripts
|
d1147b89defade68e68122e892e8844f7d4c0e64
|
77c0180156ceb78f08fabf7481c16be8d9aa8bfa
|
refs/heads/master
| 2020-12-19T12:45:46.227823
| 2016-07-28T14:56:59
| 2016-07-28T14:56:59
| 40,310,828
| 0
| 2
| null | 2015-11-30T15:53:57
| 2015-08-06T14:58:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,414
|
py
|
#!/usr/bin/env python
"""
A collection of functions to manipulate LAMMPS files
pv278@cam.ac.uk, 11/01/16
"""
import numpy as np
# ===== print input
def header2str(N, Nbonds, atomtypes, bondtypes, L):
"""Generate LAMMPS header"""
s = "#blabla\n"
s += str(N) + " atoms\n"
s += str(Nbonds) + " bonds\n"
s += str(atomtypes) + " atom types\n"
s += str(bondtypes) + " bond types\n"
s += "\n"
s += "0.0 " + str(L) + " xlo xhi\n"
s += "0.0 " + str(L) + " ylo yhi\n"
s += "0.0 " + str(L) + " zlo zhi\n\n"
return s
def mass2str(masses):
"""Print mass dictionary into string for LAMMPS data file"""
s = "Masses\n\n"
for k, v in masses.items():
s += str(k) + " " + str(v) + "\n"
return s + "\n"
def pair_dpd_coeffs2str(coeffs):
"""
Structure:
* key: "part1 part2"
* value: [force, gamma, cutoff]
"""
s = "PairIJ Coeffs\n\n"
for k, v in coeffs.items():
s += "%s %s %s %s\n" % (str(k), str(v[0]), str(v[1]), str(v[2]))
return s + "\n"
def bond_coeffs2str(k_ij):
"""Print bond coefficients into string.
Structure:
* key: 1..4
* value [k_ij, r0]
"""
s = "Bond Coeffs\n\n"
for k, v in k_ij.items():
s += "%s %s %s\n" % (str(k), "%e" % v[0], "%e" % v[1])
return s + "\n"
def atoms2str(mat):
"""Convert atomic matrix to str, atom_type molecular
xyz_mat[:, 0] are atom ids"""
M = len(mat)
s = ""
for i in range(M):
s += "%i\t%i\t%i\t%e\t%e\t%e\n" % \
(i+1, mat[i, 0], mat[i, 1], mat[i, 2], mat[i, 3], mat[i, 4])
return s + "\n"
def bonds2str(bond_mat):
"""Convert bond matrix to string"""
M, N = bond_mat.shape
s = ""
for i in range(M):
s += str(i+1) + "\t"
for j in range(N):
s += str(bond_mat[i, j]) + "\t"
s += "\n"
return s + "\n"
# ===== manipulate output
def read_xyzfile(outfile):
"""Read one xyz outfile into a numpy matrix"""
A = open(outfile, "r").readlines()[2:]
A = [line.split() for line in A]
A = np.array(A, order="F").astype(float)
return A
def save_xyzfile(fname, mat):
"""Take xyz matrix [ids, x, y, z] and save into fname"""
N = len(mat)
with open(fname, "w") as f:
f.write(str(N) + "\nbla\n")
for i in range(N):
f.write("%i\t%f\t%f\t%f\n" % (mat[i, 0], mat[i, 1], mat[i, 2], mat[i, 3]))
|
[
"peter.vanya@gmail.com"
] |
peter.vanya@gmail.com
|
da902d2befb4663c420a37b3b99b524787e0a04d
|
dc51e4714820d991e7d0e94b3e9eac4dbc67eea7
|
/历史练习1120/host_management/utils/page.py
|
e26b17ffd6eb6a51666b153df1c3c5b40d4a2517
|
[] |
no_license
|
ruoxiaojie/Django
|
537d27abe9ebb85e0dfc69585f318a87e7514a70
|
92b88600953cd4ff743032cab3d4785437c949e0
|
refs/heads/master
| 2021-01-15T22:18:56.033883
| 2018-03-09T06:15:46
| 2018-03-09T06:15:46
| 99,894,862
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
#!/usr/bin/python
#Author:xiaojie
# -*- coding:utf-8 -*-
class Page(object):
def __init__(self,current_page,all_count,base_url,per_page=10,pager_page_count=11):
"""
        Pagination initializer
        :param current_page: current page number
        :param per_page: number of records displayed per page
        :param all_count: total number of records in the database
        :param pager_page_count: maximum number of page links displayed at once
"""
self.base_url = base_url
self.current_page = current_page
self.per_page = per_page
self.all_count = all_count
self.pager_page_count = pager_page_count
pager_count, b = divmod(all_count, per_page)
if b != 0:
pager_count += 1
self.pager_count = pager_count
half_pager_page_count = int(pager_page_count / 2)
self.half_pager_page_count = half_pager_page_count
@property
def start(self):
"""
        Start index into the data for the current page
:return:
"""
return (self.current_page - 1) * self.per_page
@property
def end(self):
"""
        End index into the data for the current page
:return:
"""
return self.current_page * self.per_page
def page_html(self):
"""
        Generate the HTML pager links
:return:
"""
        # If the total page count (pager_count) is smaller than pager_page_count (11)
if self.pager_count < self.pager_page_count:
pager_start = 1
pager_end = self.pager_count
else:
            # The total page count already exceeds 11
            # If the current page <= half_pager_page_count (5)
if self.current_page <= self.half_pager_page_count:
pager_start = 1
pager_end = self.pager_page_count
else:
                # If current_page + half_pager_page_count exceeds the total page count
if (self.current_page + self.half_pager_page_count) > self.pager_count:
pager_end = self.pager_count
pager_start = self.pager_count - self.pager_page_count + 1
else:
pager_start = self.current_page - self.half_pager_page_count
pager_end = self.current_page + self.half_pager_page_count
page_list = []
if self.current_page <= 1:
prev = '<a href="#">上一页</a>'
else:
prev = '<a href="%s?page=%s">上一页</a>' % (self.base_url,self.current_page - 1,)
page_list.append(prev)
for i in range(pager_start, pager_end + 1):
if self.current_page == i:
tpl = '<a class="active" href="%s?page=%s">%s</a>' % (self.base_url,i, i,)
else:
tpl = '<a href="%s?page=%s">%s</a>' % (self.base_url,i, i,)
page_list.append(tpl)
if self.current_page >= self.pager_count:
nex = '<a href="#">下一页</a>'
else:
nex = '<a href="%s?page=%s">下一页</a>' % (self.base_url,self.current_page + 1,)
page_list.append(nex)
page_str = "".join(page_list)
return page_str
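# A minimal usage sketch (hypothetical Django view):
#   page_obj = Page(current_page=3, all_count=250, base_url='/hosts/')
#   hosts = Host.objects.all()[page_obj.start:page_obj.end]
#   pager_html = page_obj.page_html()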
|
[
"475030894@qq.com"
] |
475030894@qq.com
|
e33e9cf7982165ee7ef95e0f9d59cf8b243ce4da
|
755e4e6e966433fe887f0f28f14916696b1588d7
|
/code/evaluation/video/BackgroundSubtractorDummy.py
|
8944c4aecc694c2070e8febb9edd136dc40f026e
|
[] |
no_license
|
phildue/FingerspellingRecognition
|
f18518a6e2e29b769d131e5b54846f00213f3ff1
|
1b5236142734d7b50f0f4161ecc533b7d10347b8
|
refs/heads/master
| 2021-03-24T10:40:24.507766
| 2017-07-03T09:33:09
| 2017-07-03T09:33:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
import cv2
from preprocessing.segmentation.BackgroundSubtractor import BackgroundSubtractor
class BackgroundSubtractorDummy(BackgroundSubtractor):
def __init__(self, diff_img, test_img):
super().__init__()
self.diff_img = diff_img
self.test_img = test_img
def get_background(self):
return cv2.cvtColor(self.test_img + self.diff_img, cv2.COLOR_RGB2GRAY)
|
[
"phild@protonmail.com"
] |
phild@protonmail.com
|
4fbdf8557e5a9c34e48538a69f02f7ebb9a07e07
|
2daa3894e6d6929fd04145100d8a3be5eedbe21c
|
/tests/artificial/transf_pow3/trend_linear/cycle_0/ar_/test_artificial_128_pow3_linear_0__0.py
|
02a765a3634a574c09d19185e979246cdc9569dc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Henri-Lo/pyaf
|
a1f73a0cc807873bd7b79648fe51de9cfd6c126a
|
08c968425d85dcace974d90db7f07c845a0fe914
|
refs/heads/master
| 2021-07-01T12:27:31.600232
| 2017-09-21T11:19:04
| 2017-09-21T11:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
dataset = tsds.generate_random_TS(N = 128 , FREQ = 'D', seed = 0, trendtype = "linear", cycle_length = 0, transform = "pow3", sigma = 0.0, exog_count = 0, ar_order = 0);
art.process_dataset(dataset);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
fb210f7f5afaf1f68f627a6e2dcde02c698f21ca
|
011a750fae8ade67f726a9749e05cc4afb8e360d
|
/Vivo_create case/test.py
|
e6a6e3b03328bade918031fabf7cb58627217543
|
[] |
no_license
|
MartinCarufel/PycharmProjects
|
c7e50b66a24d4a216b7a217192fcd446f5324d9f
|
aaa6c95b3e2e6525586fb6a03d1c9d484065899b
|
refs/heads/master
| 2023-07-05T22:40:38.650892
| 2023-06-26T13:55:13
| 2023-06-26T13:55:13
| 150,859,642
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
import pyautogui as pg
class Test_suite:
def __init__(self):
self.win_main = {
'win logo': 'win_logo.png',
'new case': 'new case.png'
}
def test1(self):
# location = pg.locateCenterOnScreen('win_logo.png', confidence=0.9)
# print(location)
# pg.moveTo(pg.locateCenterOnScreen('win_logo.png', confidence=0.9))
pg.click(pg.locateCenterOnScreen(self.win_main['new case'], confidence=0.9))
t = Test_suite()
t.test1()
|
[
"maccam6@gmail.com"
] |
maccam6@gmail.com
|
6e5b0ee9ab79d06d5476373ffc2f7c284b0045e4
|
4c614f080633e6a4b372047191b3930f0a17c714
|
/blog/views.py
|
19719c7f9c1f81f3f0f3c4b208c80c72056e9356
|
[] |
no_license
|
Eimis/Blog
|
963d942d1ca8bcce62c14840906d89ec16fbbca1
|
92b61f5807dd28515c22efc7e300da16d8f9e33d
|
refs/heads/master
| 2020-04-06T05:03:38.029555
| 2014-03-09T21:35:03
| 2014-03-09T21:35:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,504
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
from blog.models import Post
from blog.forms import ContactForm
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
def Main(request):
lastest_posts = Post.objects.filter(display=True).order_by("-pub_date",) [:2]
return render(request, "main.html", {"lastest_posts": lastest_posts, })
def Posts(request): # list of Posts in blog
posts = Post.objects.all().order_by("-pub_date")
return render(request, "blog.html", {"posts" : posts,})
def post(request, slug):
    post = Post.objects.get(slug=slug) ## Renders the matching post by slug.
                                       ## Apparently the same names can be
                                       ## used in different views.
return render(request, "post.html", {"post" : post})
def About(request):
post = Post.objects.get(title = "About")
return render(request, "about.html", {"post" : post})
def Contact(request):
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
send_mail(
cd['subject'],
cd['message'],
cd.get('email', 'noreply@example.com'),
['eimantas.stonys@gmail.com'],
)
return HttpResponseRedirect('/contact/thanks/')
else:
form = ContactForm()
return render(request, 'contact.html', {'form': form})
def Thanks(request):
return render(request, "thanks.html",)
|
[
"eimantas.stonys@gmail.com"
] |
eimantas.stonys@gmail.com
|
ab8dedf977f6fb17bb98fb4a05ac19722ff1c918
|
4e5e65d7c07f1a4a85899f10a4eea4977a2ce9a9
|
/scripts/clock_deblur.py
|
f1ec2b5bbc551fd0f1de8cafae922033946d6086
|
[] |
no_license
|
stefanv/aims2014
|
6f5545c668269c1a5db8876fcc3d0735b5225aa7
|
384b9ee8d21dce6d9092ec4b8416859a90225bfc
|
refs/heads/master
| 2020-05-27T07:46:09.487556
| 2014-07-18T14:35:15
| 2014-07-18T14:35:15
| 21,921,085
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, color
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.base import Plugin
image = io.imread('../images/clock_motion.png')
M, N = image.shape
## Should pad, but doesn't make much difference in this case
MM, NN = 2 * M + 1, 2 * N + 1
def hann(image):
    """Multiply ``image`` by a 2D Hann window to suppress edge ringing."""
    wy = np.hanning(image.shape[0])[:, None]
    wx = np.hanning(image.shape[1])
    return wy * wx * image
## Apply Hann window to prevent ringing, then zero-pad
f = np.zeros((MM, NN))
f[:M, :N] = hann(image)
F = np.fft.fft2(f)
v, u = np.ogrid[:MM, :NN]
v -= (MM - 1) // 2
u -= (NN - 1) // 2
def apply_inverse_filter(image, T, a, b, K=5, clip=500):
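    # Linear-motion blur transfer function H(u, v) = T * sinc(ua + vb)
    # * exp(-j*pi*(ua + vb)); the inverse 1/H is clipped at K so frequencies
    # where H is near zero are not amplified without bound.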
uavb = u * a + v * b
H = T * np.sinc(uavb) * np.exp(-1j * np.pi * uavb)
H = np.fft.fftshift(H)
HH = 1./H
HH[np.abs(HH) > K] = K
gg = np.abs(np.fft.ifft2(F * HH))
gg = gg[:M, :N]
gg = np.clip(gg, 0, clip)
gg -= gg.min()
gg /= gg.max()
return gg
viewer = ImageViewer(image)
plugin = Plugin(image_filter=apply_inverse_filter)
plugin += Slider('T', 0, 1, value=0.5, value_type='float', update_on='release')
plugin += Slider('a', -0.1, 0.1, value=0, value_type='float', update_on='release')
plugin += Slider('b', -0.1, 0.1, value=0, value_type='float', update_on='release')
plugin += Slider('K', 0, 100, value=15, value_type='float', update_on='release')
plugin += Slider('clip', 0, 1000, value=750, value_type='float', update_on='release')
viewer += plugin
viewer.show()
|
[
"stefan@sun.ac.za"
] |
stefan@sun.ac.za
|
6c0ef3d70e2d5bad70f88ff429e96d14cc060b77
|
4eb4f5c834e6448782ea11b23dd724551f8988a0
|
/media/documents/Cross Sections/Runway new/script.py
|
17aed56b9fad07affbf77397f9b070c32ce93fe0
|
[] |
no_license
|
Mohab25/Tubra_Django
|
c272736c56e970a918c174307babf7aeae85af25
|
97f422c6f0645ad8e87a247edf33a49921bde451
|
refs/heads/master
| 2023-08-13T16:29:59.504867
| 2021-09-19T14:24:44
| 2021-09-19T14:24:44
| 340,656,691
| 1
| 0
| null | 2021-07-13T06:49:36
| 2021-02-20T13:05:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
import os
from pathlib import Path
for f in os.listdir():
#print(os.path.splitext(f))
if f.endswith('pdf'):
print(os.path.abspath(f))
|
[
"homab3@gmail.com"
] |
homab3@gmail.com
|
334291795d858fe938f874cc6ecda0da265926b5
|
7137161629a1003583744cc3bd0e5d3498e0a924
|
/airflow/api_connexion/endpoints/provider_endpoint.py
|
844dcd301495c6871f6f034a8d15bd189b6ef130
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
jbampton/airflow
|
3fca85975854eb916f16143b659a9119af143963
|
dcfa14d60dade3fdefa001d10013466fe4d77f0d
|
refs/heads/master
| 2023-05-25T22:31:49.104069
| 2021-09-18T19:18:32
| 2021-09-18T19:18:32
| 247,645,744
| 3
| 0
|
Apache-2.0
| 2020-03-16T08:12:58
| 2020-03-16T08:12:57
| null |
UTF-8
|
Python
| false
| false
| 1,860
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
from typing import Dict, List
from airflow.api_connexion import security
from airflow.api_connexion.schemas.provider_schema import ProviderCollection, provider_collection_schema
from airflow.providers_manager import ProviderInfo, ProvidersManager
from airflow.security import permissions
def _remove_rst_syntax(value: str) -> str:
return re.sub("[`_<>]", "", value.strip(" \n."))
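# e.g. _remove_rst_syntax("`Amazon`_ integration.\n") -> "Amazon integration"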
def _provider_mapper(provider: ProviderInfo) -> Dict:
return {
"package_name": provider[1]["package-name"],
"description": _remove_rst_syntax(provider[1]["description"]),
"version": provider[0],
}
@security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER)])
def get_providers():
"""Get providers"""
providers_info: List[ProviderInfo] = list(ProvidersManager().providers.values())
providers = [_provider_mapper(d) for d in providers_info]
total_entries = len(providers)
return provider_collection_schema.dump(
ProviderCollection(providers=providers, total_entries=total_entries)
)
|
[
"noreply@github.com"
] |
jbampton.noreply@github.com
|
ca7bf250ae56e722ce0dc3fac6cd5b4ef5bab44a
|
7357d367b0af4650ccc5b783b7a59090fdde47bb
|
/reviewboard/webapi/tests/test_api_token.py
|
3d593bfe820b220205713483c9c685fc11ca54bf
|
[
"MIT"
] |
permissive
|
BarracudaPff/code-golf-data-python
|
fb0cfc74d1777c4246d56a5db8525432bf37ab1a
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
refs/heads/main
| 2023-05-29T05:52:22.856551
| 2020-05-23T22:12:48
| 2020-05-23T22:12:48
| 378,832,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,613
|
py
|
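# NOTE: this snippet is truncated; it assumes the module's original imports
# (six, SpyAgency, BaseWebAPITestCase, the mixins and metaclass, resources,
# WebAPIToken, LocalSite, PERMISSION_DENIED, TOKEN_GENERATION_FAILED and the
# URL/mimetype helpers) are already in scope.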
def _compare_item(self, item_rsp, api_token):
self.assertEqual(item_rsp["id"], api_token.pk)
self.assertEqual(item_rsp["token"], api_token.token)
self.assertEqual(item_rsp["note"], api_token.note)
self.assertEqual(item_rsp["policy"], api_token.policy)
self.assertEqual(item_rsp["extra_data"], api_token.extra_data)
class APITokenTestsMixin(object):
token_data = {"note": "This is my new token.", "policy": '{"perms": "ro", "resources": {"*": {"allow": ["*"]}}}'}
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(SpyAgency, ExtraDataListMixin, BaseWebAPITestCase, APITokenTestsMixin):
"""Testing the APITokenResource list APIs."""
fixtures = ["test_users"]
sample_api_url = "users/<username>/api-tokens/"
resource = resources.api_token
test_api_token_access = False
test_oauth_token_access = False
compare_item = _compare_item
def setup_basic_get_test(self, user, with_local_site, local_site_name, populate_items):
if populate_items:
if not with_local_site:
LocalSite.objects.create(name=self.local_site_name)
items = list(user.webapi_tokens.all())
items.append(self.create_webapi_token(user, note="Token 1", with_local_site=with_local_site))
self.create_webapi_token(user, note="Token 2", with_local_site=not with_local_site)
else:
items = []
return (get_api_token_list_url(user, local_site_name), api_token_list_mimetype, items)
def test_get_with_api_token_auth_denied(self):
"""Testing the GET users/<username>/api-tokens/ API denies access
when using token-based authentication
"""
user = self._authenticate_basic_tests(with_webapi_token=True)
url = self.setup_basic_get_test(user, False, None, True)[0]
rsp = self.api_get(url, expected_status=403)
self.assertEqual(rsp["stat"], "fail")
self.assertEqual(rsp["err"]["code"], PERMISSION_DENIED.code)
def setup_basic_post_test(self, user, with_local_site, local_site_name, post_valid_data):
if post_valid_data:
post_data = self.token_data.copy()
else:
post_data = {}
return (get_api_token_list_url(user, local_site_name), api_token_item_mimetype, post_data, [local_site_name])
def check_post_result(self, user, rsp, local_site_name):
token_rsp = rsp["api_token"]
token = WebAPIToken.objects.get(pk=token_rsp["id"])
self.compare_item(token_rsp, token)
if local_site_name:
self.assertIsNotNone(token.local_site_id)
self.assertEqual(token.local_site.name, local_site_name)
else:
self.assertIsNone(token.local_site_id)
def test_post_with_generation_error(self):
"""Testing the POST users/<username>/api-tokens/ API
with Token Generation Failed error"""
def _generate_token(self, *args, **kwargs):
kwargs["max_attempts"] = 0
orig_generate_token(*args, **kwargs)
orig_generate_token = WebAPIToken.objects.generate_token
self.spy_on(WebAPIToken.objects.generate_token, call_fake=_generate_token)
rsp = self.api_post(get_api_token_list_url(self.user), self.token_data, expected_status=500)
self.assertEqual(rsp["stat"], "fail")
self.assertEqual(rsp["err"]["code"], TOKEN_GENERATION_FAILED.code)
self.assertEqual(rsp["err"]["msg"], "Could not create a unique API token. " "Please try again.")
def test_post_with_api_token_auth_denied(self):
"""Testing the POST users/<username>/api-tokens/ API denies access
when using token-based authentication
"""
user = self._authenticate_basic_tests(with_webapi_token=True)
url = self.setup_basic_post_test(user, False, None, True)[0]
rsp = self.api_get(url, expected_status=403)
self.assertEqual(rsp["stat"], "fail")
self.assertEqual(rsp["err"]["code"], PERMISSION_DENIED.code)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(ExtraDataItemMixin, BaseWebAPITestCase, APITokenTestsMixin):
"""Testing the APITokenResource item APIs."""
fixtures = ["test_users"]
sample_api_url = "users/<username>/api-tokens/<id>/"
resource = resources.api_token
test_api_token_access = False
test_oauth_token_access = False
compare_item = _compare_item
def setup_basic_delete_test(self, user, with_local_site, local_site_name):
token = self.create_webapi_token(user, with_local_site=with_local_site)
return (get_api_token_item_url(token, local_site_name), [token.pk])
def check_delete_result(self, user, token_id):
self.assertIsNone(get_object_or_none(WebAPIToken, pk=token_id))
def test_delete_with_api_token_auth_denied(self):
"""Testing the DELETE users/<username>/api-tokens/<id>/ API denies
access when using token-based authentication
"""
user = self._authenticate_basic_tests(with_webapi_token=True)
url = self.setup_basic_delete_test(user, False, None)[0]
rsp = self.api_get(url, expected_status=403)
self.assertEqual(rsp["stat"], "fail")
self.assertEqual(rsp["err"]["code"], PERMISSION_DENIED.code)
def setup_basic_get_test(self, user, with_local_site, local_site_name):
token = self.create_webapi_token(user, with_local_site=with_local_site)
return (get_api_token_item_url(token, local_site_name), api_token_item_mimetype, token)
def test_get_not_modified(self):
"""Testing the GET users/<username>/api-tokens/<id>/ API
with Not Modified response
"""
token = self.create_webapi_token(self.user)
self._testHttpCaching(get_api_token_item_url(token), check_last_modified=True)
def test_get_with_api_token_auth_denied(self):
"""Testing the GET users/<username>/api-tokens/<id>/ API denies access
when using token-based authentication
"""
user = self._authenticate_basic_tests(with_webapi_token=True)
url = self.setup_basic_get_test(user, False, None)[0]
rsp = self.api_get(url, expected_status=403)
self.assertEqual(rsp["stat"], "fail")
self.assertEqual(rsp["err"]["code"], PERMISSION_DENIED.code)
def setup_basic_put_test(self, user, with_local_site, local_site_name, put_valid_data):
token = self.create_webapi_token(user, with_local_site=with_local_site)
return (get_api_token_item_url(token, local_site_name), api_token_item_mimetype, self.token_data.copy(), token, [])
def check_put_result(self, user, item_rsp, token):
self.compare_item(item_rsp, WebAPIToken.objects.get(pk=token.pk))
def test_put_with_api_token_auth_denied(self):
"""Testing the PUT users/<username>/api-tokens/<id>/ API denies access
when using token-based authentication
"""
user = self._authenticate_basic_tests(with_webapi_token=True)
url = self.setup_basic_put_test(user, False, None, True)[0]
rsp = self.api_get(url, expected_status=403)
self.assertEqual(rsp["stat"], "fail")
self.assertEqual(rsp["err"]["code"], PERMISSION_DENIED.code)
|
[
"sokolov.yas@gmail.com"
] |
sokolov.yas@gmail.com
|
415b4b5ca762243855aa661b8bc21b3924351c99
|
47f1b125b401ac2b0d18b9ce685b5c3d13f68e3e
|
/stasis/events.py
|
13c680790a74cff9936789e4632311b3f44d75bf
|
[] |
no_license
|
fschulze/stasis
|
fb7473361c8be870851589e18a17682ebd06f92c
|
368d82b3927d365e1ca61e11600093652520b4d8
|
refs/heads/master
| 2020-05-18T02:20:49.456035
| 2014-03-18T07:18:12
| 2014-03-18T07:18:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from stasis.interfaces import IPreBuild
from zope.interface import implementer
@implementer(IPreBuild)
class PreBuild(object):
def __init__(self, site):
self.site = site.registry
|
[
"florian.schulze@gmx.net"
] |
florian.schulze@gmx.net
|
e6753a2fe1b15385d615ae5728f98d265dc21c09
|
5fda498ef0bfc06962ad9b864d229193c45ccb4a
|
/Project1_Analyzing_the_NYC_Subway_Dataset/problem_sets2to5/problem_set5_mapreduce_on_subway_data/5_2_ridership_by_weather_reducer.py
|
9c13400465ec13fe64f0972326294c13776e3bbf
|
[] |
no_license
|
prabhurgit/Data_Aanlyst_Nanodegree_projects
|
7934869b63cae57cb2851e22a5023c6cbe3d18ba
|
a7a13d93c632cd1840ba3a00fff80a60a131b7f3
|
refs/heads/master
| 2021-05-31T18:47:48.669414
| 2016-03-30T04:08:39
| 2016-03-30T04:08:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,014
|
py
|
import sys
import logging
from util import reducer_logfile
logging.basicConfig(filename=reducer_logfile, format='%(message)s',
level=logging.INFO, filemode='w')
def reducer():
'''
Given the output of the mapper for this assignment, the reducer should
print one row per weather type, along with the average value of
ENTRIESn_hourly for that weather type, separated by a tab. You can assume
that the input to the reducer will be sorted by weather type, such that all
entries corresponding to a given weather type will be grouped together.
In order to compute the average value of ENTRIESn_hourly, you'll need to
keep track of both the total riders per weather type and the number of
hours with that weather type. That's why we've initialized the variable
riders and num_hours below. Feel free to use a different data structure in
your solution, though.
An example output row might look like this:
'fog-norain\t1105.32467557'
Since you are printing the output of your program, printing a debug
statement will interfere with the operation of the grader. Instead,
use the logging module, which we've configured to log to a file printed
when you click "Test Run". For example:
logging.info("My debugging message")
'''
riders = 0 # The number of total riders for this key
num_hours = 0 # The number of hours with this key
old_key = None
for line in sys.stdin:
data = line.strip().split("\t")
if len(data) != 2:
continue
this_key = data[0]
if old_key == this_key:
num_hours += 1.0
riders += float(data[1])
else:
            if old_key is not None and num_hours != 0:
print "{0}\t{1}".format(old_key, riders/num_hours)
num_hours = 1.0
riders = float(data[1])
old_key = this_key
    # Flush the final key; guard against empty input.
    if old_key is not None and num_hours != 0:
        print "{0}\t{1}".format(old_key, riders/num_hours)
reducer()
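# Typical streaming-style invocation (mapper_output.txt is hypothetical):
#   cat mapper_output.txt | sort | python 5_2_ridership_by_weather_reducer.py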
|
[
"xiewisdom@gmail.com"
] |
xiewisdom@gmail.com
|
a11b54daac573504f8b596a4715a12396b6239a4
|
d5a32e532fe231c16e52149604f0db34c5f4d2f9
|
/binarysearch.io/balanced_brackets_II.py
|
562ba59830209f53abf53bc59e7af7b3cc668ba9
|
[
"MIT"
] |
permissive
|
mishrakeshav/Competitive-Programming
|
93705f63337639e8464c1d50f3394434b7422f15
|
00c1bd272646754ca4c260d57989304c8e323838
|
refs/heads/master
| 2023-07-06T07:32:23.042324
| 2023-06-29T15:27:24
| 2023-06-29T15:27:24
| 216,195,590
| 3
| 3
|
MIT
| 2020-10-03T07:55:18
| 2019-10-19T11:27:53
|
Python
|
UTF-8
|
Python
| false
| false
| 539
|
py
|
class Solution:
def solve(self, s):
# Write your code here
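        # Stack check: push every opener; each closer must pair with the most
        # recent opener (matched via parallel index lookup in the two lists).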
stack = []
string_open = ["(", "[", "{" ]
string_closed = [")", "]", "}"]
for i in s:
if i in string_open:
stack.append(i)
else:
if len(stack) == 0:
return False
if string_closed.index(i) != string_open.index(stack.pop()):
return False
return len(stack) == 0
|
[
"mishrakeshav@users.noreply.github.com"
] |
mishrakeshav@users.noreply.github.com
|
3cc2d219b945557f9b170857474e1e2c2f0bce18
|
94a16dc81580ba7fb8776775e479e097ffa07a2e
|
/Base/string_base.py
|
45835074661ad68f0cb5d0a8c28413bfbb81bb9d
|
[] |
no_license
|
lixiang007666/Algorithm_LanQiao
|
0cc0cf17056125c963e26cc4eb539716128410d7
|
9e70788fbab774956f61ceafb12ad98cb8d42c3a
|
refs/heads/master
| 2022-12-31T04:45:07.110726
| 2020-10-17T05:54:32
| 2020-10-17T05:54:32
| 304,841,096
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
import math
import cmath
import sys
import string
import heapq
import bisect
from queue import Queue,PriorityQueue,LifoQueue
from collections import Counter,deque
from itertools import permutations,combinations
from functools import cmp_to_key
s="rwegwe"
print(s.count("rw"))
print(s.endswith("e"))
print(s.startswith("r"))
print(s.find("1"))#-1
print(s.index("r"))
print(s.find("r"))
print(s.islower())
print(s.upper())
print(s.isupper())  # returns False
print(s.replace("r","1"))
print(s)
print(s.replace("rw","1"))
|
[
"54010254+lixiang007666@users.noreply.github.com"
] |
54010254+lixiang007666@users.noreply.github.com
|
422de43b4ef0122243bded337050617c53c4ebc2
|
8a42fc0a988ab3c03a8a74c6c75452acfe289b4d
|
/changing_ar.py
|
62306f64242aa45ea1c080f38186d983c1560c38
|
[] |
no_license
|
rw1993/PyAr
|
5c5d5fc4a90b08b9b4a6e0883141b491ccaf45da
|
a09bc8ba4cda11f4e4cccdc4b6575b61ae0f18e0
|
refs/heads/master
| 2021-01-17T17:39:30.240020
| 2017-05-04T06:51:48
| 2017-05-04T06:51:48
| 60,001,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
# -*- coding: utf-8 -*-
# author: rw
# E-mail: weiyanjie10@gmail.com
import random
import numpy
class AR(object):
def __init__(self, parameters, frequency, sd=0.3):
self.parameters = parameters
self.p = len(self.parameters[0])
self.frequency = frequency
self.group = len(self.parameters)
self._xs = [random.random() for i in range(self.p)]
        self.sd = sd
@property
def noise(self):
return numpy.random.normal(scale=self.sd)
@property
def timeseries(self):
group_count = 0
while group_count < self.group:
group_count += 1
frequency_count = 0
parameter = numpy.array(self.parameters.pop())
while frequency_count < self.frequency:
past_p_xs = numpy.array(self._xs[-self.p:])
new_x = parameter.dot(past_p_xs)
new_x += self.noise
if new_x > 1.0:
new_x = 1.0
elif new_x < -1.0:
new_x = -1.0
self._xs.append(new_x)
frequency_count += 1
return self._xs
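# A small usage sketch (hypothetical coefficients): two AR(2) regimes,
# 100 samples each:
#   ar = AR(parameters=[[0.5, -0.3], [0.2, 0.1]], frequency=100)
#   xs = ar.timeseries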
|
[
"470177412@qq.com"
] |
470177412@qq.com
|
6ce9552ca184f38e6e2d9c997408e19359780a15
|
fe0017ae33385d7a2857d0aa39fa8861b40c8a88
|
/env/lib/python3.8/site-packages/sklearn/linear_model/omp.py
|
e78001aa19b4c7faf0b95c9506bd156e52f47cdb
|
[] |
no_license
|
enriquemoncerrat/frasesback
|
eec60cc7f078f9d24d155713ca8aa86f401c61bf
|
e2c77f839c77f54e08a2f0930880cf423e66165b
|
refs/heads/main
| 2023-01-03T23:21:05.968846
| 2020-10-18T21:20:27
| 2020-10-18T21:20:27
| 305,198,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
# mypy error: Module X has no attribute y (typically for C extensions)
from . import _omp # type: ignore
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.linear_model.omp'
correct_import_path = 'sklearn.linear_model'
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
return getattr(_omp, name)
if not sys.version_info >= (3, 7):
Pep562(__name__)
|
[
"enriquemoncerrat@gmail.com"
] |
enriquemoncerrat@gmail.com
|