blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18e7fca8d4143e471221237e49f1a2367953adc1
|
1f2445a190e1fdb36b8ace15cdd1de799c4c61b4
|
/arst/command_push.py
|
cc707d2a8c5c30ec522be85979a30474551b1f43
|
[] |
no_license
|
bmustiata/ars-py
|
f9860a48bb63bdf781627108e57d4a8431e35f0a
|
0a641399807f17efeb50d355770f014721aa4d38
|
refs/heads/master
| 2021-06-13T01:05:40.023617
| 2020-11-08T23:50:01
| 2020-11-08T23:50:01
| 140,120,138
| 0
| 0
| null | 2021-03-26T00:27:14
| 2018-07-07T22:15:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
import os.path
import pathlib
import shutil
from typing import List
from termcolor_util import yellow
def push_files_to_template(
    projects_folder: str, project_name: str, files_to_push: List[str]
) -> None:
    """Push every entry in *files_to_push* into the template of *project_name*."""
    for entry in files_to_push:
        recursively_push_file(
            projects_folder=projects_folder,
            project_name=project_name,
            file_name=entry,
        )
def recursively_push_file(
    projects_folder: str, project_name: str, file_name: str
) -> None:
    """Copy *file_name* (a file or a directory tree) into the template project.

    Directories are recreated under the target project and their children
    pushed recursively; plain files are copied after ensuring the parent
    folder exists.
    """
    print(
        yellow("Pushing"),
        yellow(file_name, bold=True),
        yellow("to"),
        yellow(project_name, bold=True),
    )

    destination = os.path.join(projects_folder, project_name, file_name)

    if not os.path.isdir(file_name):
        # Plain file: make sure the parent folder exists, then copy.
        pathlib.Path(destination).parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(file_name, destination)
        return

    # Directory: mirror it and recurse into each child entry.
    pathlib.Path(destination).mkdir(parents=True, exist_ok=True)
    for child in os.listdir(file_name):
        recursively_push_file(
            projects_folder=projects_folder,
            project_name=project_name,
            file_name=os.path.join(file_name, child),
        )
|
[
"bogdan.mustiata@gmail.com"
] |
bogdan.mustiata@gmail.com
|
56e5633ed8f47feba79f4997982cf5abf1919f18
|
941b25a0d0ccd25e4e64293defc2b50a61fccb01
|
/Board.py
|
cb026329b6c90b83267f78d6cc90800b607a043b
|
[] |
no_license
|
fanzhangg/sliding-tiles
|
c5a396818ec2d7449309f773df37a46ec7b41c8e
|
334bb7df76436aa9429ff6132db8a9ea1afce35f
|
refs/heads/master
| 2020-04-08T20:06:37.554387
| 2018-11-29T15:20:52
| 2018-11-29T15:20:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
class Board:
    """A sliding-tile puzzle board with ROW x COL cells, one of which is blank."""

    def __init__(self, row, col):
        # Dimensions of the board; the playable tile count is row * col - 1
        # because one cell stays empty.
        self.ROW = row
        self.COL = col
        self.tiles = []

    def initialize_board(self, tiles) -> None:
        """Store *tiles* (a tuple or list of tile values) after size validation.

        Note: the original annotation `tiles: tuple or list` evaluated to just
        `tuple` and was misleading, so it was removed.

        Raises:
            IndexError: if len(tiles) is not exactly ROW * COL - 1.
        """
        expected_size = self.ROW * self.COL - 1
        print('The expected size is', expected_size)
        if len(tiles) == expected_size:
            self.tiles.append(tiles)
        elif len(tiles) > expected_size:
            # (was `>=`; equality is already handled above, so `>` is the
            # actual condition — behavior unchanged)
            raise IndexError("The size of tiles exceeds the expected size")
        else:
            raise IndexError("The size of tiles less than the expected size")
|
[
"vanadiumzhang@gmail.com"
] |
vanadiumzhang@gmail.com
|
f37a13bf85a34be9aa47183fa9763dcb850c8bbf
|
4fb1239a9fd39b0abecde0f0f0b81e1cb3b8f44a
|
/isy994/items/devices/device_insteon_contact.py
|
3a2c8b43a4fe7d50bf9c0317dd32185ab7f7d5a2
|
[
"MIT"
] |
permissive
|
simplextech/ISY994v5
|
f361066e7712bc590077bdf2799223b136ac6266
|
f8e485452a02113f95b1a0f1a57a4d30075c1070
|
refs/heads/master
| 2020-07-23T00:26:16.730900
| 2019-09-10T03:16:34
| 2019-09-10T03:16:34
| 207,382,914
| 0
| 0
|
MIT
| 2019-09-09T18:58:59
| 2019-09-09T18:58:59
| null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
#! /usr/bin/env python
from .device_contact import Device_Contact
from .device_insteon_base import Device_Insteon_Base
class Device_Insteon_Contact(Device_Contact, Device_Insteon_Base):
    """Insteon contact sensor: maps a numeric status value to 'open'/'closed'."""

    def __init__(self, container, device_info):
        Device_Contact.__init__(self, container, device_info.name, device_info.address)
        Device_Insteon_Base.__init__(self, device_info)

        if device_info.property_value:
            # A positive status value means the contact is open.
            # Fixed: was a bare `except:` which also swallowed KeyboardInterrupt
            # and SystemExit; int() can only raise TypeError/ValueError here.
            try:
                if int(device_info.property_value) > 0:
                    self.set_property('contact','open')
                else:
                    self.set_property('contact','closed')
            except (TypeError, ValueError):
                # Non-numeric status: best effort, leave the property unset.
                pass

    def process_websocket_event(self, event):
        # 'ST' is the status-report control; update the contact state from it.
        if event.control == 'ST':
            if int(event.action) > 0:
                self.set_property('contact','open')
            else:
                self.set_property('contact','closed')
|
[
"mike@4831.com"
] |
mike@4831.com
|
ad0204c58539514ac30daab92ddf9c5e97cfa7ad
|
d49390f27ee954ddf5f7f1a9876320e1adec8ad4
|
/tests/test_utils.py
|
7e47d12b47398cef00ccd93ae15f60caac575053
|
[
"MIT"
] |
permissive
|
ssato/pytest-data-from-files
|
431e83827d4a2e14dd029089c8fa020b821b0683
|
deca8e3ae50351299f90bfe1561e5a32474812fa
|
refs/heads/main
| 2023-08-19T06:32:59.056406
| 2021-10-13T15:15:07
| 2021-10-13T15:15:07
| 412,952,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
#
# Copyright (C) 2021 Satoru SATOH <satoru.satoh@gmail.com>
# SPDX-License-Identifier: MIT
#
# pylint: disable=missing-docstring
"""Test cases for tests.utils."""
import pytest
from . import utils as TT
def test_get_basename():
    # NOTE(review): the expected value for this file's own path is 'utils',
    # not 'test_utils' — confirm get_basename's intended stripping behavior.
    assert TT.get_basename(__file__) == 'utils'
@pytest.mark.parametrize(
    ('xss', 'exp'),
    (
        ([[]], []),
        ((()), []),  # note: (()) is just one empty tuple, not a nested pair
        ([[1, 2, 3], [4, 5]], [1, 2, 3, 4, 5]),
        ([[1, 2, 3], [4, 5, [6, 7]]], [1, 2, 3, 4, 5, [6, 7]]),
        (((1, 2, 3), (4, 5, (6, 7))), [1, 2, 3, 4, 5, (6, 7)]),
        (((i, i * 2) for i in range(3)), [0, 0, 1, 2, 2, 4]),
    )
)
def test_concat(xss, exp):
    # concat is expected to flatten exactly one level of nesting and accept
    # any iterable of iterables, including a generator.
    assert list(TT.concat(xss)) == exp
# vim:sw=4:ts=4:et:
|
[
"satoru.satoh@gmail.com"
] |
satoru.satoh@gmail.com
|
a59342015429a8fe98d84c3688d5f300989c9754
|
ef158af9d47fb1f0c974b49405174ba5b34e4721
|
/polu/prediction_site/incendie.py
|
7d9797880c7e7d88bf9c409c3146b094c9c89755
|
[] |
no_license
|
LeGrosLezard/bobo
|
1227bcae22d9eb7d9e0423009cae154df5466994
|
7c50de512fb22c8bdf1a1127307fc4fd2f371152
|
refs/heads/master
| 2020-07-01T17:38:14.145955
| 2019-07-01T21:29:49
| 2019-07-01T21:29:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,284
|
py
|
import os
import cv2
import json
import requests
import datetime
import urllib.request
from bs4 import *
import datetime
PATH_LYON = 'https://www.lyoncapitale.fr/?s=incendie'
PATH_MARSEILLE = 'https://www.20minutes.fr/search?q=incendie+marseille'
PATH_PARIS = 'https://www.20minutes.fr/search?q=incendie+paris'
def incendie(ville):
    # Scrape a news site for articles about fires ("incendie") in the given
    # city and return 'oui' when one dated today is found; otherwise the
    # function implicitly returns None.
    # NOTE(review): if *ville* is not 'lyon', 'paris' or 'marseille', *path*
    # is never assigned and requests.get(path) below raises NameError —
    # confirm callers only ever pass these three cities.
    date = datetime.datetime.now()
    jour = date.day
    mois = date.month
    année = date.year
    # Month number -> French month name, used to build a textual date string.
    dico = {'1':'janvier','2':'fevrier','3':'mars','4':'avril',
            '5':'mai','6':'juin','7':'juillet','8':'août',
            '9':'septembre','10':'octobre','11':'novembre','12':'decembre'}
    for cle, valeur in dico.items():
        if str(mois) == cle:
            mois = valeur
    ville = ville.lower()
    if ville == 'lyon':
        path = PATH_LYON
        # NOTE(review): everything below in this branch is recomputed after
        # the if/elif chain, so this request and this value of *a* are
        # discarded — looks like leftover code.
        r = requests.get(path)
        page = r.content
        soup = BeautifulSoup(page, "html.parser")
        propriete = soup.find_all("div")
        liste = []
        liste.append(str(propriete))
        daate = str(jour) + ' ' + str(mois) + ' ' + str(année)
        a = str(liste).find(str(daate))
    elif ville == 'paris':
        path = PATH_PARIS
    elif ville == 'marseille':
        path = PATH_MARSEILLE
    # Fetch the page and collect all <div> blocks as one big string.
    r = requests.get(path)
    page = r.content
    soup = BeautifulSoup(page, "html.parser")
    propriete = soup.find_all("div")
    liste = []
    liste.append(str(propriete))
    # Keep a 2000-character window around the first occurrence of 'incendie'.
    a = str(liste).find('incendie')
    liste = liste[0][a-1000:a+1000]
    # Count the digits of the numeric month to decide whether to zero-pad it.
    mois_chi = date.month
    c = 0
    for i in str(mois_chi):
        c+=1
    if c == 1:
        daate1 = str(année) + '-0' + str(mois_chi)+'-'+str(jour)
        daate3 = str(jour) + '-0' + str(mois_chi)+'-'+str(année)
    else:
        daate1 = str(année) + '-' + str(mois_chi)+'-'+str(jour)
        daate3 = str(jour) + '-' + str(mois_chi)+'-'+str(année)
    # Look for today's date in one textual and two numeric formats.
    daate = str(jour) + ' ' + str(mois) + ' ' + str(année)
    b = str(liste).find(daate)
    c = str(liste).find(daate1)
    d = str(liste).find(daate3)
    if b >= 0 or c >= 0 or d >=0:
        return 'oui'
|
[
"noreply@github.com"
] |
LeGrosLezard.noreply@github.com
|
301e49dbb4d7faa407729ff514dd2a4fecdb2b45
|
540dbf1622959cc1b3a6853379f6f1f502bdbc24
|
/offers/urls.py
|
4ea3e242310521d73bca6bf928f385ece1bf7173
|
[
"ISC"
] |
permissive
|
pmaigutyak/mp-shop-offers
|
a5031af2827cee34e671793164ea786d2c4ee016
|
6f3d0934193c7d727d41481b0ebb164b03e34808
|
refs/heads/master
| 2023-08-02T23:02:09.498271
| 2023-07-19T11:01:34
| 2023-07-19T11:01:34
| 148,797,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from django.urls import path
from offers import views
app_name = 'offers'
# Routes for the price-offer feature: a modal popup and a send endpoint,
# both keyed by the product's integer id.
urlpatterns = [
    path('modal/<int:product_id>/', views.get_price_offer_modal, name='modal'),
    path('send/<int:product_id>/', views.send_price_offer, name='send'),
]
|
[
"pmaigutyak@gmail.com"
] |
pmaigutyak@gmail.com
|
99eb738456b493ec54c68eba2eeb0e72884b2519
|
09a8e7ca229a6a95cdd4a426a1c220e9daf98355
|
/RBSP/Tools/IntegrateSpectrum.py
|
afe1dc5f26134ae4cc762e18d03b76f09fcdcc78
|
[
"MIT"
] |
permissive
|
mattkjames7/RBSP
|
8547bda29ec6c878483c21dc3281852abb256bd4
|
25c37cbba18b681a2f9a4e7955e49a49fc36d698
|
refs/heads/master
| 2023-07-20T00:31:43.487076
| 2023-07-14T12:30:04
| 2023-07-14T12:30:04
| 177,828,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
import numpy as np
from .RelVelocity import RelVelocity
def IntegrateSpectrum(E,PSD,m,Omega,Erange=(0.0,np.inf)):
	'''
	Integrate the phase space density to get the partial density.

	Inputs
	======
	E : float
		Energy in keV.
	PSD : float
		Phase space density s^3 m^-6. (this includes density)
	m : float
		Mass in kg.
	Omega : float
		Solid-angle factor multiplied into the integral
		(presumably steradians — confirm with callers).
	Erange : tuple
		2-element tuple specifying the energy range over which to
		integrate.

	Returns
	=======
	n : float
		Partial density (array of length ns, one value per spectrum;
		NaN where fewer than two positive PSD samples exist).

	NOTE: This probably won't work for reletavistic particles, so I
	should probably rewrite this in terms of E instead of V
	'''
	#firstly work out the number of spectra
	if len(PSD.shape) == 1:
		ns = 1
	else:
		ns = PSD.shape[0]
	#limit E and PSD to within the energy range
	if len(E.shape) == 1:
		# single energy axis shared by all spectra: replicate it
		e = np.array([E]*ns)
	else:
		e = np.array(E)
	etmp = np.nanmean(e,axis=0)
	use = np.where((etmp >= Erange[0]) & (etmp <= Erange[1]))[0]
	e = e[:,use]
	if len(PSD.shape) == 1:
		p = np.array([PSD[use]]).astype('float64')
	else:
		p = PSD[:,use].astype('float64')
	#convert E to V
	v = RelVelocity(e,m).astype('float64')
	#integrate, convert to cm^-2
	n = np.zeros(ns,dtype='float64')
	pv2 = p*v**2
	for i in range(0,ns):
		# only integrate over strictly positive PSD samples
		use = np.where(p[i] > 0)[0]
		if use.size > 1:
			n[i] = 1e-6*np.trapz(pv2[i,use],v[i,use])*Omega
		else:
			# not enough points to form a trapezoid
			n[i] = np.nan
	return n
|
[
"mattkjames7@gmail.com"
] |
mattkjames7@gmail.com
|
7f0a5b1b270b7e230361b6f791d4e0774ca6ed98
|
dbe7e1d9fe2457c26f83095d941e4392e7d30f8c
|
/django_dashboard/api/test.py
|
81e1e97da6599b88239c60f04be0a535b2b26525
|
[
"MIT"
] |
permissive
|
keepexploring/smartbiogas
|
51e124735ec04bc6b87a8ac75c66c83de6865001
|
ca663435b05666113e3c0cb55e6f087c61497208
|
refs/heads/master
| 2022-12-12T10:42:37.412038
| 2018-07-18T15:29:04
| 2018-07-18T15:29:04
| 111,402,799
| 0
| 0
|
MIT
| 2022-12-08T00:56:54
| 2017-11-20T11:39:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
class Container(models.Model):
    # Minimal model used only as a foreign-key target in these tests.
    pass
class ContainerItem(models.Model):
    # NOTE(review): the field is named 'blog' but references Container —
    # confirm the name is intentional (test fixture style).
    blog = models.ForeignKey('Container', related_name='items')
# For testing purposes only
class ContainerResource(ModelResource):
    # Tastypie resource exposing Container with open authorization.
    class Meta:
        queryset = Container.objects.all()
        authorization = Authorization()
class ContainerItemResource(ModelResource):
    blog = tastypie.fields.ForeignKey(ContainerResource, 'blog')
    class Meta:
        queryset = ContainerItem.objects.all()
        authorization = Authorization()
class BiogasPlants(models.Model):
    # Minimal model used only as a foreign-key target in these tests.
    pass
class PendingJobs(models.Model):
    # NOTE(review): field named 'blog' but references BiogasPlants — confirm.
    blog = models.ForeignKey('BiogasPlants', related_name='items')
# For testing purposes only
class BiogasPlantResource(ModelResource):
    # Tastypie resource exposing BiogasPlants with open authorization.
    class Meta:
        queryset = BiogasPlants.objects.all()
        authorization = Authorization()
class PendingJobResource(ModelResource):
    """Tastypie resource for PendingJobs with an FK to BiogasPlantResource."""
    blog = tastypie.fields.ForeignKey(BiogasPlantResource, 'blog')
    class Meta:
        # Fixed copy-paste bug: this resource serves PendingJobs, but the
        # queryset was ContainerItem.objects.all() (copied from
        # ContainerItemResource above), which would expose the wrong model.
        queryset = PendingJobs.objects.all()
        authorization = Authorization()
|
[
"joel.c@scene.community"
] |
joel.c@scene.community
|
b7243813f8bf290dd27c60441ca3c453a67e225f
|
024214bff5cdd43874d30143ee90131a985c5ce3
|
/vycontrol/interface/urls.py
|
0c3aaab513ffb0106d0732d02cc9e8dd19f611e2
|
[
"MIT"
] |
permissive
|
KennethEhmsen/vycontrol
|
c20e2d793bd03005ddfd314abc62b4ca3fd292e3
|
5dbebfa1b299ad20b60131d95291ee91c3b9df5c
|
refs/heads/master
| 2023-08-10T11:43:36.001119
| 2020-05-23T05:03:47
| 2020-05-23T05:03:47
| 267,380,513
| 1
| 0
|
MIT
| 2021-09-22T19:40:01
| 2020-05-27T17:13:06
| null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
from django.urls import path
from . import views
app_name = 'interface'
# Routes for the interface views: an index listing plus per-interface detail
# and firewall pages keyed by interface type and name.
urlpatterns = [
    path('', views.index, name='interface-list'),
    path('interface-show/<slug:interface_type>/<slug:interface_name>', views.interfaceshow, name='interface-show'),
    path('interface-firewall/<slug:interface_type>/<slug:interface_name>', views.interfacefirewall, name='interface-firewall'),
]
|
[
"roberto.berto@gmail.com"
] |
roberto.berto@gmail.com
|
3a9e4f92085e755aa3e85445bf48abb902e2c0a1
|
df560dde5ffbae51187041f422c87f7d1544cbe9
|
/leetcode/python/841_keys_and_rooms.py
|
f8f42bb5c92c2b5c43d5bc8fa4770db223f3a8f4
|
[
"MIT"
] |
permissive
|
VVKot/coding-competitions
|
61c97dbc4fdaeb0a35ff7fa8e55529b579fd1ebb
|
7d6e599b223d89a7861929190be715d3b3604fa4
|
refs/heads/master
| 2021-07-04T17:47:34.246535
| 2020-09-23T20:28:30
| 2020-09-23T20:28:30
| 174,696,391
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
class Solution:
    """LeetCode 841: decide whether every room is reachable from room 0."""

    def canVisitAllRooms(self, rooms):
        # Depth-first traversal over the keys found in each visited room.
        seen = {0}
        pending = [0]
        while pending:
            current = pending.pop()
            for key in rooms[current]:
                if key not in seen:
                    seen.add(key)
                    pending.append(key)
            # Early exit once every room has been reached.
            if len(seen) == len(rooms):
                return True
        return len(seen) == len(rooms)
|
[
"volodymyr.kot.ua@gmail.com"
] |
volodymyr.kot.ua@gmail.com
|
98d67adeb2aa151c263faa61a4956cb8f620a667
|
fa76cf45d7bf4ed533e5a776ecd52cea15da8c90
|
/robocorp-python-ls-core/src/robocorp_ls_core/libs/robocop_lib/robocop/exceptions.py
|
a5d30bd12b3f4d63a76dca5367602c808aafd756
|
[
"Apache-2.0"
] |
permissive
|
martinRenou/robotframework-lsp
|
8a5d63b7cc7d320c9fed2372a79c8c6772d6481e
|
5f23b7374139e83d0aa1ebd30675e762d7a0db86
|
refs/heads/master
| 2023-08-18T22:26:01.386975
| 2021-10-25T13:46:11
| 2021-10-25T13:46:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,180
|
py
|
class RobocopFatalError(ValueError):
    """Base class for every fatal Robocop error."""
    pass
class ConfigGeneralError(RobocopFatalError):
    """Raised for general configuration problems."""
    pass
class DuplicatedRuleError(RobocopFatalError):
    """Raised when the same rule is defined by two different checkers."""
    def __init__(self, rule_type, rule, checker, checker_prev):
        msg = (
            f"Fatal error: Message {rule_type} '{rule}' defined in {checker.__class__.__name__} "
            f"was already defined in {checker_prev.__class__.__name__}"
        )
        super().__init__(msg)
class InvalidRuleSeverityError(RobocopFatalError):
    """Raised when a rule is configured with an unknown severity value."""
    def __init__(self, rule, severity_val):
        msg = f"Fatal error: Tried to configure message {rule} with invalid severity: {severity_val}"
        super().__init__(msg)
class InvalidRuleBodyError(RobocopFatalError):
    """Raised when a rule definition body cannot be parsed."""
    def __init__(self, rule_id, rule_body):
        msg = f"Fatal error: Rule '{rule_id}' has invalid body:\n{rule_body}"
        super().__init__(msg)
class InvalidRuleConfigurableError(RobocopFatalError):
    """Raised when a rule's configurable section is malformed."""
    def __init__(self, rule_id, rule_body):
        msg = f"Fatal error: Rule '{rule_id}' has invalid configurable:\n{rule_body}"
        super().__init__(msg)
class InvalidRuleUsageError(RobocopFatalError):
    """Raised when preparing a rule's message description fails."""
    def __init__(self, rule_id, type_error):
        msg = f"Fatal error: Rule '{rule_id}' failed to prepare message description with error: {type_error}"
        super().__init__(msg)
class InvalidExternalCheckerError(RobocopFatalError):
    """Raised when an external rules file cannot be loaded."""
    def __init__(self, path):
        msg = f'Fatal error: Failed to load external rules from file "{path}". Verify if the file exists'
        super().__init__(msg)
class FileError(RobocopFatalError):
    """Raised when a source file passed to Robocop does not exist."""
    def __init__(self, source):
        msg = f'File "{source}" does not exist'
        super().__init__(msg)
class ArgumentFileNotFoundError(RobocopFatalError):
    """Raised when a referenced argument file does not exist."""
    def __init__(self, source):
        msg = f'Argument file "{source}" does not exist'
        super().__init__(msg)
class NestedArgumentFileError(RobocopFatalError):
    """Raised when an argument file includes another argument file."""
    def __init__(self, source):
        msg = f'Nested argument file in "{source}"'
        super().__init__(msg)
class InvalidArgumentError(RobocopFatalError):
    """Raised for any other invalid Robocop configuration."""
    def __init__(self, msg):
        super().__init__(f"Invalid configuration for Robocop:\n{msg}")
|
[
"fabiofz@gmail.com"
] |
fabiofz@gmail.com
|
4a6c127e11f77fef064cd6c080ef8f541b4ba12b
|
10ec999d273b9a75b3cc5c3e353d611ea7f163b6
|
/grappa/operators/attributes.py
|
7748bfdcbeb82c4d1d8d8cbe054f6887bb88da79
|
[
"MIT"
] |
permissive
|
jcassee/grappa
|
721bec7ee2d21fc3fc857f77b9ff6d58b941bfd9
|
b128da8aef67501c310701c47508e7318241aa8b
|
refs/heads/master
| 2020-05-05T02:24:30.106837
| 2018-10-02T10:29:16
| 2018-10-02T10:29:16
| 179,636,623
| 0
| 0
|
MIT
| 2019-04-05T07:24:31
| 2019-04-05T07:24:31
| null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
# -*- coding: utf-8 -*-
from ..decorators import attribute
@attribute(
    operators=(
        'to', 'has', 'have', 'satisfy', 'that', 'that_is',
        'satisfies', 'include', 'do', '_is', 'which', 'which_is'
    )
)
def be(ctx):
    """
    Semantic attributes providing chainable declarative DSL
    for assertions.
    """
    # Positive chain: clear any negation set earlier in the chain.
    ctx.negate = False
@attribute(operators=(
    'not_to', 'to_not', 'does_not', 'do_not', '_not', 'not_satisfy',
    'not_have', 'not_has', 'have_not', 'has_not', 'dont', 'is_not',
    'which_not', 'that_not'
))
def not_be(ctx):
    """
    Semantic negation attributes providing chainable declarative DSL
    for assertions.
    """
    # Negative chain: flip the assertion result for subsequent operators.
    ctx.negate = True
|
[
"tomas@aparicio.me"
] |
tomas@aparicio.me
|
dd4e5116fd1f9eecb02b439401c88eaef08b236e
|
603a54e14440fbe1c8aaab34c54aedff83489ca5
|
/violate_ratio/without_batching/edge_serving_resnet/server.py
|
28b9ec0e0aff9025b90bc2aaecb24a34f9b54f54
|
[
"MIT"
] |
permissive
|
fkh12345/ICE
|
211fdbc62211e6df6c3d90b6ff309bc8c854e01f
|
f6ed4933c85b541fb971f5e1bfaa814e4e613c32
|
refs/heads/main
| 2023-04-12T02:05:57.478650
| 2023-03-01T18:08:55
| 2023-03-01T18:08:55
| 500,040,223
| 51
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,093
|
py
|
import grpc
import time
from concurrent import futures
import inference_pb2 as data_pb2
import inference_pb2_grpc as data_pb2_grpc
from serving import Serving
import io
import torch
import torch.nn as nn
import numpy as np
import torchvision.models as models
import argparse
from dnn_model.lapsrn import Net
from dnn_model.dfcnn import DFCNN
from dnn_model.yolo import Yolov3
#from runtime import change_waiting_queue
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--bs', type=int, default=64)
parser.add_argument('--qos', type=float, default=0.05)
parser.add_argument('--worker', type=int, default=0)
args = parser.parse_args()
model = models.resnet50()
#model = DFCNN(1000, 200)
#model = Yolov3()
model.set_profile(True)
model.set_input([0, 109])
model.set_profile(False)
model_input = model.get_input()
device = 'cuda:{}'.format(args.worker)
no_batching_server = Serving(model, device)
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
_HOST = '0.0.0.0'
_PORT = '808{}'.format(args.worker)
class FormatData(data_pb2_grpc.FormatDataServicer):
    # gRPC servicer: receives a serialized request, pushes it through the
    # serving queue, and returns end-to-end and queueing times.
    def DoFormat(self, request, context):
        start1 = time.time()
        # NOTE(review): `str` shadows the builtin; it holds request.text.
        str = request.text
        start = request.start
        end = request.end
        #buffer = io.BytesIO(str)
        #buffer.seek(0)
        #input = torch.load(buffer)
        index1 = model.push_data(start, str, [0, 109], "cpu")
        launch = start1
        # NOTE(review): `input` here is the Python builtin function — the
        # commented-out torch.load above suggests the decoded tensor was
        # intended; confirm what push_index actually expects.
        index = no_batching_server.push_index(input, start, end, index1)
        out, duration = no_batching_server.get_result(index)
        #print(out.shape)
        out_time = time.time()
        # text = total wall time, queue = time spent queued/serving.
        return data_pb2.actionresponse(text=out_time - start1, queue=duration)
def serve():
    # Start the gRPC server with a large thread pool, then hand the main
    # thread to the serving loop; Ctrl-C stops the gRPC server.
    grpcServer = grpc.server(futures.ThreadPoolExecutor(max_workers=5000))
    data_pb2_grpc.add_FormatDataServicer_to_server(FormatData(), grpcServer)
    grpcServer.add_insecure_port(_HOST + ':' + _PORT)
    grpcServer.start()
    try:
        # NOTE(review): invokes the Serving instance as a callable —
        # presumably its __call__ runs the blocking serving loop; confirm.
        no_batching_server()
    except KeyboardInterrupt:
        grpcServer.stop(0)
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
647c137facd838ac6488a092188e30f81a6926b1
|
82aace1431e0af949b1294d979a16f8dc18f48c2
|
/Python-Web-Basics-Softuni/recipes/venv/Scripts/django-admin.py
|
a220eda67fd73b94ee6387a63bff85b7b9484757
|
[
"MIT"
] |
permissive
|
borisboychev/SoftUni
|
6778450417f889f8e89c709897b9e26c7129dbf6
|
22062312f08e29a1d85377a6d41ef74966d37e99
|
refs/heads/master
| 2023-03-27T13:11:17.378197
| 2021-03-26T09:14:25
| 2021-03-26T09:14:25
| 295,463,442
| 1
| 0
| null | 2020-10-12T12:54:13
| 2020-09-14T15:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 717
|
py
|
#!C:\Users\boris\SoftUni\Python-Web-Basics-Softuni\recipes\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
    # Emit the deprecation warning, then delegate to Django's normal CLI.
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
|
[
"borisboychev007@Gmail.com"
] |
borisboychev007@Gmail.com
|
933bcaa20d4e347f811ae71388ddb4c071ba5196
|
a796865c5ff4dcb7c6a0c848364bd6f7cb3d7a29
|
/tests/test_newsgroup20.py
|
108161cf2027bfb2e2fbd7932f31709c2db1dccc
|
[
"Apache-2.0"
] |
permissive
|
yk/chazutsu
|
d625f6f7f682d713910ce59953841e507ad27262
|
ecc42c9ff0f8d47632ba4b4c7385a5fdf4386c10
|
refs/heads/master
| 2020-03-18T19:43:40.016170
| 2018-05-28T14:52:00
| 2018-05-28T14:52:00
| 135,173,517
| 0
| 0
|
Apache-2.0
| 2018-05-28T14:41:16
| 2018-05-28T14:41:16
| null |
UTF-8
|
Python
| false
| false
| 1,716
|
py
|
import os
import sys
import shutil
import unittest
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import chazutsu.datasets
DATA_ROOT = os.path.join(os.path.dirname(__file__), "data")
class TestNewsGroup20(unittest.TestCase):
    # Integration tests for the chazutsu NewsGroup20 dataset wrapper.
    def test_extract(self):
        # Download the dataset (no test split) and verify every line of the
        # produced file has exactly 5 tab-separated fields.
        r = chazutsu.datasets.NewsGroup20().download(directory=DATA_ROOT, test_size=0)
        try:
            with open(r.data_file_path, encoding="utf-8") as f:
                for ln in f:
                    els = ln.split("\t")
                    if len(els) != 5:
                        print(els)
                        print(len(els))
                        raise Exception("data file is not constructed by label and text.")
        except Exception as ex:
            # Clean up the partially written file before failing the test.
            if os.path.isfile(r.data_file_path):
                os.remove(r.data_file_path)
            self.fail(ex)
        # NOTE(review): assertTrue ignores its second argument, so this only
        # checks the column count is truthy — probably meant
        # assertEqual(len(r.data().columns), 5).
        self.assertTrue(len(r.data().columns), 5)
        if os.path.isfile(r.data_file_path):
            os.remove(r.data_file_path)
        shutil.rmtree(r.root)
    def test_parse(self):
        # Parse a raw newsgroup message and check subject/author/body fields.
        d = chazutsu.datasets.NewsGroup20()
        subject, author, text = d.parse(raw_text=sample_text)
        self.assertEqual(subject, "Re: Political Atheists?")
        self.assertEqual(author, "Keith Allan Schneider")
        self.assertTrue(text.startswith("If I"))
sample_text = """
From: keith@cco.caltech.edu (Keith Allan Schneider)
Subject: Re: Political Atheists?
bobbe@vice.ICO.TEK.COM (Robert Beauchaine) writes:
>>If I kill this person [an innocent person convicted of murder],
>>then a murder would be committed, but I would not be the murderer. At least,
"""
if __name__ == "__main__":
unittest.main()
|
[
"icoxfog417@yahoo.co.jp"
] |
icoxfog417@yahoo.co.jp
|
a6cdc1e51da55bb4298536a804dbb7eb2605ffe4
|
1cec8afab42c7ed104d414089e30c647b1c79974
|
/bubble_sort/main.py
|
e580cdb2e530946fa2fc46e6f50df54592bcdd83
|
[] |
no_license
|
yerassyl94/problems
|
ea84572aa89178a30fb030a16b2e4e23df3529b9
|
c5f3b391c2d94445d96726aaa8572580c4c662bc
|
refs/heads/master
| 2020-08-27T23:57:44.672878
| 2019-11-11T11:35:55
| 2019-11-11T11:35:55
| 217,526,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
def bubble_sort_list(_list: list):
    """Sort *_list* in place with bubble sort and return the same list."""
    last = len(_list) - 1
    for sweep in range(last):
        # After each sweep the largest remaining element settles at the end,
        # so the inner range shrinks by one each time.
        for j in range(last - sweep):
            if _list[j] > _list[j + 1]:
                _list[j], _list[j + 1] = _list[j + 1], _list[j]
    return _list
list_=[32,5,3,6,7,54,87]
print(bubble_sort_list(list_))
|
[
"you@example.com"
] |
you@example.com
|
83d7fc9253ee2cb0396a0315ed154dabcc0928ab
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/KCB_MM/YW_KCB_HASPRICELIMIT_GPMM_SHSJ_WDZC_129.py
|
d6747c9ad8bfc225ae04ed5405c0247a043c057c
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,273
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test//xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test//service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test//mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test//utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_KCB_HASPRICELIMIT_GPMM_SHSJ_WDZC_129(xtp_test_case):
    # STAR-market sell test: best-5-or-cancel order that should be rejected.
    def setUp(self):
        pass
    # YW_KCB_HASPRICELIMIT_GPMM_SHSJ_WDZC_129
    def test_YW_KCB_HASPRICELIMIT_GPMM_SHSJ_WDZC_129(self):
        title = '交易日五档即成转撤销卖-非最后一次卖:余额不足200股部分'
        # Define the expected values for the current test case.
        # Expected status is one of: initial, unfilled, partially filled,
        # fully filled, partial-cancel reported, partially cancelled,
        # reported pending cancel, cancelled, rejected, cancel-rejected,
        # internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11010111,
            'errorMSG': queryOrderErrorMsg(11010111),
            '是否生成报单': '是',
            '是否是撤废': '否',
            # '是否是新股申购': '',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Define the order parameters ------------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side (B = buy, S = sell), expected status, Api.
        stkparm = QueryStkPriceQty('688000', '1', '4', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            print(stkparm['错误原因'])
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                'price': stkparm['涨停价'],
                'quantity': 99,
                'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 201
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
c82a3e03bf55d608e3c42b98c26484346d260228
|
7f8082a1a88ad6895d1e4b5cf9f3952fe8951ec3
|
/miniProject/bookcoversearch/search.py
|
1ded489e624618a5a884f2ce23c3e41bfed477d3
|
[
"MIT"
] |
permissive
|
matiji66/opencv_study
|
a33e3fed3e320598d8b3612b387b63658539176d
|
9b6354907609c9841915f6300ee5915a9d80906f
|
refs/heads/master
| 2020-03-11T03:34:28.198825
| 2018-01-15T14:04:08
| 2018-01-15T14:04:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
from __future__ import print_function
from pyimagesearch.coverdecriptor import CoverDescriptor
from pyimagesearch.covermatcher import CoverMatcher
import argparse
import glob
import csv
import cv2
ap=argparse.ArgumentParser()
ap.add_argument('-d', '--db', required=True,
help="path to the book database")
ap.add_argument('-c', '--covers', required=True,
help="path to the directory that contains the book covers")
ap.add_argument('-q', '--query', required=True,
help='path to the query book cover')
ap.add_argument('-s', '--sift',type=int, default=0,
help="whether or not SIFT should be used")
args=vars(ap.parse_args())
# Build the author/title lookup keyed by cover filename.
db = {}
for row in csv.reader(open(args["db"])):
    db[row[0]] = row[1:]

useSIFT = args['sift'] > 0
useHamming = args['sift'] == 0
ratio = 0.7
# SIFT descriptors need more matches to be considered reliable.
minMatches = 50 if useSIFT else 40

cd = CoverDescriptor(useSIFT=useSIFT)
# NOTE(review): 'userHamming' may be a typo for 'useHamming' — confirm
# against CoverMatcher's signature before renaming.
cv = CoverMatcher(cd, glob.glob(args['covers'] + "/*.png"),
    ratio=ratio, minMatches=minMatches, userHamming=useHamming)

queryImage = cv2.imread(args['query'])
# Fix: the original referenced the undefined name 'queryIzmage' here,
# which raised NameError before any matching happened.
gray = cv2.cvtColor(queryImage, cv2.COLOR_BGR2GRAY)
(queryKps, queryDescs) = cd.describe(gray)
results = cv.search(queryKps, queryDescs)
cv2.imshow("Query", queryImage)

if len(results) == 0:
    print("I could not find a match for that cover")
    cv2.waitKey(0)
else:
    # Fix: the original loop header 'for (i (score, coverPath)) in ...' was
    # missing the comma in the tuple unpacking — a SyntaxError.
    for (i, (score, coverPath)) in enumerate(results):
        (author, title) = db[coverPath[coverPath.rfind("/") + 1:]]
        print("{}. {:.2f}% : {} - {}".format(i + 1, score * 100,
            author, title))
        result = cv2.imread(coverPath)
        cv2.imshow("Result", result)
        cv2.waitKey(0)
|
[
"scotthuang1989@163.com"
] |
scotthuang1989@163.com
|
a757418f6ddf8e7675ef47658b0381be8f8ab2c4
|
4a1b61cf551db7843050cc7080cec6fd60c4f8cc
|
/2020/백준문제/IM대비/14696_딱지놀이.py
|
b574a887a21deac01c3a3e0c4ea705dd1508157c
|
[] |
no_license
|
phoenix9373/Algorithm
|
4551692027ca60e714437fd3b0c86462f635d8ff
|
c66fd70e14bb8357318e8b8f386d2e968f0c4d98
|
refs/heads/master
| 2023-08-24T10:01:20.798430
| 2021-10-15T07:57:36
| 2021-10-15T07:57:36
| 288,092,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
# Card ranking: star(4) > circle(3) > square(2) > triangle(1); ties possible.
# On a tie print 'D' (draw).
N = int(input())  # number of rounds, 1..1000
for _ in range(N):
    # Each input line: a count followed by that many card values; drop the count.
    a = list(map(int, input().split()))[1:]
    b = list(map(int, input().split()))[1:]
    for i in range(4, 0, -1):
        # Compare how many of the highest-ranked card each player holds.
        a_cnt = a.count(i)
        b_cnt = b.count(i)
        if a_cnt > b_cnt:
            print('A')
            break
        elif b_cnt > a_cnt:
            print('B')
            break
    else:
        # for/else: no break means every count matched, so it's a draw.
        print('D')
|
[
"phoenix9373@naver.com"
] |
phoenix9373@naver.com
|
e2f95965a7e29da94724dd80ff83e894d2a77927
|
a875495c32524d1f7b05f07337f54df52de2aad0
|
/algo/clustering.py
|
6bc5b199bbb559d716d424618a3629e094646459
|
[
"Apache-2.0"
] |
permissive
|
adrianopls/GRIPy-X
|
a43188e1ffda37f9f668133f47c3e90589325753
|
21c7fa1f32f8dbb0a5dff93c2bac5acf1f9181ca
|
refs/heads/master
| 2021-07-01T16:57:16.094069
| 2021-03-09T19:29:23
| 2021-03-09T19:29:23
| 218,369,555
| 1
| 1
|
Apache-2.0
| 2019-10-30T18:13:12
| 2019-10-29T19:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,962
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.cluster import KMeans
from sklearn.mixture import GMM
from sklearn.preprocessing import scale
from sklearn import metrics
def locate_nans(data):
    """Return a boolean vector flagging each row of *data* that holds any NaN."""
    return np.isnan(data).any(axis=1)
def reorder_clusters(clusters, centers, covars=None):
    """Relabel clusters by increasing quadratic distance of their centers.

    For each cluster i computes d2[i] = centers[i] @ covars[i] @ centers[i].T
    (identity covariances when *covars* is None, i.e. squared Euclidean norm),
    sorts the clusters by d2, and remaps labels so label 0 is the closest.

    Returns (new_clusters, argsort), where argsort[k] is the old label that
    became label k.
    """
    n_clusters, n_features = centers.shape

    if covars is None:
        # Fall back to identity covariances: plain squared norms.
        covars = np.empty((n_clusters, n_features, n_features))
        for idx in range(n_clusters):
            covars[idx] = np.eye(n_features)

    distances = np.empty(n_clusters)
    for idx in range(n_clusters):
        distances[idx] = np.dot(np.dot(centers[idx], covars[idx]), centers[idx].T)

    order = np.argsort(distances)
    relabeled = np.empty_like(clusters)
    for new_label in range(n_clusters):
        relabeled[clusters == order[new_label]] = new_label
    return relabeled, order
def k_means(data, nc, req_info=None):
    """Cluster *data* into *nc* groups with k-means on standardized features.

    Returns (labels, info); *info* carries the extras named in *req_info*
    ('all' or any of 'silhouette', 'inertia', 'centers'). Note the
    silhouette is computed on the raw data while the fit uses
    standardized features, matching the module's conventions.
    """
    mu = np.mean(data, axis=0)
    sigma = np.std(data, axis=0)
    standardized = (data - mu) / sigma

    model = KMeans(init='k-means++', n_clusters=nc, n_init=10)
    model.fit(standardized)

    if req_info == 'all':
        requested = ['silhouette', 'inertia', 'centers']
    else:
        requested = [] if req_info is None else req_info

    info = {}
    if 'silhouette' in requested:
        info['silhouette'] = metrics.silhouette_score(data, model.labels_)
    if 'inertia' in requested:
        info['inertia'] = model.inertia_
    if 'centers' in requested:
        # Undo the standardization so centers live in the original units.
        info['centers'] = model.cluster_centers_ * sigma + mu
    return model.labels_, info
def expectation_maximization(data, nc, cv_type='full', req_info=None):
    """Fit a Gaussian mixture to *data* and return per-sample labels.

    Uses sklearn's old ``GMM`` API (deprecated/removed in modern sklearn).

    :param data: 2-D array (samples x features)
    :param nc: number of mixture components
    :param cv_type: covariance type ('full', 'tied', 'diag', 'spherical')
    :param req_info: list of extra outputs, or 'all'
    :return: (labels, info dict with the requested extras)
    """
    gmm = GMM(n_components=nc, covariance_type=cv_type, thresh=1.0E-4, n_init=10)
    gmm.fit(data)
    labels = gmm.predict(data)
    if req_info == 'all':
        req_info = ['aic', 'bic', 'converged', 'weights', 'means', 'covars',
                    'silhouette', 'proba']
    elif req_info is None:
        req_info = []
    info = {}
    if 'aic' in req_info:
        info['aic'] = gmm.aic(data)
    if 'bic' in req_info:
        info['bic'] = gmm.bic(data)
    if 'converged' in req_info:
        info['converged'] = gmm.converged_
    if 'weights' in req_info:
        info['weights'] = gmm.weights_
    if 'means' in req_info:
        info['means'] = gmm.means_
    if 'covars' in req_info:
        # Expand covariances to one (nc, ., .) array regardless of cv_type.
        if cv_type == 'full':
            info['covars'] = gmm.covars_
        elif cv_type == 'tied':
            cov = np.empty((nc, gmm.covars_.shape[0], gmm.covars_.shape[1]))
            for i in range(nc):
                cov[i] = gmm.covars_.copy()
            info['covars'] = cov
        else:
            # NOTE(review): for 'diag'/'spherical', covars_ is (nc, nf), so
            # shape[0] here is nc, not nf — the resulting array looks
            # mis-shaped as (nc, nc, nf); confirm against callers.
            cov = np.empty((nc, gmm.covars_.shape[0], gmm.covars_.shape[1]))
            for i in range(nc):
                cov[i] = np.diag(gmm.covars_[i])
            info['covars'] = cov
    if 'silhouette' in req_info:
        info['silhouette'] = metrics.silhouette_score(data, labels)
    if 'proba' in req_info:
        # Transposed: rows are components, columns are samples.
        info['proba'] = gmm.predict_proba(data).T
    return labels, info
|
[
"adrianopaulo@gmail.com"
] |
adrianopaulo@gmail.com
|
fd7d229621c66ffd6a7ebdf6880af5d34b8f6d9a
|
bf10231754c95afd40d15b859f31b6157e1d991d
|
/Conversores a flotante.py
|
2372562d67d2de753418adaf194ee28d043af7dd
|
[] |
no_license
|
smith-sanchez/validadores_en_python
|
de7849b0f6dde93fff6c8a2db53070f60d101688
|
8dab14429c1e2c5345cddeabff5e10269b6dc6e9
|
refs/heads/master
| 2020-09-06T22:10:59.153382
| 2019-11-09T01:12:27
| 2019-11-09T01:12:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
# Convert the integer 43 to float
x=43
a=float(x)
print(a, type(a))
# Convert the integer 1453 to float
x=1453
b=float(x)
print(b, type(b))
# Convert the integer 4685 to float
x=4685
y=float(x)
print(y, type(y))
# Convert the string "1128" to float
x="1128"
c=float(x)
print(c, type(c))
# Convert the string "-30" to float
x="-30"
d=float(x)
print(d, type(d))
# Convert the boolean 50!=61 to float (True -> 1.0)
x=(50!=61)
e=float(x)
print(e, type(e))
# Convert the boolean 90>91 to float (False -> 0.0)
x=(90>91)
g=float(x)
print(g, type(g))
# Convert the boolean 500>=500 to float (True -> 1.0)
x=(500>=500)
h=float(x)
print(h, type(h))
# Convert the float 346.89 to float (no-op conversion)
x=346.89
j=float(x)
print(j, type(j))
# Convert the float 0.68 to float (no-op conversion)
x=0.68
n=float(x)
print(n, type(n))
|
[
"noreply@github.com"
] |
smith-sanchez.noreply@github.com
|
780352d72181bf550978615068022bcd6a8c8189
|
643c1132ac737dcaa68b887ee77155883a762329
|
/static_frame/core/index_auto.py
|
034e2af0da3c4bf524906972bdb89548aec3fffb
|
[
"MIT"
] |
permissive
|
flexatone/static-frame
|
baa071ce61143baabba9b6cd6bae453757ce861b
|
b41ff788c651ee9fe7006a2404a615c09034a3b6
|
refs/heads/master
| 2020-08-28T05:06:46.000715
| 2019-10-24T16:34:16
| 2019-10-24T16:34:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,403
|
py
|
import typing as tp
# from static_frame.core.index_base import IndexBase
from static_frame.core.index import Index
# from static_frame.core.index_hierarchy import IndexHierarchy
from static_frame.core.index import IndexGO
from static_frame.core.util import IndexConstructor
from static_frame.core.util import DTYPE_INT_DEFAULT
# The initializer is simply the number of labels to generate.
IndexAutoInitializer = int


# A trivial Index subclass is deliberately avoided here: the type would not
# always describe the instance. For example, a grow-only auto index can become
# a non-contiguous integer index after appends, as loc_is_iloc is reevaluated
# with each append and can simply go to False.
class IndexAutoFactory:
    """Factory for auto-generated integer indices labeled 0..initializer-1."""

    @classmethod
    def from_optional_constructor(cls,
            initializer: IndexAutoInitializer,
            *,
            default_constructor: IndexConstructor,
            explicit_constructor: tp.Optional[IndexConstructor] = None,
            ) -> tp.Union[Index, IndexGO]:
        """Build an index over ``range(initializer)``.

        When an explicit constructor is given it is used directly; otherwise
        the default constructor's STATIC flag selects Index or IndexGO, and
        loc_is_iloc / the default integer dtype are applied.
        """
        labels = range(initializer)
        if explicit_constructor:
            return explicit_constructor(labels)
        # No explicit constructor: derive mutability from the default.
        constructor = Index if default_constructor.STATIC else IndexGO
        return constructor(
                labels=labels,
                loc_is_iloc=True,
                dtype=DTYPE_INT_DEFAULT
                )


IndexAutoFactoryType = tp.Type[IndexAutoFactory]
|
[
"ariza@flexatone.com"
] |
ariza@flexatone.com
|
880d01f91b79c5bae80d1fbcc5492413031f7d76
|
5f86944bdf1b810a84c63adc6ed01bbb48d2c59a
|
/kubernetes/test/test_v1_service_status.py
|
1ce1ef5f3c6c9bd4502827d24bfdea8989ba9849
|
[
"Apache-2.0"
] |
permissive
|
m4ttshaw/client-python
|
384c721ba57b7ccc824d5eca25834d0288b211e2
|
4eac56a8b65d56eb23d738ceb90d3afb6dbd96c1
|
refs/heads/master
| 2021-01-13T06:05:51.564765
| 2017-06-21T08:31:03
| 2017-06-21T08:31:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 859
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_service_status import V1ServiceStatus
class TestV1ServiceStatus(unittest.TestCase):
    """ V1ServiceStatus unit test stubs (generated by swagger-codegen). """

    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass

    def tearDown(self):
        pass

    def testV1ServiceStatus(self):
        """
        Test V1ServiceStatus
        """
        # Smoke test: constructing the generated model must not raise.
        model = kubernetes.client.models.v1_service_status.V1ServiceStatus()

if __name__ == '__main__':
    unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
6c6b92be69dae51b2f5d2845518c3defea495abe
|
6adc166c33bdada82f7509301e25d1c451852d24
|
/log/log_config.py
|
3fb9c0d9f12ce15977115f4fc21f827713b9fd2f
|
[] |
no_license
|
liushiwen555/unified_management_platform_backend
|
ab0a92f38be7f68bc1d3c4570560ea89bf8fcb07
|
ae1ade20044b59de1e29288fcd61ba0b71d92be3
|
refs/heads/master
| 2023-06-25T22:45:24.669052
| 2021-07-27T01:08:24
| 2021-07-27T01:08:24
| 389,808,950
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
from log.log_content import log_config
"""
LOG_CONFIG_DICT结构为:
LOG_CONFIG_DICT{
URL_NAME:{
HTTP_METHOD: 生成log的类
}
}
"""
LOG_CONFIG_DICT = log_config.get_config()
class ModelLog(object):
    """Dispatches a finished request/response pair to its log generator class.

    The generator is looked up by (resolved URL name, HTTP method) in
    LOG_CONFIG_DICT.
    """

    def __init__(self):
        # Mapping of url_name -> {HTTP method -> log generator class}.
        self.conf_dict = LOG_CONFIG_DICT

    def log(self, request, request_body, result, response, *args, **kwargs):
        """Generate a log entry for this request.

        Best-effort: any failure (unknown URL/method, generator error) is
        deliberately swallowed so logging can never break request handling.
        """
        try:
            # Get the URL name the resolver matched for this request.
            url_name = request.resolver_match.url_name
            method = request.method
            log_generator = self.conf_dict[url_name][method]
            log_generator_instance = log_generator(
                request, request_body, result, response, *args, **kwargs)
            log_generator_instance.generate_log()
        except Exception as e:
            # print(e)
            pass
        # url_name = request.resolver_match.url_name
        # method = request.method
        # log_generator = self.conf_dict[url_name][method]
        # log_generator_instance = log_generator(
        #     request, request_body, result, response, *args, **kwargs)
        # log_generator_instance.generate_log()
|
[
"1599932996@qq.com"
] |
1599932996@qq.com
|
2630abbdd53c1fc1ab734fcfcc1a981f0a351e73
|
8bbe2351bbd157a46ccf8530cde4e4cc7b0bd3b7
|
/trashed_20170508/evan/Entropy.py
|
ed75ce66a6a3b227a188a9ff686da63efd021400
|
[] |
no_license
|
airuibel/py_code
|
8dc98d71e79a4c0f785ad5cf81b2ca2073061ebf
|
1da9a9dcd37475dd14bab6ae58bca1e2dff4c251
|
refs/heads/master
| 2020-06-18T03:47:43.754204
| 2018-03-20T09:31:00
| 2018-03-20T09:31:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,366
|
py
|
from __future__ import division
__author__ = 'Victor Ruiz, vmr11@pitt.edu'
import pandas as pd
import numpy as np
from math import log
import random
def entropy(data_classes, base=2):
    '''
    Computes the entropy of a set of labels (class instantiations)
    :param base: logarithm base for computation
    :param data_classes: Series with labels of examples in a dataset
    :return: value of entropy
    '''
    if not isinstance(data_classes, pd.core.series.Series):
        raise AttributeError('input array should be a pandas series')
    total = len(data_classes)
    ent = 0
    # Accumulate -p*log(p) over the observed class frequencies.
    for count in data_classes.value_counts():
        proportion = count / total
        ent -= proportion * log(proportion, base)
    return ent
def cut_point_information_gain(dataset, cut_point, feature_label, class_label):
    '''
    Return the information gain obtained by splitting a numeric attribute
    in two according to cut_point
    :param dataset: pandas dataframe with a column for attribute values and a column for class
    :param cut_point: threshold at which to partition the numeric attribute
    :param feature_label: column label of the numeric attribute values in data
    :param class_label: column label of the array of instance classes
    :return: information gain of partition obtained by threshold cut_point
    '''
    if not isinstance(dataset, pd.core.frame.DataFrame):
        raise AttributeError('input dataset should be a pandas data frame')
    # Entropy of the class column before any split.
    base_entropy = entropy(dataset[class_label])
    below = dataset[dataset[feature_label] <= cut_point]
    above = dataset[dataset[feature_label] > cut_point]
    n = len(dataset)
    # Gain = full entropy minus the size-weighted entropies of each side.
    return (base_entropy
            - (len(below) / n) * entropy(below[class_label])
            - (len(above) / n) * entropy(above[class_label]))
def datset_infomation_gain(datset, class_label='y'):
    '''
    Return information gain of attributes.

    Currently only validates its input (the file appears unfinished);
    returns None for a valid DataFrame.
    :param datset: pandas DataFrame to analyze (sic: parameter name kept
        for backward compatibility)
    :param class_label: column holding the class labels
    :raises AttributeError: if *datset* is not a pandas DataFrame
    '''
    # BUG FIX: the original tested the undefined name `dataset`, so every
    # call raised NameError instead of performing the validation.
    if not isinstance(datset, pd.core.frame.DataFrame):
        raise AttributeError('input dataset should be a pandas data frame')
[
"l"
] |
l
|
b0629394abb4bc1fca15b22b03658e2e3db5dbaf
|
89a90707983bdd1ae253f7c59cd4b7543c9eda7e
|
/programming_python/Dbase/Sql/dumpdb.py
|
5f9388d122656adc52dde04932b87d1bff4696fb
|
[] |
no_license
|
timothyshull/python_reference_code
|
692a7c29608cadfd46a6cc409a000023e95b9458
|
f3e2205dd070fd3210316f5f470d371950945028
|
refs/heads/master
| 2021-01-22T20:44:07.018811
| 2017-03-17T19:17:22
| 2017-03-17T19:17:22
| 85,346,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
"""
display table contents as raw tuples, or formatted with field names
command-line usage: dumpdb.py dbname? table? [-] (dash=formatted display)
"""
def showformat(recs, sept=('-' * 40)):
    """Pretty-print a list of record dicts, one aligned key => value per line,
    with a separator rule between records."""
    print(len(recs), 'records')
    print(sept)
    for rec in recs:
        width = max(len(key) for key in rec)          # widest key, for alignment
        for key in rec:
            print('%-*s => %s' % (width, key, rec[key]))
        print(sept)
def dumpdb(cursor, table, format=True):
    """Dump every row of *table* through *cursor*.

    :param cursor: open DB-API cursor
    :param table: table name (trusted — interpolated into SQL, not parameterized)
    :param format: False prints raw tuples; True prints aligned key/value rows
    """
    if not format:
        cursor.execute('select * from ' + table)
        # Fetch one row at a time until the cursor is exhausted.
        while True:
            rec = cursor.fetchone()
            if not rec: break
            print(rec)
    else:
        # Deferred import: makedicts is only needed for formatted output.
        from makedicts import makedicts
        recs = makedicts(cursor, 'select * from ' + table)
        showformat(recs)
if __name__ == '__main__':
    import sys
    # Defaults when no command-line arguments are given.
    dbname, format, table = 'dbase1', False, 'people'
    cmdargs = sys.argv[1:]
    if '-' in cmdargs:               # format if '-' in cmdline args
        format = True                # dbname if other cmdline arg
        cmdargs.remove('-')
    # Remaining positional args: database name, then table name.
    if cmdargs: dbname = cmdargs.pop(0)
    if cmdargs: table = cmdargs[0]
    from loaddb import login
    conn, curs = login(dbname)
    dumpdb(curs, table, format)
|
[
"timothyshull@gmail.com"
] |
timothyshull@gmail.com
|
5d81f00e6ed97d46ea3a1c80d7a52c23666db239
|
91b2fb1fb6df216f2e365c3366bab66a567fc70d
|
/Week08/每日一题/342. 4的幂.py
|
90f118bb6733e9b684bf6a93850ca73ef1b6af84
|
[] |
no_license
|
hrz123/algorithm010
|
d17aee642f03f607a7984beb099eec18f2de1c8e
|
817911d4282d2e226518b3533dff28282a91b3d4
|
refs/heads/master
| 2022-12-20T14:09:26.365781
| 2020-10-11T04:15:57
| 2020-10-11T04:15:57
| 270,178,423
| 1
| 0
| null | 2020-06-07T03:21:09
| 2020-06-07T03:21:09
| null |
UTF-8
|
Python
| false
| false
| 2,606
|
py
|
# 342. 4的幂.py
# 1. Brute force — self-explanatory.
# 2. Brute force + precomputation:
#    the input is a 32-bit integer (x < 2**31 - 1), so the largest power of
#    four exponent is 15, giving only the 16 possibilities 4**0 .. 4**15.
# 3. num > 0 and log2(num) is an even integer.
# 4. Bit trick: powers of four have their single set bit at an even position
#    (00000001, 00000100, 00010000, ...), while plain powers of two can also
#    sit at odd positions. The even-position mask is 0x55555555 (32-bit) and
#    the odd-position mask is 0xaaaaaaaa; AND with one of them distinguishes.
# 5. Bit trick for power of two, then num % 3 == 1 means a power of four,
#    num % 3 == 2 means a power of two that is not a power of four.
class Solution:
    """Answers isPowerOfFour by membership in a precomputed set of powers."""

    def __init__(self):
        # 4**0 .. 4**15 are the only powers of four below 2**31.
        self.nums = {4 ** k for k in range(16)}

    def isPowerOfFour(self, num: int) -> bool:
        return num in self.nums
class Powers:
    """Holds the set of all powers of four representable in 31 bits."""

    def __init__(self):
        self.nums = {4 ** k for k in range(16)}
class Solution:
    # Share one precomputed Powers table across all Solution instances.
    p = Powers()

    def isPowerOfFour(self, num: int) -> bool:
        return num in self.p.nums
# The original file re-declared `class Solution` ten times with equivalent
# bit-trick bodies; only the final definition is ever visible to callers,
# so the dead duplicates are removed and the final body kept verbatim.
class Solution:
    def isPowerOfFour(self, num: int) -> bool:
        """Truthy iff *num* is a power of four.

        num > 0 rules out zero and negatives; num & (num - 1) == 0 means a
        single set bit (a power of two); 0x55555555 masks the even bit
        positions (bits 0, 2, 4, ...), where powers of four live.
        Note: the expression yields bool or 0 (falsy int), matching the
        original return values exactly.
        """
        return num > 0 and num & 0x55555555 and not num & (num - 1)
def main():
    # Ad-hoc smoke test for the final Solution definition above.
    sol = Solution()
    num = 16
    res = sol.isPowerOfFour(num)
    print(res)

if __name__ == '__main__':
    main()
|
[
"2403076194@qq.com"
] |
2403076194@qq.com
|
81a42ccdd857bad5f3dc6eb2f2b2d73362e6ce9e
|
09fd456a6552f42c124c148978289fae1af2d5c3
|
/Array/1380.py
|
1060e1504031b93129f5620c6c68d483b799b102
|
[] |
no_license
|
hoang-ng/LeetCode
|
60b4e68cbcf54cbe763d1f98a70f52e628ab32fb
|
5407c6d858bfa43325363503c31134e560522be3
|
refs/heads/master
| 2021-04-10T11:34:35.310374
| 2020-07-28T10:22:05
| 2020-07-28T10:22:05
| 248,932,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
# 1380. Lucky Numbers in a Matrix
# Given a m * n matrix of distinct numbers, return all lucky numbers in the matrix in any order.
# A lucky number is an element of the matrix such that it is the minimum element in its row and maximum in its column.
# Example 1:
# Input: matrix = [[3,7,8],[9,11,13],[15,16,17]]
# Output: [15]
# Explanation: 15 is the only lucky number since it is the minimum in its row and the maximum in its column
# Example 2:
# Input: matrix = [[1,10,4,2],[9,3,8,7],[15,16,17,12]]
# Output: [12]
# Explanation: 12 is the only lucky number since it is the minimum in its row and the maximum in its column.
# Example 3:
# Input: matrix = [[7,8],[1,2]]
# Output: [7]
# Constraints:
# m == mat.length
# n == mat[i].length
# 1 <= n, m <= 50
# 1 <= matrix[i][j] <= 10^5.
# All elements in the matrix are distinct.
class Solution:
    def luckyNumbers(self, matrix):
        """Return all lucky numbers of *matrix* (minimum in their row AND
        maximum in their column), in column order.

        All elements are distinct (per the problem constraints), so set
        membership is unambiguous.
        Improvements over the original: the per-column O(m) membership test
        against a list is replaced by an O(1) set lookup, and the manual
        column-max loop by max over zip(*matrix) — overall O(m*n).
        """
        # A lucky number must be the minimum of some row.
        row_mins = {min(row) for row in matrix}
        # Column maxima, kept in column order to match the original output.
        return [mx for mx in (max(col) for col in zip(*matrix))
                if mx in row_mins]
|
[
"hoang2109@gmail.com"
] |
hoang2109@gmail.com
|
a9b57132d2463d5a1544bb28dc24c79e9975f645
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/meta/FalloutTankCarouselMeta.py
|
7f535a3045bd53b0a9708717d041352cc1658195
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 812
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/FalloutTankCarouselMeta.py
from gui.Scaleform.daapi.view.lobby.hangar.carousels.basic.tank_carousel import TankCarousel
class FalloutTankCarouselMeta(TankCarousel):
    """DAAPI meta stubs for the Fallout tank carousel (decompiled code).

    The plain methods are Flash->Python entry points that concrete views must
    override; the as_* methods forward calls into the Flash object when the
    DAAPI bridge is initialized.
    """

    def changeVehicle(self, id):
        # Stub: logs an override error unless a subclass implements it.
        self._printOverrideError('changeVehicle')

    def clearSlot(self, vehicleId):
        self._printOverrideError('clearSlot')

    def shiftSlot(self, vehicleId):
        self._printOverrideError('shiftSlot')

    def as_setMultiselectionInfoS(self, data):
        return self.flashObject.as_setMultiselectionInfo(data) if self._isDAAPIInited() else None

    def as_getMultiselectionDPS(self):
        return self.flashObject.as_getMultiselectionDP() if self._isDAAPIInited() else None
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
cd8dbe19a67e8ef7bed57e61394fe2abd45dd9cc
|
c48221dbd1335701178a8b4bfadd22b16fa168fd
|
/tests/example_app/admin.py
|
3bc84c5af51a76dbe4fa83ca91d69cf12bbdce1f
|
[
"BSD-2-Clause"
] |
permissive
|
prokaktus/django-meta
|
e92e9c45239993e4d6b350083a84c1d98a01f515
|
21e740f083ea32b150ad012c3bc7941ed920de20
|
refs/heads/develop
| 2020-04-05T14:35:42.801057
| 2017-05-06T12:26:17
| 2017-05-06T12:26:17
| 94,702,571
| 1
| 0
| null | 2017-06-18T17:31:16
| 2017-06-18T17:31:16
| null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.contrib import admin
from .models import Post
# Expose Post in the Django admin with the default ModelAdmin options.
admin.site.register(Post)
|
[
"i.spalletti@nephila.it"
] |
i.spalletti@nephila.it
|
0fffdfcf4e1ec8477a77939f528d77ad467a4a16
|
fd25231975acd147e04dc3ed3627c92cb1a4f86c
|
/FlaskAPI/vir_env/lib/python3.7/site-packages/scipy/sparse/bsr.py
|
f86c448e1d216ac11586184af2d1e5370407136f
|
[] |
no_license
|
sumitkutty/Flight-Price-Prediction
|
832a2802a3367e655b46d3b44f073d917abd2320
|
d974a8b75fbcbfa42f11703602af3e45a3f08b3c
|
refs/heads/master
| 2022-12-25T07:13:06.375888
| 2020-10-08T18:46:44
| 2020-10-08T18:46:44
| 302,366,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:ef0f9bb9c9a9bd37389ba380aa25a50d381d571eaa83888b4205f2d207020fc5
size 24882
|
[
"sumitkutty37@gmail.com"
] |
sumitkutty37@gmail.com
|
3161a5cd27238b9cfa5bc819abae49d5b6fca114
|
36785c0893ab1e2c81c6a03305f42459776a84e0
|
/ambra_sdk/service/entrypoints/order.py
|
f2e0fe900a88d10a1589a4cb0442c3d0b940b6d2
|
[
"Apache-2.0"
] |
permissive
|
dicomgrid/sdk-python
|
06589f87f33850bd15e6e99fb683bada6492775f
|
2618e682d38339439340d86080e8bc6ee6cf21b5
|
refs/heads/master
| 2022-08-28T14:50:35.864012
| 2022-08-22T12:36:50
| 2022-08-22T12:36:50
| 253,867,502
| 11
| 6
|
Apache-2.0
| 2022-04-13T10:06:38
| 2020-04-07T17:36:56
|
HTML
|
UTF-8
|
Python
| false
| false
| 259
|
py
|
from ambra_sdk.service.entrypoints.generated.order import \
AsyncOrder as GAsyncOrder
from ambra_sdk.service.entrypoints.generated.order import Order as GOrder
class Order(GOrder):
"""Order."""
class AsyncOrder(GAsyncOrder):
"""AsyncOrder."""
|
[
"akapustin@ambrahealth.com"
] |
akapustin@ambrahealth.com
|
b866c9d98d721c2e46b53ade178d935ac345b7f0
|
20564b667fe6a9fa7c75e9b20e2f0446ec3440c8
|
/venv/bin/pip3
|
27feb6ae67391b30f70bc833820ae9d3ff02bf94
|
[] |
no_license
|
prashantpandey9/Covid19-India-tracker
|
a7e544264df92df7c790e5745ef70b69fc39263a
|
03df61342dffd12520f5f4172f879e35e9e6fa85
|
refs/heads/master
| 2023-08-14T00:36:19.974705
| 2021-02-27T11:16:49
| 2021-02-27T11:16:49
| 258,137,688
| 7
| 3
| null | 2021-09-22T19:02:07
| 2020-04-23T08:14:55
|
Python
|
UTF-8
|
Python
| false
| false
| 255
|
#!/home/prashant/my_project/covid_19/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix Windows launchers append to
    # argv[0] so pip reports a clean program name, then delegate to pip's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"prashantpandey94551@gmail.com"
] |
prashantpandey94551@gmail.com
|
|
54f4a480d8c01ce5d7b1aa5e4b8ab7f31dfd0da8
|
ad5494244bb4d0d92df8178d96b99b949f9ee04c
|
/hashing/models.py
|
10ed2e0c8732c4953087615021d57659be2234d1
|
[] |
no_license
|
razyesh/Hash-gen-SHA256
|
14dbf86fab593bccec9997ec087535ee02995a6c
|
5cf91114962e048893c910832460f6984d787a38
|
refs/heads/master
| 2021-09-26T11:38:28.819734
| 2020-04-03T19:00:57
| 2020-04-03T19:00:57
| 252,815,271
| 0
| 0
| null | 2021-09-22T18:49:48
| 2020-04-03T18:56:29
|
Python
|
UTF-8
|
Python
| false
| false
| 156
|
py
|
from django.db import models
# Create your models here.
class Hash(models.Model):
    """Stores a text payload together with its hash digest."""

    # Original input text.
    text = models.TextField()
    # Hex digest; 64 characters fits SHA-256. (Field name shadows the `hash`
    # builtin, but renaming would require a schema migration.)
    hash = models.CharField(max_length=64)
|
[
"pudasainirajesh504@gmail.com"
] |
pudasainirajesh504@gmail.com
|
57895863065f914aab45070167a5407a01d68969
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_hashish.py
|
6f095d1a7cb0b473df03c662bf7368e60180a1e3
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
# class header
class _HASHISH():
def __init__(self,):
self.name = "HASHISH"
self.definitions = [u'a drug, illegal in many countries, made from the cannabis plant and usually smoked']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
61f25521a8ac9bb244b7920b6aad006c7e980101
|
04fcaa42f982bc6c8de1d3a28e83007a5b8b000d
|
/tests/tests_indiv_jobs/test_ooziejob.py
|
31743f67165d76004d2470b532d5a8319dbf825d
|
[
"BSD-3-Clause"
] |
permissive
|
tadinve/naga
|
26622416db7ff81a256a2f51daac0769763ed711
|
52a789ff79cc20aa999f7bb731a1c3cc3acc27fa
|
refs/heads/main
| 2023-08-12T22:45:19.876256
| 2021-09-24T02:15:42
| 2021-09-24T02:15:42
| 389,231,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
from ctm_python_client.jobs.hadoop.oozie import OozieJob
import os
from ctm_python_client.core.bmc_control_m import CmJobFlow
from ctm_python_client.session.session import Session
BASE_PATH = os.path.abspath(os.path.dirname(__file__))

# Control-M credentials are kept out of the repo in a ".secrets" file:
# line 1 = endpoint URI, line 2 = username, line 3 = password.
with open(BASE_PATH + "/.secrets", "r") as fp:
    ctm_uri = fp.readline().strip()
    ctm_user = fp.readline().strip()
    ctm_pwd = fp.readline().strip()

# Create CTM Session
session = Session(endpoint=ctm_uri, username=ctm_user, password=ctm_pwd)

# CREATE JOB FLOW
t1_flow = CmJobFlow(
    application="Naga0.3_Test", sub_application="TestAllJobs", session=session
)
t1_flow.set_run_as(username="ctmuser", host="acb-rhctmv20")

# Define the schedule
months = ["JAN", "OCT", "DEC"]
monthDays = ["ALL"]
weekDays = ["MON", "TUE", "WED", "THU", "FRI"]
fromTime = "0300"
toTime = "2100"
t1_flow.set_schedule(months, monthDays, weekDays, fromTime, toTime)

# Create Folder named after this test module (filename minus ".py").
fn = os.path.split(__file__)[-1][:-3]
f1 = t1_flow.create_folder(name=fn)

j1 = OozieJob(
    folder=f1,
    job_name='oozie',
    host="edgenode",
    connection_profile="DEV_CLUSTER",
    job_properties_file="/home/user/job.properties",
    oozie_options=[{'inputDir': '/usr/tucu/inputdir'}, {'outputDir': '/usr/tucu/outputdir'}],
)
t1_flow.add_job(folder=f1, job=j1)

import json

# deploy() returns Python repr text, not JSON; massage it into valid JSON.
# NOTE(review): this repr-to-JSON rewriting is fragile (a quote inside any
# value breaks it) — confirm the deploy payload is always this simple.
x = t1_flow.deploy()
s = str(x[0])
s = s.replace("'", '"')
s = s.replace("None", '"None"')
s = s.replace("False", '"False"')
s = s.replace("True", '"True"')
s = s.replace("\n", "")
j = json.loads(s)

def test_output():
    # Exactly one folder was created above, so the deploy must report one.
    assert j["successful_smart_folders_count"] == 1
|
[
"vtadinad@bmc.com"
] |
vtadinad@bmc.com
|
99a71c7b9e27e3157cd4de85ab83cc05e523d4bc
|
d902ac93fbff644ca2868d5836a9f476f3cd91fd
|
/wq_csv/util.py
|
6bbe34d6c95cb08f37c53383ded1fea23ec6d5ee
|
[] |
no_license
|
NMWDI/WDIExtractors
|
5b3c2ad3562449ba6c3c2467284c9cd3e046837f
|
2da9aa7a1bd53d58ff7479f4507fffbc15b3bbb2
|
refs/heads/master
| 2022-12-24T17:19:35.998182
| 2020-10-02T19:22:48
| 2020-10-02T19:22:48
| 265,703,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,162
|
py
|
# ===============================================================================
# Copyright 2020 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import re
import tempfile
from datetime import datetime
import yaml
from itertools import groupby
from operator import attrgetter, itemgetter
# Placeholder used when the source data carries no description.
NO_DESCRIPTION = 'No Description Available'
import logging
logger = logging.getLogger('Parser')
logger.setLevel(logging.DEBUG)
def rows_to_yaml(location_name, path, items, wq_tag):
    """Write one location's water-quality rows to *path* as a YAML document.

    :param location_name: POINT_ID of the location (used in names)
    :param path: output YAML file path
    :param items: iterable of row dicts for this location; all rows are
        assumed to share Latitude/Longitude/WellDepth/DataSource — TODO confirm
    :param wq_tag: column name of the analyte (e.g. 'Arsenic')
    """
    longitude_key = 'Longitude'
    latitude_key = 'Latitude'
    # Materialize: *items* may be a groupby iterator and is consumed twice.
    items = list(items)
    item = items[0]
    logger.debug('items {}'.format(items))
    logger.debug('item {}'.format(item))
    try:
        wd = float(item['WellDepth'])
    except:
        # NOTE(review): bare except — any non-numeric depth becomes ''.
        wd = ''
    obj = {'location': {'name': location_name, 'description': NO_DESCRIPTION},
           'sensor': {'name': 'Analytical Water Chemistry', 'description': NO_DESCRIPTION},
           'thing': {'name': 'WaterQuality',
                     'properties': {'welldepth': wd,
                                    'datasource': item['DataSource']},
                     'description': NO_DESCRIPTION},
           'datastream': {'name': '{} Water Quality Datastream'.format(wq_tag), 'description': NO_DESCRIPTION},
           'observed_property': {'name': wq_tag, 'description': NO_DESCRIPTION}}
    loc = obj['location']
    loc['geometry'] = {'type': 'Point', 'coordinates': [float(item[longitude_key]), float(item[latitude_key])]}
    ds = obj['datastream']
    ds['unitofMeasurement'] = 'ppm'
    # Classify the result string to pick an observation type for the stream.
    result = item[wq_tag]
    ds['observationType'] = get_observation_type(result)

    def obsfactory(i):
        # "CollectionDate" is parsed as a naive timestamp and emitted with a
        # hard-coded .000Z suffix — presumably the data is UTC; verify.
        pt = i['CollectionDate']
        pt = datetime.strptime(pt, '%Y-%m-%d %H:%M:%S.%f')
        return '{}.000Z, {}'.format(pt.isoformat(), i[wq_tag])

    obj['observations'] = [obsfactory(item) for item in items]
    with open(path, 'w') as wf:
        yaml.dump(obj, wf)
# Patterns used to classify a raw string value (checked in this order).
# BUG FIX: the '.' in DOUBLE was unescaped, so any 3+ digit integer (e.g.
# '425') matched as a double. BUG FIX: BOOL's alternation only anchored the
# first branch and had no end anchor, so any value starting with 't' or 'f'
# (e.g. 'temperature') classified as bool.
DOUBLE = re.compile(r'^-?\d+\.\d+')
BOOL = re.compile(r'^(?:true|false|t|f)$')
URI = re.compile(r'^http')
INT = re.compile(r'^-?\d+$')


def get_observation_type(value):
    """Classify the raw string *value* as 'double', 'bool', 'uri', 'integer'
    or 'any'.

    The value is stripped and lower-cased, then the first matching pattern
    (in the order above) wins; no match falls back to 'any'.
    """
    for res, oti in ((DOUBLE, 'double'),
                     (BOOL, 'bool'),
                     (URI, 'uri'),
                     (INT, 'integer')):
        if res.match(value.strip().lower()):
            ot = oti
            break
    else:
        ot = 'any'
    return ot
class Parser:
    """Context manager that converts a water-quality CSV into per-location
    YAML files in a temporary directory."""

    def __enter__(self):
        # NOTE(review): the temp directory is never removed (cleanup below is
        # commented out) — confirm whether callers rely on the files persisting.
        self._tempdir = tempfile.mkdtemp()
        # self._tempdir = '/Users/ross/Sandbox/wdi/csvextractor'
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
        # os.removedirs(self._tempdir)

    def items(self, inputfile):
        """
        read inputfile as csv, convert to a list of yamls
        WQ_XXX.csv example headers
        WQ_Arsenic
        POINT_ID,CollectionDate,HistoricDate,Arsenic,Latitude,Longitude,WellDepth,DataSource,
        DataSourceInfo,Arsenic_Symbol,GeoLocation
        :param inputfile:
        :return: generator of written YAML file paths (one per POINT_ID)
        """
        delimiter = ','
        with open(inputfile, 'r') as rf:
            rows = []
            header = None
            for line in rf:
                # Naive CSV split — assumes no quoted/embedded commas.
                row = line.split(delimiter)
                if header is None:
                    header = [r.strip() for r in row]
                    continue
                row = dict(zip(header, [r.strip() for r in row]))
                rows.append(row)
        # determine wq_tag from header: first known analyte column present.
        wq_tag = next((n for n in ('Arsenic', 'HCO3',
                                   'Ca', 'Cl', 'F', 'Mg', 'Na',
                                   'SO4', 'TDS', 'U') if n in header), None)
        # groupby requires rows sorted by the same key it groups on.
        for location_name, items in groupby(sorted(rows,
                                                   key=itemgetter('POINT_ID')),
                                            key=itemgetter('POINT_ID')):
            location_name = location_name.replace(' ', '_')
            name = '{}.yaml'.format(location_name)
            tmpfile = os.path.join(self._tempdir, name)
            rows_to_yaml(location_name, tmpfile, items, wq_tag)
            yield tmpfile
# if __name__ == '__main__':
# with Parser() as p:
# for i in p.items('/Users/ross/Programming/wdidata/wq_arsenic.csv'):
# print('isda', i)
# with open(i, 'r') as rfile:
# obj = yaml.load(rfile, Loader=yaml.SafeLoader)
# print('asd', obj)
# break
# ============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
196a7d3700530cf459ab672120d0ffb207717998
|
c2d5055a7b292d18facce55d975ff8f9a19b5c39
|
/examples/list_uarts.py
|
cbacc1f1a56b6a6c280b9dcd6a600aa50896ddb8
|
[
"MIT"
] |
permissive
|
playi/Adafruit_Python_BluefruitLE
|
11f6c268436ebf1eb554fdfcf3a58eac0d01816e
|
928669aff263b6602365ecfea2a1efe1950c111c
|
refs/heads/master
| 2021-04-18T07:20:13.684162
| 2018-06-04T18:04:26
| 2018-06-04T18:04:26
| 126,250,975
| 2
| 3
|
MIT
| 2018-08-01T20:06:52
| 2018-03-21T23:04:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,515
|
py
|
# Search for BLE UART devices and list all that are found.
# Author: Tony DiCola
import atexit
import time
import Adafruit_BluefruitLE
from Adafruit_BluefruitLE.services import UART
# Get the BLE provider for the current platform (bluez on Linux,
# CoreBluetooth on macOS).
ble = Adafruit_BluefruitLE.get_provider()
# Main function implements the program logic so it can run in a background
# thread. Most platforms require the main thread to handle GUI events and other
# asyncronous events like BLE actions. All of the threading logic is taken care
# of automatically though and you just need to provide a main function that uses
# the BLE provider.
def main():
    """Scan forever for BLE UART devices, printing each newly seen one.

    Runs on a background thread via ble.run_mainloop_with; never returns.
    """
    # Clear any cached data because both bluez and CoreBluetooth have issues with
    # caching data and it going stale.
    ble.clear_cached_data()
    # Get the first available BLE network adapter and make sure it's powered on.
    adapter = ble.get_default_adapter()
    adapter.power_on()
    print('Using adapter: {0}'.format(adapter.name))
    # Start scanning with the bluetooth adapter.
    adapter.start_scan()
    # Use atexit.register to call the adapter stop_scan function before quiting.
    # This is good practice for calling cleanup code in this main function as
    # a try/finally block might not be called since this is a background thread.
    atexit.register(adapter.stop_scan)
    print('Searching for UART devices...')
    print('Press Ctrl-C to quit (will take ~30 seconds on OSX).')
    # Enter a loop and print out whenever a new UART device is found.
    known_uarts = set()
    while True:
        # Call UART.find_devices to get a list of any UART devices that
        # have been found. This call will quickly return results and does
        # not wait for devices to appear.
        found = set(UART.find_devices())
        # Check for new devices that haven't been seen yet and print out
        # their name and ID (MAC address on Linux, GUID on OSX).
        new = found - known_uarts
        for device in new:
            print('Found UART: {0} [{1}]'.format(device.name, device.id))
        known_uarts.update(new)
        # Sleep for a second and see if new devices have appeared.
        time.sleep(1.0)
# Initialize the BLE system.  MUST be called before other BLE calls!
ble.initialize()
# Start the mainloop to process BLE events, and run the provided function in
# a background thread.  When the provided main function stops running, returns
# an integer status code, or throws an error the program will exit.
ble.run_mainloop_with(main)
|
[
"tony@tonydicola.com"
] |
tony@tonydicola.com
|
02fb7d52b9f2d4dd06d435b3711339f8d9111826
|
baf3996414315ffb60470c40c7ad797bf4e6897f
|
/02_ai/4_cv/1_ml_mastery/1_cv/code/chapter_20/07_model_3vgg_data_aug.py
|
02800ffcce5e2f90d1d314cb9dac9b719e6d5e41
|
[
"MIT"
] |
permissive
|
thiago-allue/portfolio
|
8fbbecca7ce232567aebe97c19944f444508b7f4
|
0acd8253dc7c5150fef9b2d46eead3db83ca42de
|
refs/heads/main
| 2023-03-15T22:10:21.109707
| 2022-09-14T17:04:35
| 2022-09-14T17:04:35
| 207,919,073
| 0
| 0
| null | 2019-11-13T18:18:23
| 2019-09-11T22:40:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,497
|
py
|
# baseline model with data augmentation on the cifar10 dataset
import sys
from matplotlib import pyplot
from keras.datasets import cifar10
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
# load train and test dataset
def load_dataset():
	"""Load CIFAR-10 and one-hot encode the class labels.

	Returns (trainX, trainY, testX, testY).  Pixel values are still raw
	uint8 in 0-255; scaling happens later in prep_pixels().
	"""
	# load dataset
	(trainX, trainY), (testX, testY) = cifar10.load_data()
	# one hot encode target values
	trainY = to_categorical(trainY)
	testY = to_categorical(testY)
	return trainX, trainY, testX, testY
# scale pixels
def prep_pixels(train, test):
	"""Cast both image arrays to float32 and rescale pixels from 0-255 into 0-1."""
	normalized = [images.astype('float32') / 255.0 for images in (train, test)]
	# hand back the scaled copies in the same (train, test) order
	return normalized[0], normalized[1]
# define cnn model
def define_model():
	"""Build and compile a 3-block VGG-style CNN for 32x32x3 CIFAR-10 input.

	Three Conv-Conv-Pool blocks (32, 64, 128 filters) feed a 128-unit dense
	layer and a 10-way softmax head; compiled with SGD + momentum.
	"""
	model = Sequential()
	model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same', input_shape=(32, 32, 3)))
	model.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
	model.add(MaxPooling2D((2, 2)))
	model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
	model.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
	model.add(MaxPooling2D((2, 2)))
	model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
	model.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
	model.add(MaxPooling2D((2, 2)))
	model.add(Flatten())
	model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
	model.add(Dense(10, activation='softmax'))
	# compile model
	opt = SGD(lr=0.001, momentum=0.9)
	model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
	return model
# plot diagnostic learning curves
def summarize_diagnostics(history):
	"""Plot train/test loss and accuracy curves and save them beside the script.

	NOTE(review): reads the history keys 'acc'/'val_acc', which assumes an
	older Keras; newer releases use 'accuracy'/'val_accuracy' -- confirm the
	installed version matches.
	"""
	# plot loss
	pyplot.subplot(211)
	pyplot.title('Cross Entropy Loss')
	pyplot.plot(history.history['loss'], color='blue', label='train')
	pyplot.plot(history.history['val_loss'], color='orange', label='test')
	# plot accuracy
	pyplot.subplot(212)
	pyplot.title('Classification Accuracy')
	pyplot.plot(history.history['acc'], color='blue', label='train')
	pyplot.plot(history.history['val_acc'], color='orange', label='test')
	# save plot to file: named after the running script (sys.argv[0])
	filename = sys.argv[0].split('/')[-1]
	pyplot.savefig(filename + '_plot.png')
	pyplot.close()
# run the test harness for evaluating a model
def run_test_harness():
	"""Train the CNN with shift/flip augmentation for 100 epochs and print test accuracy."""
	# load dataset
	trainX, trainY, testX, testY = load_dataset()
	# prepare pixel data
	trainX, testX = prep_pixels(trainX, testX)
	# define model
	model = define_model()
	# create data generator: 10% shifts plus horizontal flips
	datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
	# prepare iterator
	it_train = datagen.flow(trainX, trainY, batch_size=64)
	# fit model: one pass over the training set per epoch (len(train) / batch)
	steps = int(trainX.shape[0] / 64)
	history = model.fit_generator(it_train, steps_per_epoch=steps, epochs=100, validation_data=(testX, testY), verbose=0)
	# evaluate model
	_, acc = model.evaluate(testX, testY, verbose=0)
	print('> %.3f' % (acc * 100.0))
	# learning curves
	summarize_diagnostics(history)
# entry point, run the test harness
run_test_harness()
|
[
"thiago.allue@yahoo.com"
] |
thiago.allue@yahoo.com
|
6a6c28212d9e9aabe4376c6f3ca2a32bf4e73053
|
f6f4c87a1f2e750530a7d691da43514d84f99f5c
|
/hw20/a/q3/q3.py
|
6ee2af17e499595428e05652bd352713f2bf63d4
|
[] |
no_license
|
sarthak77/Basics-of-ML-AI
|
e941c6653bca95278cc62ee7ba229e8eaf4e309b
|
cb2ba9d271da919846211cf8496e29aff6beaa46
|
refs/heads/master
| 2020-07-25T10:33:54.420972
| 2020-01-09T19:25:57
| 2020-01-09T19:25:57
| 208,257,383
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,409
|
py
|
#Import modules
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.svm import SVC
import keras
from keras import regularizers
import keras.optimizers
import keras.initializers
from keras.models import Sequential
from keras.models import Model
from keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape
from keras.layers import Dense, Dropout
from keras.layers import Input, add
from keras.datasets import mnist
"""
Load the data
NOTrS:training samples
NOTeS:test samples
TRC:training class
TRL:training labels
TEC:test class
TEL:test labels
"""
NOTrS=10000
NOTeS=1000
(TRC,TRL),(TEC,TEL)=mnist.load_data()
TRC=TRC.reshape(len(TRC),28*28)
TRC=TRC[0:NOTrS]
TRL=TRL[0:NOTrS]
TRC=TRC/255
predtest=keras.utils.to_categorical(TEL,10)
TEC=TEC.reshape(len(TEC),28*28)
TEC=TEC[0:NOTeS]
TEL=TEL[0:NOTeS]
TEC=TEC/255
predtrain=keras.utils.to_categorical(TRL,10)
#Initialise parameters
NOE=20
B=128
temp=[10,50,100,300,400,500]
encoders=[]
#Allpy NN
for i in range(6):
x=Input(shape=(784,))
H1=Dense(temp[i],activation='relu')(x)
h=Dense(temp[i]//2,activation='relu')(H1)
H2=Dense(temp[i],activation='relu')(h)
r=Dense(784,activation='sigmoid')(H2)
autoencoder=Model(inputs=x,outputs=r)
autoencoder.compile(optimizer=keras.optimizers.Adam(),loss='mse')
history=autoencoder.fit(TRC,TRC, batch_size=B, epochs=NOE, verbose=0, validation_data=(TEC,TEC))
encoders.append(Model(autoencoder.input,autoencoder.layers[-3].output))
#Raw model
c=.1
MR=SVC(C=c,kernel='rbf')
MR.fit(TRC,TRL)
raw_pred=MR.predict(TEC)
#Find accuracy from raw model
ACCR=0
for i in range(len(TEL)):
if(raw_pred[i]==TEL[i]):
ACCR=ACCR+1
#Find accuracy from auto E
ACC=[]
model_encode=SVC(C=c,kernel='rbf')
for i in range(6):
E=encoders[i]
entr=E.predict(TRC)
ente=E.predict(TEC)
model_encode.fit(entr,TRL)
out=model_encode.predict(ente)
ACCEN=0
for i in range(len(TEL)):
if(out[i]==TEL[i]):
ACCEN=ACCEN+1
ACC.append(ACCEN/10)
#plotting
#calculate X and Y
Y=[temp[i]+temp[i]//2 for i in range(len(temp))]
Y.append("Raw")
X=np.arange(7)
ACC.append(ACCR/10)
plt.bar(X,ACC, align='center', alpha=0.5)
plt.ylabel('Accuracy')
plt.xticks(X, Y)
plt.title('SVM classifier with RBF kernel')
plt.tight_layout()
plt.show()
|
[
"sarthak.singhal@students.iiit.ac.in"
] |
sarthak.singhal@students.iiit.ac.in
|
e8518de3fd37b928d126d13d14cc0fe03395fbf7
|
b838c392fec9934d73b6b605d672667bf1d5e3fd
|
/backend/application.py
|
88743c87cdba3245e549229a5b6501dfa63b214f
|
[] |
no_license
|
hmisonne/Canoo_SWOPS_test
|
25ddcdb624cfeb8542206e3c055a5b4fba95f328
|
01c1556c60680674d51f8047f9c3c5afe8b91a03
|
refs/heads/main
| 2023-04-10T14:23:43.977490
| 2021-04-23T20:15:16
| 2021-04-23T20:15:16
| 360,990,095
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,834
|
py
|
from flask import Flask
from flask import render_template, abort, redirect, url_for, request, jsonify
from flask_cors import CORS
import json
import uuid
app = Flask(__name__)
CORS(app)
# Configure "database" as json file
database_path = "data.json"
app.config['JSON_DATA'] = database_path
def read_json():
    """Load and return the whole JSON 'database' from the configured file."""
    path = app.config["JSON_DATA"]
    with open(path) as jsonFile:
        return json.load(jsonFile)
def write_json(data):
    """Overwrite the JSON 'database' file with *data* (full rewrite, not atomic)."""
    path = app.config["JSON_DATA"]
    with open(path, "w") as jsonFile:
        json.dump(data, jsonFile)
@app.route("/temperature", methods=['GET'])
def get_temperature():
try:
data = read_json()
result = data['temperature']
response = {
'success': True,
'data': result
}
app.logger.info('%s %s %s',request.method, request.url_rule, response)
return jsonify(response)
except:
app.logger.info('%s %s %s',request.method, request.url_rule, "422: unprocessable")
abort(422)
@app.route("/temperature", methods=[ 'POST'])
def set_temperature():
body = request.get_json()
value = body.get('temperature', None)
if value is None or type(value) != int:
app.logger.error('%s %s %s',request.method, request.url_rule, "400: Bad request")
abort(400)
data = read_json()
data["temperature"] = value
write_json(data)
response = {
'success': True,
'data': value
}
app.logger.info('%s %s %s',request.method, request.url_rule, response)
return jsonify(response)
@app.route("/lights", methods=["GET"])
def get_lights():
try:
data = read_json()
result = list(data['lights'].values())
response = {
'success': True,
'data': result
}
app.logger.info('%s %s %s',request.method, request.url_rule, response)
return jsonify(response)
except:
app.logger.info('%s %s %s',request.method, request.url_rule, "422: unprocessable")
abort(422)
@app.route("/lights", methods=["POST"])
def add_light():
try:
data = read_json()
light_id = uuid.uuid1().hex
newLight = {
"id": light_id,
"turnedOn": False,
}
data["lights"][light_id] = newLight
write_json(data)
response = {
'success': True,
'data': newLight
}
app.logger.info('%s %s %s',request.method, request.url_rule, response)
return jsonify(response)
except:
app.logger.info('%s %s %s',request.method, request.url_rule, "422: unprocessable")
abort(422)
@app.route("/lights/<light_id>", methods=["GET"])
def get_light(light_id):
data = read_json()
light = data["lights"].get(light_id, None)
if light is None:
app.logger.error('%s %s=%s %s',request.method, request.url_rule, light_id, "404: Resource not found")
abort(404)
response = {
'success': True,
'data': light
}
app.logger.info('%s %s=%s %s',request.method, request.url_rule, light_id, response)
return jsonify(response)
@app.route("/lights/<light_id>", methods=["DELETE"])
def remove_light(light_id):
data = read_json()
light_toDelete = data["lights"].get(light_id, None)
if light_toDelete is None:
app.logger.error('%s %s=%s %s',request.method, request.url_rule, light_id, "404: Resource not found")
abort(404)
del data["lights"][light_id]
write_json(data)
response = {
'success': True,
'light_deleted': light_id
}
app.logger.info('%s %s=%s %s',request.method, request.url_rule, light_id, response)
return jsonify(response)
@app.route("/lights/<light_id>", methods=["PUT"])
def toggle_light(light_id):
    """Flip the on/off state of one light and persist the change.

    Returns the updated light record, or aborts with 404 if the id is unknown.
    """
    data = read_json()
    light_toToggle = data["lights"].get(light_id, None)
    if light_toToggle is None:
        app.logger.error('%s %s=%s %s',request.method, request.url_rule, light_id, "404: Resource not found")
        abort(404)
    # invert the stored state and write the whole database back to disk
    light_toToggle['turnedOn'] = not light_toToggle['turnedOn']
    write_json(data)
    response = {
        'success': True,
        'data': light_toToggle
    }
    app.logger.info('%s %s=%s %s',request.method, request.url_rule, light_id, response)
    return jsonify(response)
@app.errorhandler(404)
def resource_not_found(error):
return jsonify({
"success": False,
"error": 404,
"message": "Resource not found"
}), 404
@app.errorhandler(422)
def unprocessable(error):
return jsonify({
"success": False,
"error": 422,
"message": "unprocessable"
}), 422
@app.errorhandler(400)
def bad_request(error):
return jsonify({
"success": False,
"error": 400,
"message": "Bad request"
}), 400
|
[
"helene.misonne@gmail.com"
] |
helene.misonne@gmail.com
|
22c07331e1b110b09e94605b96187b0a4ec40108
|
c1960138a37d9b87bbc6ebd225ec54e09ede4a33
|
/Week 11/ArnieMartin_CircuitPlayground_Single_Mouse_Button_Advanced.py
|
318bd949acb9fb6b6f28b56d917f96a7274d3b7d
|
[] |
no_license
|
apalileo/ACCD_PHCR_SP21
|
76d0e27c4203a2e90270cb2d84a75169f5db5240
|
37923f70f4c5536b18f0353470bedab200c67bad
|
refs/heads/main
| 2023-04-07T00:01:35.922061
| 2021-04-15T18:02:22
| 2021-04-15T18:02:22
| 332,101,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
# more advanced-ish use of single onboard button to control LMB and RMB
# works with CPX and CPB, copy adafuit_hid to /lib
import board
import time
import digitalio
import busio
import adafruit_lis3dh
import usb_hid
from adafruit_hid.mouse import Mouse
killswitch = digitalio.DigitalInOut(board.SLIDE_SWITCH)
killswitch.direction = digitalio.Direction.INPUT
killswitch.pull = digitalio.Pull.UP
smb = digitalio.DigitalInOut(board.BUTTON_A)
smb.direction = digitalio.Direction.INPUT
smb.pull = digitalio.Pull.DOWN
smb_pre = smb.value
mouse = Mouse(usb_hid.devices)
smb_time = 0
RMB_DELAY = 0.5
while True:
if killswitch.value:
if smb.value is not smb_pre:
smb_pre = smb.value
if smb.value:
print("button clicked...")
smb_time = time.monotonic()
print("press time is", smb_time)
if not smb.value:
print("release time is", time.monotonic())
eltime = time.monotonic() - smb_time
print("elapsed time is", eltime)
if eltime < RMB_DELAY:
print("short press... LMB clicked!")
mouse.click(Mouse.LEFT_BUTTON)
smb_time = 0
else:
if smb_time != 0:
eltime = time.monotonic() - smb_time
print("elapsed time is", eltime)
time.sleep(0.01)
if eltime > RMB_DELAY:
print("long press... RMB clicked!")
mouse.click(Mouse.RIGHT_BUTTON)
smb_time = 0
|
[
"55570902+apalileo@users.noreply.github.com"
] |
55570902+apalileo@users.noreply.github.com
|
9d01e66f647965d123823224278de34331718f3a
|
4e67c2edd71493a98a3f13e5b2073c1d05b1b656
|
/Semestre 02/ProjetoIntegrador2/Aula 08.27.2020/Metodos_lista.py
|
75d15a7c7e7bd102bbd084a1482db9446b15b851
|
[] |
no_license
|
felipellima83/UniCEUB
|
05991d7a02b13cd4e236f3be3a34726af2dc1504
|
dbc44866545b5247d1b5f76ec6e9b7778e54093e
|
refs/heads/master
| 2023-07-08T19:04:19.830473
| 2021-08-12T12:33:49
| 2021-08-12T12:33:49
| 249,958,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
lista = [ ]
t_lista = int(input("Quantidade: "))
for i in range(t_lista):
n = int(input("Digite o número: "))
lista.append(n)
print(lista)
print(len(lista))
print(sum(lista))
print(max(lista))
print(min(lista))
pesquisa = int(input("Qual: "))
if pesquisa in lista:
posicao = lista.index(pesquisa)
print(posicao)
else:
print("Não tem")
#lista.sort()
lista.reverse()
print(lista)
print("Média: ",sum(lista)/len(lista))
|
[
"felipellima83@gmail.com"
] |
felipellima83@gmail.com
|
0af3834982b72c9cdf345b6aff6ffd9e7ccea915
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02716/s014491177.py
|
f6f24c53a93fa9d93bb64a4642c8896800c4707e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
import sys
sr = lambda: sys.stdin.readline().rstrip()
ir = lambda: int(sr())
lr = lambda: list(map(int, sr().split()))
from collections import defaultdict
def resolve():
    # Read N values; pick items so no two chosen are adjacent, ending with
    # exactly N//2 picks, and maximise the chosen sum.
    N = ir()
    A = lr()
    # dp[i, j, used] = best sum over the first i items with j items taken,
    # used = 1 iff item i-1 was taken (a take is only allowed after used == 0,
    # which enforces the no-two-adjacent constraint).
    dp = defaultdict(lambda: -float('inf'))
    dp[0, 0, 0] = 0
    for i in range(N):
        # only counts j close to i//2 can still reach exactly N//2 at the end,
        # so the inner loop is pruned to a constant-width window.
        for j in range(max(i//2-1, 0), i//2+2):
            dp[i+1, j+1, 1] = dp[i, j, 0]+A[i]
            dp[i+1, j, 0] = max(dp[i, j, 0], dp[i, j, 1])
    # print(dp)
    print(max(dp[N, N//2, 1], dp[N, N//2, 0]))
resolve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
bfb26558c8d4426d10ad050db99ba001fa79afe6
|
c727d96f9ee57a186ac2f6a069e7863017131d9b
|
/mypy_boto3_builder/structures/waiter.py
|
573b3a99a0ef983c7359c5cc3713e46a8412d8b4
|
[
"MIT"
] |
permissive
|
ayobuba/mypy_boto3_builder
|
66438be3c9fce1e6215a58692b69496250eda433
|
9315adca025a5831ab1c2c00d3ed8602a21e8c74
|
refs/heads/master
| 2022-11-08T12:38:00.596915
| 2020-06-09T23:10:30
| 2020-06-09T23:10:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,734
|
py
|
"""
Boto3 client Waiter.
"""
from dataclasses import dataclass, field
from typing import List
from botocore.waiter import Waiter as Boto3Waiter
from mypy_boto3_builder.enums.service_module_name import ServiceModuleName
from mypy_boto3_builder.import_helpers.import_string import ImportString
from mypy_boto3_builder.service_name import ServiceName, ServiceNameCatalog
from mypy_boto3_builder.structures.argument import Argument
from mypy_boto3_builder.structures.class_record import ClassRecord
from mypy_boto3_builder.structures.method import Method
from mypy_boto3_builder.type_annotations.external_import import ExternalImport
from mypy_boto3_builder.type_annotations.fake_annotation import FakeAnnotation
from mypy_boto3_builder.type_annotations.type import Type
from mypy_boto3_builder.type_annotations.type_class import TypeClass
from mypy_boto3_builder.type_annotations.type_literal import TypeLiteral
@dataclass
class Waiter(ClassRecord):
    """
    Boto3 client Waiter.
    """
    waiter_name: str = "waiter_name"
    service_name: ServiceName = ServiceNameCatalog.ec2
    bases: List[FakeAnnotation] = field(
        default_factory=lambda: [TypeClass(Boto3Waiter, alias="Boto3Waiter")]
    )
    def get_client_method(self) -> Method:
        """Build the overloaded ``get_waiter`` stub method for the client class.

        ``waiter_name`` is typed as a ``Literal`` of this waiter's name, so a
        type checker narrows the return to this specific waiter class, which
        is imported from the service's ``waiter`` submodule.
        """
        return Method(
            name="get_waiter",
            decorators=[Type.overload],
            docstring=self.docstring,
            arguments=[
                Argument("self", None),
                Argument("waiter_name", TypeLiteral(self.waiter_name)),
            ],
            return_type=ExternalImport(
                source=ImportString(self.service_name.module_name, ServiceModuleName.waiter.value),
                name=self.name,
            ),
        )
|
[
"volshebnyi@gmail.com"
] |
volshebnyi@gmail.com
|
33ae7c9f65c1462e20cf31b50507a4e2a51c791e
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/Advanced Corporate Actions/FPythonCode/FCorpActionPayoutViewer.py
|
aba8a545dc56d44c822c7b513a9f647546083cb7
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,739
|
py
|
""" Compiled: 2020-09-18 10:38:49 """
#__src_file__ = "extensions/advanced_corporate_actions/./etc/FCorpActionPayoutViewer.py"
import acm
import FUxCore
def SelectFirstItem(objList, itemList):
if objList:
firstItem = objList[0]
itemList.SetData(firstItem)
def RemoveItem(objList, itemList, item):
index = objList.index(item)
objList.remove(item)
itemList.RemoveItem(item)
if objList:
if len(objList) <= index:
index -= 1
newItem = objList[index]
if newItem:
itemList.SetData(newItem)
def OnDeleteClicked(self, cd):
val = self.m_values.GetData()
if val:
acm.FCorporateActionPayout[val].Delete()
RemoveItem(self.valList, self.m_values, val)
def OnValDoubleClicked(self, cd):
val = self.m_values.GetData()
if val:
acm.StartRunScript(acm.FCorporateActionPayout[val], 'Modify')
class PayoutsListCustomDialog(FUxCore.LayoutDialog):
LIST_VALUES = 'listValues'
BTN_DELETE = 'btnDelete'
def __init__(self, params):
self.choices = params['choices']
self.selected = params['selected']
self.caption = 'Payouts List'
self.valLabel = 'Payouts'
self.valList = []
self.selectList = []
def HandleApply(self):
resultDic = acm.FDictionary()
resultDic.AtPut('result', self.valList)
return resultDic
def SetControlData(self):
SelectFirstItem(self.valList, self.m_values)
def HandleCreate(self, dlg, layout):
self.m_fuxDlg = dlg
self.m_fuxDlg.Caption(self.caption)
self.m_values = layout.GetControl(self.LIST_VALUES)
self.m_values.AddCallback('DefaultAction', OnValDoubleClicked, self)
self.m_btnDelete = layout.GetControl(self.BTN_DELETE)
self.m_btnDelete.AddCallback('Activate', OnDeleteClicked, self)
self.PopulateControls()
self.SetControlData()
def CreateLayout(self):
b = acm.FUxLayoutBuilder()
b.BeginVertBox()
b. BeginHorzBox()
b. AddSpace(3)
b. BeginVertBox()
b. AddLabel("lblValues", self.valLabel)
b. AddList(self.LIST_VALUES, 10, -1, 15, -1)
b. EndBox()
b. AddSpace(3)
b. EndBox()
b. AddSpace(5)
b. BeginHorzBox()
b. AddFill()
b. AddButton(self.BTN_DELETE, "Delete")
b. AddButton('ok', 'Close')
b. AddSpace(3)
b. EndBox()
b.EndBox()
return b
def PopulateControls(self):
self.valList = [s for s in self.selected]
self.valList.sort()
self.m_values.Populate(self.valList)
if self.valList:
self.m_values.SetData(self.valList[0])
|
[
"81222178+nenchoabsa@users.noreply.github.com"
] |
81222178+nenchoabsa@users.noreply.github.com
|
0950fd4826dade797628563d1bc43cbc412d6018
|
7234e6c72eb3f09c4a66dbe91f00fdf7742f010f
|
/algo/dp/medium/longestArithmeticSequence2.py
|
50004a01f60d588e9b4155d465a095777a44c4e1
|
[] |
no_license
|
srinathalla/python
|
718ac603473e7bed060ba66aa3d39a90cf7ef69d
|
b6c546070b1738350303df3939888d1b0e90e89b
|
refs/heads/master
| 2021-06-13T06:11:42.653311
| 2021-02-19T06:01:41
| 2021-02-19T06:01:41
| 150,374,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
from typing import List
class Solution:
    def longestArithSeqLength(self, A: List[int]) -> int:
        """Return the length of the longest arithmetic subsequence of A.

        dp[(i, d)] is the length of the longest arithmetic subsequence that
        ends at index i with common difference d.  O(n^2) time and space.
        """
        if len(A) < 2:
            # 0 or 1 elements are trivially arithmetic; the original code
            # raised ValueError on max() of an empty dict here.
            return len(A)
        dp = {}
        for i in range(1, len(A)):
            for j in range(i):
                d = A[i] - A[j]
                # extend the best run ending at j with the same difference;
                # a bare (j, i) pair counts as length 2 (default 1, then +1).
                dp[i, d] = dp.get((j, d), 1) + 1
        # leftover debug print(dp) removed
        return max(dp.values())
s = Solution()
print(s.longestArithSeqLength([3, 6, 9, 12]))
|
[
"srinathb10j.ik@gmail.com"
] |
srinathb10j.ik@gmail.com
|
c17b10b518d979feca78ccbe1e9788aeba9bd12a
|
37a776779a43662ed9bb6523f1f42fd3dc215610
|
/TTHAnalysis/python/tools/treeReaderArrayTools.py
|
f35c73164ab905d806528e773e44aa78dd865187
|
[] |
no_license
|
ashrafkasem/cmgtools-lite
|
818b3bc42ee3b5944074b45bc74248b8c32fa514
|
14d0a465aa5cfaa2036a38fcc2589cda159c6f72
|
refs/heads/80X
| 2022-12-31T02:07:00.683384
| 2017-01-25T10:19:40
| 2017-01-25T10:19:40
| 109,703,088
| 0
| 0
| null | 2017-11-06T14:06:09
| 2017-11-06T14:06:09
| null |
UTF-8
|
Python
| false
| false
| 4,008
|
py
|
import types
import ROOT
def initTree(tree):
    """Attach a TTreeReader plus reader caches to *tree* (monkey-patched API).

    After this call the tree exposes arrayReader(), valueReader() and
    readBranch() as bound methods for lazy, typed branch access.
    """
    tree.entry = -1
    tree._ttreereader = ROOT.TTreeReader(tree)
    tree._ttreereader.SetEntry(0)
    tree._ttrvs = {}  # branch name -> TTreeReaderValue
    tree._ttras = {}  # branch name -> TTreeReaderArray
    tree._leafTypes = {}  # branch name -> python type of the leaf
    # incremented each time the readers are rebuilt (see _remakeAllReaders)
    tree._ttreereaderversion = 1
    tree.arrayReader = types.MethodType(getArrayReader, tree)
    tree.valueReader = types.MethodType(getValueReader, tree)
    tree.readBranch = types.MethodType(readBranch, tree)
def getArrayReader(tree, branchName, isClean=False):
"""Make a reader for branch branchName containing a variable-length value array.
If you are sure nobody has yet read from the tree, you can set isClean to True and save some overhead."""
if branchName not in tree._ttras:
if not tree.GetBranch(branchName): raise RuntimeError, "Can't find branch '%s'" % branchName
leaf = tree.GetBranch(branchName).GetLeaf(branchName)
if not leaf.GetLen() == 0: raise RuntimeError, "Branch %s is not a variable-length value array" % branchName
typ = _rootType2Python[leaf.GetTypeName()]
tree._ttras[branchName] = _makeArrayReader(tree, typ, branchName, remakeAllFirst=not(isClean))
return tree._ttras[branchName]
def getValueReader(tree, branchName, isClean=False):
"""Make a reader for branch branchName containing a single value.
If you are sure nobody has yet read from the tree, you can set isClean to True and save some overhead."""
if branchName not in tree._ttrvs:
if not tree.GetBranch(branchName): raise RuntimeError, "Can't find branch '%s'" % branchName
leaf = tree.GetBranch(branchName).GetLeaf(branchName)
if not leaf.GetLen() == 1: raise RuntimeError, "Branch %s is not a value" % branchName
typ = _rootType2Python[leaf.GetTypeName()]
tree._ttrvs[branchName] = _makeValueReader(tree, typ, branchName, remakeAllFirst=not(isClean))
return tree._ttrvs[branchName]
def readBranch(tree, branchName):
"""Return the branch value if the branch is a value, and a TreeReaderArray if the branch is an array"""
if branchName in tree._ttras:
return tree._ttras[branchName]
elif branchName in tree._ttrvs:
return tree._ttrvs[branchName].Get()[0]
else:
branch = tree.GetBranch(branchName)
if not branch: raise RuntimeError, "Unknown branch %s" % branchName
leaf = branch.GetLeaf(branchName)
if leaf.GetTypeName() not in _rootType2Python:
raise RuntimeError, "Branch %s has unsupported type %s" % (branchName, leaf.GetTypeName())
typ = _rootType2Python[leaf.GetTypeName()]
if leaf.GetLen() == 1:
return _makeValueReader(tree, typ, branchName).Get()[0]
else:
return _makeArrayReader(tree, typ, branchName)
####### PRIVATE IMPLEMENTATION PART #######
_rootType2Python = { 'Int_t':int, 'Long_t':long, 'UInt_t':int, 'ULong_t':long,
'Float_t':float, 'Double_t':float }
def _makeArrayReader(tree, typ, nam, remakeAllFirst=True):
if remakeAllFirst: _remakeAllReaders(tree)
ttra = ROOT.TTreeReaderArray(typ)(tree._ttreereader, nam)
tree._leafTypes[nam] = typ
tree._ttras[nam] = ttra;
tree._ttreereader.SetEntry(tree.entry)
return tree._ttras[nam]
def _makeValueReader(tree, typ, nam, remakeAllFirst=True):
if remakeAllFirst: _remakeAllReaders(tree)
ttrv = ROOT.TTreeReaderValue(typ)(tree._ttreereader, nam)
tree._leafTypes[nam] = typ
tree._ttrvs[nam] = ttrv
tree._ttreereader.SetEntry(tree.entry)
return tree._ttrvs[nam]
def _remakeAllReaders(tree):
_ttreereader = ROOT.TTreeReader(tree)
_ttrvs = {}
for k in tree._ttrvs.iterkeys():
_ttrvs[k] = ROOT.TTreeReaderValue(tree._leafTypes[k])(_ttreereader,k)
_ttras = {}
for k in tree._ttras.iterkeys():
_ttras[k] = ROOT.TTreeReaderArray(tree._leafTypes[k])(_ttreereader,k)
tree._ttrvs = _ttrvs
tree._ttras = _ttras
tree._ttreereader = _ttreereader
tree._ttreereaderversion += 1
|
[
"gpetruc@gmail.com"
] |
gpetruc@gmail.com
|
d5b36533b86e2e386538d1e81c4dcd407fa84e4e
|
b0d0dbb0742e3925bc8adab9bb7b7ee458972ad6
|
/analayze.py
|
13faa7de5dd2d27767b804c06edf05fc13229fab
|
[
"Apache-2.0"
] |
permissive
|
miyosuda/dendritic_bp
|
0cd0e23da0db7ba7460f7b209a92362f9f0f28fe
|
fd831b6ad9ae1993a14ba970408b80abfd45f0b1
|
refs/heads/master
| 2020-04-05T12:47:53.112472
| 2019-04-12T00:46:59
| 2019-04-12T00:46:59
| 156,880,868
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,657
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from model import Layer, LAYER_TYPE_BOTTOM, LAYER_TYPE_HIDDEN, LAYER_TYPE_TOP, LowPassFilter
from option import Option
# TopとBottomだけを接続して分析するためのコード
class Network(object):
def __init__(self):
option = Option()
self.layers = [None] * 2
self.layers[0] = Layer(pd_unit_size=1, layer_type=LAYER_TYPE_BOTTOM, option=option)
self.layers[1] = Layer(pd_unit_size=1, layer_type=LAYER_TYPE_TOP, option=option)
self.layers[0].connect_to(self.layers[1])
self.set_target_prediction_mode()
def set_target_prediction_mode(self):
# Pyramidalのweightを更新する
for layer in self.layers:
# ここを変えている
layer.train_w_pp_bu = True
layer.train_w_pp_td = False
layer.train_w_ip = False
layer.train_w_pi = False
for i,layer in enumerate(self.layers):
option = Option.get_target_prediction_option(i)
layer.set_option(option)
def update(self, dt):
for layer in self.layers:
layer.update_potential(dt)
for layer in self.layers:
layer.update_weight(dt)
def set_input_firing_rate(self, values):
self.layers[0].set_input_firing_rate(values)
def set_target_firing_rate(self, values):
self.layers[1].set_target_firing_rate(values)
def train_target_prediction(network):
dt = 0.1
lp_filter = LowPassFilter(dt, 3)
target_values = np.array([0.8], dtype=np.float32)
values = np.array([0.5], dtype=np.float32)
network.set_target_firing_rate(target_values)
network.set_input_firing_rate(values)
iteration = 2000
for i in range(iteration):
for j in range(1000):
network.update(dt)
du = network.layers[1].u_target - network.layers[1].u_p
v_p_b = network.layers[1].v_p_b
u_p = network.layers[1].u_p
print("du={}, v_p_b={}, u_p={}".format(du, v_p_b, u_p))
"""
print("upper_r_p={}, upper_v_p_b_hat={}, upper_r_p_b={}, d_w_pp_bu={}".format(
network.layers[0].debug_upper_r_p,
network.layers[0].debug_upper_v_p_b_hat,
network.layers[0].debug_upper_r_p_b,
network.layers[0].debug_d_w_pp_bu))
"""
def main():
np.random.seed(seed=0)
network = Network()
train_target_prediction(network)
if __name__ == '__main__':
main()
|
[
"miyoshi@narr.jp"
] |
miyoshi@narr.jp
|
b794c2df5ea3e950d2a988e2baa61785911770f8
|
2dd560dc468af0af4ca44cb4cd37a0b807357063
|
/Leetcode/378. Kth Smallest Element in a Sorted Matrix/solution1.py
|
85aed3e0043f6c962b512d91df8783c990105928
|
[
"MIT"
] |
permissive
|
hi0t/Outtalent
|
460fe4a73788437ba6ce9ef1501291035c8ff1e8
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
refs/heads/master
| 2023-02-26T21:16:56.741589
| 2021-02-05T13:36:50
| 2021-02-05T13:36:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
class Solution:
    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
        """Return the k-th smallest value in a matrix with sorted rows and columns.

        Binary-searches the value range [matrix[0][0], matrix[-1][-1]]; for
        each candidate value, bisect counts per row how many entries do not
        exceed it.  O(n log n * log(range)) time, O(1) extra space.
        """
        low = matrix[0][0]
        high = matrix[-1][-1]
        while low <= high:
            candidate = (low + high) >> 1
            # total number of matrix entries <= candidate
            not_above = 0
            for row in matrix:
                not_above += bisect.bisect_right(row, candidate)
            if not_above >= k:
                high = candidate - 1
            else:
                low = candidate + 1
        return low
|
[
"info@crazysquirrel.ru"
] |
info@crazysquirrel.ru
|
e8937677ed1449ab35310cecf64433fe455a1935
|
835abbf0309503caddba77f4ed94ea3209e8784f
|
/kurs_dla_sredniozaawansowanych/itertools_groupby.py
|
7404c6c1405458b65739d460cc96b78a58e667c3
|
[] |
no_license
|
rafal-mizera/UDEMY
|
46873a01223a31b36de84fcfd13a5b9b1cf262e8
|
d463613ecd470dae4f17ce59d2f815f70942ea07
|
refs/heads/master
| 2023-07-19T12:14:44.818156
| 2021-09-01T16:14:37
| 2021-09-01T16:14:37
| 402,125,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
import os, itertools
def scantree(path):
    """Yield every os.DirEntry under *path*, depth-first.

    A directory entry is yielded before its contents; file entries are
    yielded as they are encountered within each directory listing.
    """
    for entry in os.scandir(path):
        if not entry.is_dir():
            yield entry
        else:
            yield entry
            # descend into the subdirectory before continuing the listing
            yield from scantree(entry.path)
# Walk a hard-coded projects directory, label each entry, then print a summary
# line per group (directories vs. files).
listing = scantree(r"C:\\Users\RMZ\PycharmProjects")
# groupby only merges *adjacent* items, so sort by the grouping key first.
listing = sorted(listing, key=lambda x: x.is_dir())
for el in listing:
    if el.is_dir():
        print(f"{el} jest folderem plików")
    if not el.is_dir():
        print(f"{el} jest plikiem")
for is_dir, elements in itertools.groupby(listing, key=lambda e: e.is_dir()):
    # single print per group; the original wrapped print() in another print(),
    # which emitted a spurious "None" line after every group
    print('DIR ' if is_dir else 'FILE', len(list(elements)))
|
[
"rafalmizera11@gmail.com"
] |
rafalmizera11@gmail.com
|
c04acce25630ba2428d0ebcd3095b7bceebd7a59
|
c75ec82316ed5322c5844912ce9c528c24360b9f
|
/nsd1909/py02/day02/jiecheng.py
|
966d2d9c92ecc9d5f7c1468b18df28cbad74f5c0
|
[] |
no_license
|
MrZhangzhg/nsd2019
|
a94cde22f2e4bd648bb9e56ca63827f558f3c083
|
54f6d2c7b348a69f13ad5f38f2fbdc8207528749
|
refs/heads/master
| 2021-08-22T17:38:27.697675
| 2020-02-22T08:36:21
| 2020-02-22T08:36:21
| 183,539,489
| 21
| 24
| null | 2020-05-17T12:07:55
| 2019-04-26T02:06:16
|
HTML
|
UTF-8
|
Python
| false
| false
| 385
|
py
|
def func(n):
    """Return n! computed recursively.

    Any n <= 1 (including 0) returns 1; the original base case only matched
    n == 1, so func(0) or a negative n recursed without bound.
    """
    if n <= 1:
        return 1
    return n * func(n - 1)
# Expansion of func(5): each frame multiplies n by func(n - 1) on the way back
# 5 * func(4)
# 5 * 4 * func(3)
# 5 * 4 * 3 * func(2)
# 5 * 4 * 3 * 2 * func(1)
# 5 * 4 * 3 * 2 * 1
if __name__ == '__main__':
    print(func(5))
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
43114d2f7132bdd755e261c108c200afc4f4c9a5
|
3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be
|
/google-cloud-sdk/lib/googlecloudsdk/surface/compute/instances/set_iam_policy.py
|
b9eea24fbd21a269b6f8e9fd753b33d2e48e35d9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twistedpair/google-cloud-sdk
|
37f04872cf1ab9c9ce5ec692d2201a93679827e3
|
1f9b424c40a87b46656fc9f5e2e9c81895c7e614
|
refs/heads/master
| 2023-08-18T18:42:59.622485
| 2023-08-15T00:00:00
| 2023-08-15T12:14:05
| 116,506,777
| 58
| 24
| null | 2022-02-14T22:01:53
| 2018-01-06T18:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 717
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
"""Command to set IAM policy for an instance resource."""
from googlecloudsdk.api_lib.compute import iam_base_classes
from googlecloudsdk.calliope import base
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SetIamPolicy(iam_base_classes.ZonalSetIamPolicy):
"""Set the IAM Policy for a Google Compute Engine instance resource."""
@staticmethod
def Args(parser):
iam_base_classes.ZonalSetIamPolicy.Args(parser, 'compute.instances')
@property
def service(self):
return self.compute.instances
@property
def resource_type(self):
return 'instances'
SetIamPolicy.detailed_help = iam_base_classes.SetIamPolicyHelp('instance')
|
[
"joe@longreen.io"
] |
joe@longreen.io
|
19b8bad12ca06aa46842267e1530a1532d13ae81
|
1ef56dcfef70ee14df8956eedd171f74406594af
|
/erp/biz/contact_unit_biz.py
|
44cf2026076d3860ea2709dc3884f82a98010696
|
[] |
no_license
|
journeyends/webtest
|
6c54ff19e01cd0cd99a34bcae55dd5701abf132f
|
2a24c6d7c52aa627edfbba3dd5eb9ccc16abe9fb
|
refs/heads/master
| 2020-04-17T07:38:52.873722
| 2019-03-21T09:36:50
| 2019-03-21T09:36:50
| 166,378,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
from erp.idal.i_contact_unit_dal import I_contact_unit_dal
class Contact_unit_biz:
    """Business-layer facade over the contact-unit data-access object (DAL)."""
    # NOTE(review): created once at class-definition time and shared by every
    # instance -- confirm I_contact_unit_dal().instance() is safe to share.
    dal = I_contact_unit_dal().instance()
    def getList(self, condition={}, user_info={}):
        """Fetch contact units matching *condition* on behalf of *user_info*.

        NOTE(review): mutable default arguments are shared across calls; this
        is safe only while getList (and the DAL) never mutates them.
        """
        dal = Contact_unit_biz.dal
        return dal.getList(condition=condition, user_info=user_info)
|
[
"songliyang@goldmantis.com"
] |
songliyang@goldmantis.com
|
d91ca4161a07129e9b9b27f17cd0465ab467fa7e
|
5ec7a72cab10dd39e0cc877caa1cb97c3cd9f3de
|
/garuda/models/dq/operation.py
|
ba1d5125ca3eb27edc83cce8bc6439db91f6ad76
|
[] |
no_license
|
raufer/spark-dsl
|
a1d311263fe48f64859c04cd63a79f48d8cd8fa4
|
a0fbf9561ba4567bc5d40bf2c7d289e214712aa6
|
refs/heads/main
| 2023-04-11T19:29:11.661273
| 2021-01-26T18:34:23
| 2021-01-26T18:34:23
| 367,982,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
import logging
from pydantic import BaseModel
from pydantic import validator
from garuda.dsl.mappings.operations import DSL_OPERATIONS
from garuda.models.dq.argument import Argument
from typing import List
logger = logging.getLogger(__name__)
class Operation(BaseModel):
    """
    An operation represents a Boolean Column as the result of
    a function application

    They are the most granular unit of computation of the engine

    Op :: (...) -> bool

    * each function is applicable to one or more columns;
    * the argument list can also contain other native types
    """
    id: str  # operation identifier — presumably a key into DSL_OPERATIONS; TODO confirm
    arguments: List[Argument]  # positional arguments supplied to the operation

    def __eq__(self, other):
        # Structural equality: same id and same (ordered) argument list.
        if isinstance(other, self.__class__):
            return all([
                self.id == other.id,
                self.arguments == other.arguments,
            ])
        else:
            # NOTE(review): returns False rather than NotImplemented, so the
            # reflected comparison on the other operand is never attempted.
            return False

    @staticmethod
    def from_data(data):
        # Alternate constructor from a plain mapping holding 'id' and
        # 'arguments' keys (raises KeyError if either is missing).
        id = data['id']
        arguments = data['arguments']
        return Operation(id=id, arguments=arguments)
|
[
"raufer92@gmail.com"
] |
raufer92@gmail.com
|
71ba4f507651adad9a4a6bea16e4fb6403802870
|
dbc4a65c17645fe2b77d4acd22d6c53ace261f89
|
/Numpy_bs/boolSetopSorting.py
|
3bc13b81a4446580efc48bf257a421f0260eef06
|
[] |
no_license
|
felipeguth/basics
|
52fba5e94c4ebeda53a881a4f3f806e10a000c98
|
40669c03324e44a36466d760e0b6af923e0fafd0
|
refs/heads/master
| 2023-08-15T10:30:59.759551
| 2021-08-20T18:21:43
| 2021-08-20T18:21:43
| 203,874,748
| 0
| 0
| null | 2023-07-22T14:17:41
| 2019-08-22T21:16:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,257
|
py
|
import numpy as np
# We create a 5 x 5 ndarray that contains integers from 0 to 24
X = np.arange(25).reshape(5, 5)
# We print X
print()
print('Original X = \n', X)
print()
# We use Boolean indexing to select elements in X:
print('The elements in X that are greater than 10:', X[X > 10])
print('The elements in X that less than or equal to 7:', X[X <= 7])
print('The elements in X that are between 10 and 17:', X[(X > 10) & (X < 17)])
# We use Boolean indexing to assign the elements that are between 10 and 17 the value of -1
X[(X > 10) & (X < 17)] = -1
# We print X
print()
print('X = \n', X)
print()
#SET OPS
# We create a rank 1 ndarray
x = np.array([1,2,3,4,5])
# We create a rank 1 ndarray
y = np.array([6,7,2,8,4])
# We print x
print()
print('x = ', x)
# We print y
print()
print('y = ', y)
# We use set operations to compare x and y:
print()
print('The elements that are both in x and y:', np.intersect1d(x,y))
print('The elements that are in x that are not in y:', np.setdiff1d(x,y))
print('All the elements of x and y:',np.union1d(x,y))
#SORT
# We create an unsorted rank 1 ndarray
x = np.random.randint(1,11,size=(10,))
# We print x
print()
print('Original x = ', x)
# We sort x and print the sorted array using sort as a function.
print()
print('Sorted x (out of place):', np.sort(x))
# When we sort out of place the original array remains intact. To see this we print x again
print()
print('x after sorting:', x)
# We sort x but only keep the unique elements in x
print(np.sort(np.unique(x)))
# We create an unsorted rank 1 ndarray
x = np.random.randint(1,11,size=(10,))
# We print x
print()
print('Original x = ', x)
# We sort x and print the sorted array using sort as a method.
x.sort()
# When we sort in place the original array is changed to the sorted array. To see this we print x again
print()
print('x after sorting:', x)
# We create an unsorted rank 2 ndarray
X = np.random.randint(1,11,size=(5,5))
# We print X
print()
print('Original X = \n', X)
print()
# We sort the columns of X and print the sorted array
print()
print('X with sorted columns :\n', np.sort(X, axis = 0))
# We sort the rows of X and print the sorted array
print()
print('X with sorted rows :\n', np.sort(X, axis = 1))
|
[
"felipeguth@gmail.com"
] |
felipeguth@gmail.com
|
874eb69589c1c8e3384d0af1565f3fb7bbaa4eb5
|
19da1a56f137a08772c347cf974be54e9c23c053
|
/lib/adafruit_bitmap_font/bitmap_font.py
|
b958a6d5e9f99646e075e137ab049f2028ea3062
|
[] |
no_license
|
mk53202/mk53202-timeclock-pyportal
|
d94f45a9d186190a4bc6130077baa6743a816ef3
|
230a858d429f8197c00cab3e67dcfd3b295ffbe0
|
refs/heads/master
| 2021-02-04T05:38:25.533292
| 2020-02-27T22:45:56
| 2020-02-27T22:45:56
| 243,626,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,343
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Scott Shawcroft for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_bitmap_font.bitmap_font`
====================================================
Loads bitmap glyphs from a variety of font.
* Author(s): Scott Shawcroft
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
__version__ = "1.0.3"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Bitmap_Font.git"
def load_font(filename, bitmap=None):
    """Load a font file and return a font object for it.

    The format is detected from both the filename extension and the file's
    magic bytes.  Returns ``None`` if the format is unsupported.

    :param str filename: path to the font file (.bdf, .pcf or .ttf)
    :param bitmap: bitmap class used to store glyphs; defaults to
        ``displayio.Bitmap`` (imported lazily so non-displayio hosts can
        supply their own class).
    """
    if not bitmap:
        import displayio
        bitmap = displayio.Bitmap
    font_file = open(filename, "rb")
    first_four = font_file.read(4)
    # b"STAR" is the start of the BDF "STARTFONT" header.
    if filename.endswith("bdf") and first_four == b"STAR":
        from . import bdf
        return bdf.BDF(font_file, bitmap)
    # Fix: use package-relative imports consistently (the original used bare
    # 'import pcf' / 'import ttf', which fails under absolute-import rules).
    if filename.endswith("pcf") and first_four == b"\x01fcp":
        from . import pcf
        return pcf.PCF(font_file)
    if filename.endswith("ttf") and first_four == b"\x00\x01\x00\x00":
        from . import ttf
        return ttf.TTF(font_file)
    # Unsupported format: close the handle instead of leaking it.
    font_file.close()
    return None
|
[
"mkoster@stack41.com"
] |
mkoster@stack41.com
|
9c97389a9de5ea9c3f054a8cc24a3d65143ec055
|
3da991a057cd81de802c40da2edd640878685258
|
/test/test_op_normalization.py
|
c706451847070bc4b6095de92fb86e51b8490ca8
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
sjx0451/pytorch
|
9f5b1c0c7c874f9da72c0190dc131944ba828ab7
|
3544f60f7602081398ee62bc5d652a87f4743dab
|
refs/heads/master
| 2022-12-01T22:30:29.888370
| 2020-08-13T23:45:58
| 2020-08-13T23:48:31
| 287,421,291
| 2
| 0
|
NOASSERTION
| 2020-08-14T02:06:11
| 2020-08-14T02:06:11
| null |
UTF-8
|
Python
| false
| false
| 4,347
|
py
|
import torch
from torch.testing import FileCheck
from torch.testing._internal.common_utils import \
(run_tests)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, skipCPUIfNoLapack, skipCUDAIfNoMagma)
# Information for generating an alias test
# NOTE: ending the alias_name with an underscore will interpret the test
# as the test for an inplace method of that name
class AliasInfo(object):
__slots__ = ['alias_name', 'alias_op', 'original_name', 'input', 'args', 'decorators']
def __init__(self,
alias_name, # the name of the alias
alias_op, # the aliased op
original_name, # the name of the original function
input, # the first tensor argument to the op
*,
args=(), # tuple of additional positional arguments
decorators=()): # decorators to apply to the test
self.alias_name = alias_name
self.alias_op = alias_op
self.original_name = original_name
self.input = input
self.args = args
self.decorators = decorators
alias_infos = (
AliasInfo('absolute', torch.absolute, 'abs',
torch.randn(20) - .5),
AliasInfo('absolute_', torch.Tensor.absolute_, 'abs_',
torch.randn(20) - .5),
AliasInfo('clip', torch.clip, 'clamp',
torch.randn(20), args=(.4, .6)),
AliasInfo('clip_', torch.Tensor.clip_, 'clamp_',
torch.randn(20), args=(.4, .6)),
AliasInfo('linalg.det', torch.linalg.det, 'det',
torch.randn(10, 10), decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma)),
AliasInfo('outer', torch.outer, 'ger',
torch.randn(20), args=(torch.randn(20),))
)
# Placeholder test class for validating that aliases are correctly
# translated when scripted and traced
class TestOpNormalization(JitTestCase):
pass
# Generates alias tests and adds them to the specified class (cls)
def create_alias_tests(cls):
for info in alias_infos:
@torch.no_grad()
def _test(self, device, info=info):
tensor = torch.tensor
op = info.alias_op
is_inplace = info.alias_name.endswith('_')
# Checks that scripting converts aliases
# NOTE: the code to test scripting must be generated since
# scripting does not support splatting args or directly
# calling torch.Tensor methods. The following
# splats args after the first tensor by inlining them as constants.
if is_inplace:
fn_template = '''
def _fn(t):
return t.{alias_name}({args})
'''
arg_string = ', '.join((str(arg) for arg in info.args))
script = fn_template.format(alias_name=info.alias_name, args=arg_string)
else:
fn_template = '''
def _fn(t):
return op(t{args})
'''
arg_string = ", " + ', '.join((str(arg) for arg in info.args))
script = fn_template.format(args=arg_string)
# Compiles script
scripted = torch.jit.CompilationUnit(script)._fn
# Acquires and checks the graph remaps the alias
scripted(info.input)
graph = scripted.graph_for(info.input)
FileCheck().check(info.original_name).check_not(info.alias_name).run(graph)
# Checks that tracing converts aliases
# NOTE: tracing has no problem splatting args
def _fn(t):
return info.alias_op(t, *info.args)
traced = torch.jit.trace(_fn, (info.input,))
traced(info.input)
graph = traced.graph_for(info.input)
FileCheck().check(info.original_name).check_not(info.alias_name).run(graph)
# Applies decorators
for decorator in info.decorators:
_test = decorator(_test)
test_name = "test_alias_" + info.alias_name
setattr(cls, test_name, _test)
create_alias_tests(TestOpNormalization)
instantiate_device_type_tests(TestOpNormalization, globals())
if __name__ == '__main__':
run_tests()
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
ca558233180fa9b596dcb99d1b085674e4516699
|
e12edf6cde9670eb3d4f1596cd648fddefaf480f
|
/acm-practice/2017-10-26/D_suspension_binary_search.py
|
1661f660b42de137344239ea94ac4938ef6967cb
|
[] |
no_license
|
VitamintK/AlgorithmProblems
|
7a7786a0377a236f5cc82ae3b623ecad7f0eb025
|
34da53e2e7d3b5964bafd1f3edb2e00dea9a729d
|
refs/heads/master
| 2023-06-29T00:56:53.360829
| 2023-06-14T16:33:14
| 2023-06-14T16:33:14
| 25,510,479
| 11
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
# Reads span d and sag s, then binary-searches the catenary parameter a so
# that a cable hung between supports d apart sags by exactly s.
# sag(a) = a*(cosh(d/(2a)) - 1), written out with exp below; sag decreases
# as a grows, so [l, r] always brackets the solution.
d, s = map(int, input().split())
l = 0
r = 123456789
eps = 0.000001  # NOTE(review): unused — the loop runs a fixed iteration count instead
import math
for i in range(123456):
    a = (r+l)/2
    term = (d/(2*a))
    # cosh(term) expressed via exponentials; subtract a to get the sag.
    sag = 0.5*a*(math.exp(term) + math.exp(-term)) - a
    if sag < s:
        r = a  # sag too small -> shrink a to increase the sag
    else:
        l = a
term = d/(2*l)
# Cable length = 2a*sinh(d/(2a)); sinh expanded via exponentials.
print(l * (math.exp(term) - math.exp(-term)))
|
[
"kevinwang@kevinwang.us"
] |
kevinwang@kevinwang.us
|
96f75f958e7146a11ac99d2224a3a125a48406dc
|
60ca69e2a4c6b05e6df44007fd9e4a4ed4425f14
|
/grand_contest/037/A.py
|
96f513b224075418d215cfb1e943c483fd3a12f5
|
[
"MIT"
] |
permissive
|
FGtatsuro/myatcoder
|
12a9daafc88efbb60fc0cd8840e594500fc3ee55
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
refs/heads/master
| 2021-06-13T15:24:07.906742
| 2021-05-16T11:47:09
| 2021-05-16T11:47:09
| 195,441,531
| 0
| 0
|
MIT
| 2021-05-16T11:47:10
| 2019-07-05T16:47:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,445
|
py
|
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)

# DP that splits the string into the maximum number of consecutive pieces of
# length 1 or 2 such that no two adjacent pieces are equal (AtCoder AGC 037 A).
s = input().strip()

# NOTE(review): a single-character string yields 0 here — confirm the
# expected answer for that edge case against the problem statement.
if len(s) == 1:
    print(0)
    sys.exit(0)
if len(s) == 2 and s[0] == s[1]:
    print(0)
    sys.exit(0)

# dp[i][0]: best piece count when the piece ending at index i has length 1
# dp[i][1]: best piece count when the piece ending at index i has length 2
dp = [[0] * 2 for _ in range(len(s))]
# For length >= 3 the provisional length-1/length-2 prefix results are reused.
# length-1 prefix
dp[0][0] = dp[0][1] = 1
# length-2 prefix
if s[0] == s[1]:
    # in practice the split is always one 2-character piece
    dp[1][0] = dp[1][1] = 1
else:
    dp[1][0] = 2
    dp[1][1] = 1
# length 3 and beyond
for i in range(2, len(s)):
    # dp[i][0]: the final piece has length 1.
    # If the last two characters are equal,
    # the previous piece must have length 2.
    if s[i] == s[i-1]:
        dp[i][0] = dp[i-1][1] + 1
    # If the last two characters differ, the previous piece
    # may have length 1 or length 2 — take the better.
    else:
        dp[i][0] = max(dp[i-1][0] + 1, dp[i-1][1] + 1)
    # dp[i][1]: the final piece has length 2.
    # If the two characters before the final pair equal the final pair,
    # the preceding piece must have length 1.
    if s[i-3:i-1] == s[i-1:i+1]:
        dp[i][1] = dp[i-2][0] + 1
    # Otherwise the preceding piece may have length 1 or 2.
    else:
        dp[i][1] = max(dp[i-2][0] + 1, dp[i-2][1] + 1)
print(max(dp[-1][0], dp[-1][1]))
|
[
"204491+FGtatsuro@users.noreply.github.com"
] |
204491+FGtatsuro@users.noreply.github.com
|
03ba7abe47bcab7d9872f3847417428cf3b2f187
|
cc2bb9ccc66783ac7d37454e4784df5e4a2d80f4
|
/server/mysqldb.py
|
f2feb13c8fbf3f3239e7424de6323d998304ed62
|
[] |
no_license
|
ronnyzh/Tornado_Server
|
f308b7e9c2112167b04cbe324e37b1f891999187
|
42112d39e4dea128d059dbfa53c410f3774dc4b1
|
refs/heads/master
| 2021-05-22T22:10:26.694262
| 2020-04-04T23:39:14
| 2020-04-04T23:39:14
| 253,118,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
# -*- coding:utf-8 -*-
# !/bin/python
"""
Author: ronnyzh
Date: 2019/10/15
Revision: 1.0.0
Description: Description
"""
from configs import CONFIGS
from model.model_mysql import MySQLdb
from model.model_asyn_mysql import Async_Mysql
import tornado.ioloop
from public.public_logger import *

# Dedicated rotating-file logger for MySQL activity.
mysql_logger = getHandlerLogger(fileLabel='mysql', loggerLabel='mysql', level=logging.DEBUG,
                                handler_types=[Handler_Class.RotatingFile])

# Synchronous (blocking) client built from the 'mysql' config section.
mysqlDB = MySQLdb(CONFIGS['mysql'])

# Asynchronous client; its connection pool is created lazily on the Tornado
# IOLoop via spawn_callback rather than at import time.
async_mysqlDb = Async_Mysql(CONFIGS['async_mysql'], logger=mysql_logger)
tornado.ioloop.IOLoop.current().spawn_callback(async_mysqlDb.createPool_async)
|
[
"ronnyzh@yeah.net"
] |
ronnyzh@yeah.net
|
a404e63cb25c5f20ac03b7c7d3a2529e11c00d6f
|
1388bcd6de659ffefe97e7e6c2aee685b5e7c534
|
/stubs/stubs/Utilityrate4.pyi
|
0b9fd269d9178b5f58e6650042aed54b3107d49f
|
[
"BSD-3-Clause"
] |
permissive
|
BRIK-Engenharia/pysam
|
a7b4b543131043510023a5c17b057ead0b39d440
|
2a4115f34419edf9776b0bbc7b3f453c958ce734
|
refs/heads/master
| 2022-12-06T05:15:35.364375
| 2020-09-03T22:59:17
| 2020-09-03T22:59:17
| 297,958,820
| 1
| 0
|
BSD-3-Clause
| 2020-09-23T12:13:32
| 2020-09-23T12:13:32
| null |
UTF-8
|
Python
| false
| false
| 6,297
|
pyi
|
class Common(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
analysis_period = float
load_escalation = tuple
rate_escalation = tuple
system_use_lifetime_output = float
ur_annual_min_charge = float
ur_dc_enable = float
ur_dc_flat_mat = tuple
ur_dc_sched_weekday = tuple
ur_dc_sched_weekend = tuple
ur_dc_tou_mat = tuple
ur_ec_sched_weekday = tuple
ur_ec_sched_weekend = tuple
ur_ec_tou_mat = tuple
ur_metering_option = float
ur_monthly_fixed_charge = float
ur_monthly_min_charge = float
ur_nm_yearend_sell_rate = float
ur_sell_eq_buy = float
class TimeSeries(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
gen = tuple
load = tuple
class Financials(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
inflation_rate = float
class AnnualOutput(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
degradation = tuple
class Outputs(object):
def assign(self):
pass
def export(self) -> Dict[Dict]:
pass
def __init__(self, *args, **kwargs):
pass
annual_electric_load = tuple
annual_energy_value = tuple
charge_w_sys_dc_fixed = tuple
charge_w_sys_dc_fixed_ym = tuple
charge_w_sys_dc_tou = tuple
charge_w_sys_dc_tou_ym = tuple
charge_w_sys_ec = tuple
charge_w_sys_ec_apr_tp = tuple
charge_w_sys_ec_aug_tp = tuple
charge_w_sys_ec_dec_tp = tuple
charge_w_sys_ec_feb_tp = tuple
charge_w_sys_ec_jan_tp = tuple
charge_w_sys_ec_jul_tp = tuple
charge_w_sys_ec_jun_tp = tuple
charge_w_sys_ec_mar_tp = tuple
charge_w_sys_ec_may_tp = tuple
charge_w_sys_ec_nov_tp = tuple
charge_w_sys_ec_oct_tp = tuple
charge_w_sys_ec_sep_tp = tuple
charge_w_sys_ec_ym = tuple
charge_w_sys_fixed = tuple
charge_w_sys_fixed_ym = tuple
charge_w_sys_minimum = tuple
charge_w_sys_minimum_ym = tuple
charge_wo_sys_dc_fixed = tuple
charge_wo_sys_dc_fixed_ym = tuple
charge_wo_sys_dc_tou = tuple
charge_wo_sys_dc_tou_ym = tuple
charge_wo_sys_ec = tuple
charge_wo_sys_ec_apr_tp = tuple
charge_wo_sys_ec_aug_tp = tuple
charge_wo_sys_ec_dec_tp = tuple
charge_wo_sys_ec_feb_tp = tuple
charge_wo_sys_ec_jan_tp = tuple
charge_wo_sys_ec_jul_tp = tuple
charge_wo_sys_ec_jun_tp = tuple
charge_wo_sys_ec_mar_tp = tuple
charge_wo_sys_ec_may_tp = tuple
charge_wo_sys_ec_nov_tp = tuple
charge_wo_sys_ec_oct_tp = tuple
charge_wo_sys_ec_sep_tp = tuple
charge_wo_sys_ec_ym = tuple
charge_wo_sys_fixed = tuple
charge_wo_sys_fixed_ym = tuple
charge_wo_sys_minimum = tuple
charge_wo_sys_minimum_ym = tuple
elec_cost_with_system = tuple
elec_cost_with_system_year1 = float
elec_cost_without_system = tuple
elec_cost_without_system_year1 = float
energy_w_sys_ec_apr_tp = tuple
energy_w_sys_ec_aug_tp = tuple
energy_w_sys_ec_dec_tp = tuple
energy_w_sys_ec_feb_tp = tuple
energy_w_sys_ec_jan_tp = tuple
energy_w_sys_ec_jul_tp = tuple
energy_w_sys_ec_jun_tp = tuple
energy_w_sys_ec_mar_tp = tuple
energy_w_sys_ec_may_tp = tuple
energy_w_sys_ec_nov_tp = tuple
energy_w_sys_ec_oct_tp = tuple
energy_w_sys_ec_sep_tp = tuple
energy_wo_sys_ec_apr_tp = tuple
energy_wo_sys_ec_aug_tp = tuple
energy_wo_sys_ec_dec_tp = tuple
energy_wo_sys_ec_feb_tp = tuple
energy_wo_sys_ec_jan_tp = tuple
energy_wo_sys_ec_jul_tp = tuple
energy_wo_sys_ec_jun_tp = tuple
energy_wo_sys_ec_mar_tp = tuple
energy_wo_sys_ec_may_tp = tuple
energy_wo_sys_ec_nov_tp = tuple
energy_wo_sys_ec_oct_tp = tuple
energy_wo_sys_ec_sep_tp = tuple
lifetime_load = tuple
savings_year1 = float
surplus_w_sys_ec_apr_tp = tuple
surplus_w_sys_ec_aug_tp = tuple
surplus_w_sys_ec_dec_tp = tuple
surplus_w_sys_ec_feb_tp = tuple
surplus_w_sys_ec_jan_tp = tuple
surplus_w_sys_ec_jul_tp = tuple
surplus_w_sys_ec_jun_tp = tuple
surplus_w_sys_ec_mar_tp = tuple
surplus_w_sys_ec_may_tp = tuple
surplus_w_sys_ec_nov_tp = tuple
surplus_w_sys_ec_oct_tp = tuple
surplus_w_sys_ec_sep_tp = tuple
utility_bill_w_sys = tuple
utility_bill_w_sys_ym = tuple
utility_bill_wo_sys = tuple
utility_bill_wo_sys_ym = tuple
year1_electric_load = float
year1_hourly_dc_peak_per_period = tuple
year1_hourly_dc_tou_schedule = tuple
year1_hourly_dc_with_system = tuple
year1_hourly_dc_without_system = tuple
year1_hourly_e_fromgrid = tuple
year1_hourly_e_tofromgrid = tuple
year1_hourly_e_togrid = tuple
year1_hourly_ec_tou_schedule = tuple
year1_hourly_ec_with_system = tuple
year1_hourly_ec_without_system = tuple
year1_hourly_p_system_to_load = tuple
year1_hourly_p_tofromgrid = tuple
year1_hourly_salespurchases_with_system = tuple
year1_hourly_salespurchases_without_system = tuple
year1_hourly_system_to_load = tuple
year1_monthly_cumulative_excess_dollars = tuple
year1_monthly_cumulative_excess_generation = tuple
year1_monthly_dc_fixed_with_system = tuple
year1_monthly_dc_fixed_without_system = tuple
year1_monthly_dc_tou_with_system = tuple
year1_monthly_dc_tou_without_system = tuple
year1_monthly_ec_charge_with_system = tuple
year1_monthly_ec_charge_without_system = tuple
year1_monthly_electricity_to_grid = tuple
year1_monthly_fixed_with_system = tuple
year1_monthly_fixed_without_system = tuple
year1_monthly_load = tuple
year1_monthly_minimum_with_system = tuple
year1_monthly_minimum_without_system = tuple
year1_monthly_peak_w_system = tuple
year1_monthly_peak_wo_system = tuple
year1_monthly_use_w_system = tuple
year1_monthly_use_wo_system = tuple
year1_monthly_utility_bill_w_sys = tuple
year1_monthly_utility_bill_wo_sys = tuple
class Utilityrate4(object):
    """Type stub for the PySAM 'Utilityrate4' compute-module wrapper.

    Method bodies are intentionally empty (`.pyi` stub); the nested stub
    classes defined above are re-exported as variable groups below.
    """
    def assign(self, dict):
        # NOTE(review): parameter shadows the builtin 'dict'; kept verbatim
        # to mirror the wrapped API signature.
        pass
    def value(self, name, value=None):
        pass
    def execute(self, int_verbosity):
        pass
    def export(self):
        pass
    def __getattribute__(self, *args, **kwargs):
        pass
    def __init__(self, *args, **kwargs):
        pass
    Common = Common
    TimeSeries = TimeSeries
    Financials = Financials
    AnnualOutput = AnnualOutput
    Outputs = Outputs

# Module-level factory functions mirroring the PySAM C-extension interface.
def default(config) -> Utilityrate4:
    pass
def new() -> Utilityrate4:
    pass
def wrap(ssc_data_t) -> Utilityrate4:
    pass
def from_existing(model, config="") -> Utilityrate4:
    pass

__loader__ = None
__spec__ = None
|
[
"dguittet@nrel.gov"
] |
dguittet@nrel.gov
|
f1c6b8b4f3add343bd97fe50d6c9ac34e1446bc6
|
099c5d0d21de342ad578be0fa06dde6be10b4e95
|
/saltcloud/clouds/joyent.py
|
5f6729d01b3fa922fc700a5e3345b90c34949cca
|
[
"Apache-2.0"
] |
permissive
|
lexual/salt-cloud
|
b09835795a0221c3d283e7e17c60ac68f76ee226
|
063ac2050f27181ea6da8e3ece528974f8284b72
|
refs/heads/master
| 2021-01-17T12:21:01.688573
| 2012-08-13T19:46:19
| 2012-08-13T19:46:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,900
|
py
|
'''
Joyent Cloud Module
===================
The Joyent Cloud module is used to intereact with the Joyend cloud system
it requires that the username and password to the joyent accound be configured
.. code-block:: yaml
# The Joyent login user
JOYENT.user: fred
# The Joyent user's password
JOYENT.password: saltybacon
# The location of the ssh private key that can log into the new vm
JOYENT.private_key: /root/joyent.pem
'''
# The import section is mostly libcloud boilerplate
# Import python libs
import os
import subprocess
import types
# Import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import MultiStepDeployment, ScriptDeployment, SSHKeyDeployment
# Import generic libcloud functions
import saltcloud.utils
from saltcloud.libcloudfuncs import *
# Some of the libcloud functions need to be in the same namespace as the
# functions defined in the module, so we create new function objects inside
# this module namespace
avail_images = types.FunctionType(avail_images.__code__, globals())
avail_sizes = types.FunctionType(avail_sizes.__code__, globals())
script = types.FunctionType(script.__code__, globals())
destroy = types.FunctionType(destroy.__code__, globals())
list_nodes = types.FunctionType(list_nodes.__code__, globals())
# Only load in this module is the JOYENT configurations are in place
def __virtual__():
    '''
    Set up the libcloud functions and check for JOYENT configs
    '''
    # Load this cloud module only when both Joyent credentials are present
    # in the salt-cloud configuration; returning False disables the module.
    if 'JOYENT.user' in __opts__ and 'JOYENT.password' in __opts__:
        return 'joyent'
    return False
def get_conn():
    '''
    Return a conn object for the passed vm data
    '''
    # Resolve the libcloud Joyent driver class and authenticate it with the
    # credentials taken from the salt-cloud configuration (__opts__).
    driver = get_driver(Provider.JOYENT)
    return driver(
        __opts__['JOYENT.user'],
        __opts__['JOYENT.password'],
    )
def create(vm_):
    '''
    Create a single vm from a data dict
    '''
    print('Creating Cloud VM {0}'.format(vm_['name']))
    conn = get_conn()
    deploy_script = script(vm_)
    kwargs = {}
    kwargs['name'] = vm_['name']
    kwargs['image'] = get_image(conn, vm_)
    kwargs['size'] = get_size(conn, vm_)
    data = conn.create_node(**kwargs)
    # Wait for SSH on the node's first public IP, then run the deploy script
    # remotely to bootstrap Salt.
    if saltcloud.utils.wait_for_ssh(data.public_ips[0]):
        # NOTE(review/security): the command is built by string interpolation
        # and executed with shell=True; shell metacharacters in the script
        # body would be interpreted by the remote/local shell. Prefer an
        # argument list with shell=False.
        cmd = ('ssh -oStrictHostKeyChecking=no -t -i {0} {1}@{2} '
               '"{3}"').format(
                    __opts__['JOYENT.private_key'],
                    'root',
                    data.public_ips[0],
                    deploy_script.script,
                )
        subprocess.call(cmd, shell=True)
    else:
        print('Failed to start Salt on Cloud VM {0}'.format(vm_['name']))
    print('Created Cloud VM {0} with the following values:'.format(
        vm_['name']
    ))
    # Dump all node attributes returned by libcloud for operator visibility.
    for key, val in data.__dict__.items():
        print('  {0}: {1}'.format(key, val))
|
[
"thatch45@gmail.com"
] |
thatch45@gmail.com
|
d9b758cc0246650a55d4873c5080463895e12575
|
6e7b1305887d25ae60251ce54ed2b94dc37ea06c
|
/Ecommerce/shopping/migrations/0008_order_product.py
|
310c90465104e681244c4f32141b478425d05d25
|
[] |
no_license
|
Aadeshkale/assignment
|
5bedaa95c6d2457d87b239117259c2a17d765c0f
|
bac0fa5523e3e6179dfe907f493a677adda7993b
|
refs/heads/master
| 2023-01-08T04:11:14.847962
| 2020-11-12T09:37:15
| 2020-11-12T09:37:15
| 304,293,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
# Generated by Django 3.1.1 on 2020-10-14 07:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the 'order_product' table: one row per ordered product, carrying a
    # snapshot of the buyer's shipping address/contact details plus foreign
    # keys to the product and the ordering user.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('shopping', '0007_auto_20201011_1315'),
    ]

    operations = [
        migrations.CreateModel(
            name='order_product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fullname', models.CharField(max_length=100, null=True)),
                ('house_no', models.CharField(blank=True, max_length=100, null=True)),
                ('area_name', models.CharField(max_length=100, null=True)),
                ('city', models.CharField(max_length=100, null=True)),
                ('state', models.CharField(max_length=100, null=True)),
                ('email', models.CharField(blank=True, max_length=100, null=True)),
                ('pincode', models.CharField(max_length=100, null=True)),
                ('mob1', models.CharField(max_length=100, null=True)),
                ('mob2', models.CharField(blank=True, max_length=100, null=True)),
                ('status', models.CharField(max_length=100, null=True)),
                ('landmark', models.CharField(max_length=100, null=True)),
                ('pro', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='shopping.product')),
                ('usr', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"aadeshkale0@gmail.com"
] |
aadeshkale0@gmail.com
|
d45f5f1e80eb49d2b796c96c0f6f74279719c04f
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/Calorimeter/CaloCondPhysAlgs/share/CaloNoise2Ntuple_data.py
|
b272eccf19df896d9085998da90333c610274a83
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,388
|
py
|
###############################################################
#
# Job options file for CaloNoise2Ntuple
#
#==============================================================
# configuration for data, read noise from database through CaloNoiseToolDB
if 'RunNumber' not in dir():
RunNumber = 258914
if 'LumiBlock' not in dir():
LumiBlock = 1
if 'GlobalTag' not in dir():
GlobalTag = 'CONDBR2-ES1PA-2015-04'
if 'Geometry' not in dir():
Geometry = 'ATLAS-R2-2015-03-01-00'
if 'outputNtuple' not in dir():
outputNtuple="cellnoise_data.root"
from RecExConfig.RecFlags import rec
rec.RunNumber.set_Value_and_Lock(RunNumber)
from PerfMonComps.PerfMonFlags import jobproperties
jobproperties.PerfMonFlags.doMonitoring = True
from AthenaCommon.Resilience import treatException,protectedInclude
protectedInclude( "PerfMonComps/PerfMonSvc_jobOptions.py" )
from AthenaCommon.DetFlags import DetFlags
DetFlags.all_setOff()
DetFlags.LAr_setOn()
DetFlags.Tile_setOn()
DetFlags.digitize.all_setOff()
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo.set_Value_and_Lock('atlas')
globalflags.DataSource.set_Value_and_Lock('data')
from CaloTools.CaloNoiseFlags import jobproperties
jobproperties.CaloNoiseFlags.FixedLuminosity.set_Value_and_Lock(-1.)
import AthenaCommon.AtlasUnixGeneratorJob
# Get a handle to the default top-level algorithm sequence
from AthenaCommon.AppMgr import ToolSvc
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
# Get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
# Get a handle to the ApplicationManager
from AthenaCommon.AppMgr import theApp
# Setup Db stuff
import AthenaPoolCnvSvc.AthenaPool
from AthenaCommon.GlobalFlags import jobproperties
jobproperties.Global.DetDescrVersion=Geometry
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
include( "CaloDetMgrDetDescrCnv/CaloDetMgrDetDescrCnv_joboptions.py")
include( "CaloIdCnv/CaloIdCnv_joboptions.py" )
include( "TileIdCnv/TileIdCnv_jobOptions.py" )
include( "LArDetDescr/LArDetDescr_joboptions.py" )
include("TileConditions/TileConditions_jobOptions.py" )
include("LArConditionsCommon/LArConditionsCommon_comm_jobOptions.py")
svcMgr.IOVDbSvc.GlobalTag = GlobalTag
from CaloTools.CaloNoiseToolDefault import CaloNoiseToolDefault
theCaloNoiseTool = CaloNoiseToolDefault()
theCaloNoiseTool.RescaleForHV=False
ToolSvc += theCaloNoiseTool
if "dbNoise" in dir():
conddb.addMarkup("/LAR/NoiseOfl/CellNoise","<db>"+dbNoise+"</db>")
if "folderTag" in dir():
conddb.addOverride("/LAR/NoiseOfl/CellNoise",folderTag)
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
from CaloCondPhysAlgs.CaloCondPhysAlgsConf import CaloNoise2Ntuple
theCaloNoise2Ntuple = CaloNoise2Ntuple("CaloNoise2Ntuple")
theCaloNoise2Ntuple.noiseTool = theCaloNoiseTool
topSequence += theCaloNoise2Ntuple
#--------------------------------------------------------------
#--- Dummy event loop parameters
#--------------------------------------------------------------
svcMgr.EventSelector.RunNumber = RunNumber
svcMgr.EventSelector.EventsPerRun = 1
svcMgr.EventSelector.FirstEvent = 0
svcMgr.EventSelector.EventsPerLB = 1
svcMgr.EventSelector.FirstLB = LumiBlock
svcMgr.EventSelector.InitialTimeStamp = 0
svcMgr.EventSelector.TimeStampInterval = 5
svcMgr.EventSelector.OverrideRunNumber=True
theApp.EvtMax = 1
# ------------------------------------------------------------------
# --- Ntuple
# ------------------------------------------------------------------
if not hasattr(ServiceMgr, 'THistSvc'):
from GaudiSvc.GaudiSvcConf import THistSvc
ServiceMgr += THistSvc()
ServiceMgr.THistSvc.Output = ["file1 DATAFILE='"+outputNtuple+"' OPT='RECREATE'"];
#--------------------------------------------------------------
# Set output level threshold (1=VERBOSE, 2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
#--------------------------------------------------------------
svcMgr.MessageSvc.OutputLevel = INFO
svcMgr.MessageSvc.debugLimit = 100000
svcMgr.MessageSvc.infoLimit = 100000
svcMgr.MessageSvc.Format = "% F%30W%S%7W%R%T %0W%M"
svcMgr.IOVDbSvc.OutputLevel = INFO
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
916d8ba379bd9fe0dcfbd4758f028f88f55562fa
|
d044e88e622d9f4ca350aa4fd9d95d7ba2fae50b
|
/application/budget/migrations/0005_auto_20210518_1730.py
|
2650551c2c42b96e2b7524ac633bd6e6f8c70b2f
|
[] |
no_license
|
Tiny-Hands/tinyhands
|
337d5845ab99861ae189de2b97b8b36203c33eef
|
77aa0bdcbd6f2cbedc7eaa1fa4779bb559d88584
|
refs/heads/develop
| 2023-09-06T04:23:06.330489
| 2023-08-31T11:31:17
| 2023-08-31T11:31:17
| 24,202,150
| 7
| 3
| null | 2023-08-31T11:31:18
| 2014-09-18T19:35:02
|
PLpgSQL
|
UTF-8
|
Python
| false
| false
| 683
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2021-05-18 17:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('budget', '0004_auto_20210120_1903'),
]
operations = [
migrations.AlterField(
model_name='otherbudgetitemcost',
name='form_section',
field=models.IntegerField(blank=True, null=True, verbose_name=[(1, 'Travel'), (2, 'Miscellaneous'), (3, 'Awareness'), (5, 'Potential Victim Care'), (7, 'Communication'), (8, 'Staff & Benefits'), (10, 'Administration'), (11, 'Past Month Sent Money')]),
),
]
|
[
"scrishel@sbcglobal.net"
] |
scrishel@sbcglobal.net
|
49f3bee05885e479558b18dcff7a038de7a5e3ba
|
250db406ad4a62e3d576e55b979bcfdc3407f226
|
/Leetcode分类/1. Array /Leetcode_27 Remove Element/my_solution.py
|
eb387a47da03f53268d0f9347d5e5e9c16df2127
|
[] |
no_license
|
chenshanghao/Interview_preparation
|
0830f0e461a2fe287b8ec24ae761974f50268767
|
4e7701d32990604c16ba18a8083c2108c0232306
|
refs/heads/master
| 2020-04-25T02:36:19.499364
| 2019-06-10T04:51:00
| 2019-06-10T04:51:00
| 172,446,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
index = 0
for i in range(len(nums)):
if nums[i] != val:
nums[index] = nums[i]
index += 1
return index
|
[
"21551021@zju.edu.cn"
] |
21551021@zju.edu.cn
|
f37cd5437e19fb4a0472381b67b5f554307c24ad
|
1e263d605d4eaf0fd20f90dd2aa4174574e3ebce
|
/components/ally-http/__setup__/ally_http/__init__.py
|
b06c5790549446b5d6d3390a164ae4559aab9422
|
[] |
no_license
|
galiminus/my_liveblog
|
698f67174753ff30f8c9590935d6562a79ad2cbf
|
550aa1d0a58fc30aa9faccbfd24c79a0ceb83352
|
refs/heads/master
| 2021-05-26T20:03:13.506295
| 2013-04-23T09:57:53
| 2013-04-23T09:57:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
'''
Created on Jul 15, 2011
@package: ally http
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Contains setup and configuration files for the HTTP REST server.
'''
from ally.container import ioc
# --------------------------------------------------------------------
NAME = 'ally HTTP'
GROUP = 'ally'
VERSION = '1.0'
DESCRIPTION = 'Provides the HTTP communication support'
# --------------------------------------------------------------------
# The default configurations
@ioc.config
def server_type() -> str:
'''
The type of the server to use, the options are:
"basic"- single threaded server, the safest but slowest server to use.
'''
return 'basic'
@ioc.config
def server_host() -> str:
'''The IP address to bind the server to, something like 127.0.0.1'''
return '0.0.0.0'
@ioc.config
def server_port() -> int:
'''The port on which the server will run'''
return 8080
@ioc.config
def server_version() -> str:
'''The server version name'''
return 'Ally/0.1'
|
[
"etienne@spillemaeker.com"
] |
etienne@spillemaeker.com
|
fdf0029733f0c29f70e2242919078fe8131e8b6b
|
f428482945cf11d0fa17aa1a0607f43ec8427614
|
/run_profile.py
|
3a8a3189a42f462a73a5eb880fb0a24e0f0f8f45
|
[] |
no_license
|
SomervilleJesusBall/KebleBall
|
22e3367797a0a9f740271dff40d5359e69a80f9d
|
09a7d9c6b86365c31827bfd44fa50d4527a646e4
|
refs/heads/master
| 2021-01-22T05:23:55.882724
| 2016-03-13T18:28:09
| 2016-04-14T12:17:45
| 47,200,067
| 0
| 0
| null | 2015-12-01T15:56:32
| 2015-12-01T15:56:31
| null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
#! /usr/bin/env python2
# coding: utf-8
"""Executable to run Eisitirio with profiling."""
from __future__ import unicode_literals
from werkzeug.contrib import profiler
from eisitirio import app
from eisitirio import system # pylint: disable=unused-import
APP = app.APP
APP.config.from_pyfile('config/development.py')
APP.config['PROFILE'] = True
APP.wsgi_app = profiler.ProfilerMiddleware(APP.wsgi_app, restrictions=[30])
if __name__ == '__main__':
APP.run()
|
[
"samuel.littley@toastwaffle.com"
] |
samuel.littley@toastwaffle.com
|
0901587cb483f5f586d862ad701e89b6273493d3
|
78649dd3fdfafc3edb7ef5b0de52096846cd9c28
|
/networking_mlnx_baremetal/ufmclient/session.py
|
4e246c17d2045be65610b4e4a49a1dd690b63064
|
[
"Apache-2.0"
] |
permissive
|
IamFive/networking-mlnx-baremetal
|
38c99b127a7b08850e9ce5c83f0a6539ec4fe9b6
|
8d65405a8546803f903cadd0cf0818007a1d0119
|
refs/heads/master
| 2023-02-02T05:47:02.166816
| 2020-12-02T08:01:40
| 2020-12-02T12:29:02
| 296,988,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,375
|
py
|
# Copyright 2020 HuaWei Technologies. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import requests
from requests.auth import HTTPBasicAuth
from networking_mlnx_baremetal.ufmclient import constants
from networking_mlnx_baremetal.ufmclient import exceptions
LOG = logging.getLogger(__name__)
HEAD = 'HEAD'
"""http method HEAD"""
GET = 'GET'
"""http method get"""
POST = 'POST'
"""http method POST"""
PATCH = 'PATCH'
"""http method PATCH"""
PUT = 'PUT'
"""http method PUT"""
DELETE = 'DELETE'
class UfmSession(object):
"""UFM REST API session"""
# Default timeout in seconds for requests connect and read
# http://docs.python-requests.org/en/master/user/advanced/#timeouts
_DEFAULT_TIMEOUT = 60
def __init__(self, endpoint, username, password, verify_ca, timeout=None):
self.endpoint = endpoint
self.base_url = '%s/ufmRest' % endpoint
self._timeout = timeout if timeout else self._DEFAULT_TIMEOUT
# Initial request session
self._session = requests.Session()
self._session.verify = verify_ca
self._session.auth = HTTPBasicAuth(username, password)
from networking_mlnx_baremetal import __version__ as version
self._session.headers.update({
'User-Agent': 'python-ufmclient - v%s' % version
})
def get_url(self, path):
"""get absolute URL for UFM REST API resource
:param path: path of resource, can be relative path or absolute path
:return:
"""
if path.startswith(self.base_url):
return path
elif path.startswith('/ufmRest'):
return '%s%s' % (self.endpoint, path)
else:
return '%s%s' % (self.base_url, path)
def get(self, url, headers=None):
return self.request(GET, url, headers=headers)
def post(self, url, payload, headers=None):
return self.request(POST, url, json=payload, headers=headers)
def put(self, url, payload, headers=None):
return self.request(PUT, url, json=payload, headers=headers)
def patch(self, url, payload, headers=None):
return self.request(PATCH, url, json=payload, headers=headers)
def delete(self, url, headers=None):
return self.request(DELETE, url, headers=headers)
def request(self, method, url, json=None, headers=None):
try:
url = self.get_url(url)
return self._request(method, url, json=json, headers=headers)
except requests.exceptions.RequestException as e:
response = e.response
if response is not None:
LOG.warning('UFM responses -> %(method)s %(url)s, '
'code: %(code)s, response: %(resp_txt)s',
{'method': method, 'url': url,
'code': response.status_code,
'resp_txt': response.content})
raise exceptions.raise_for_response(method, url, response)
else:
raise exceptions.UfmConnectionError(url=url, error=e)
def _request(self, method, url, json=None, headers=None):
if method.upper() in [constants.POST, constants.PATCH, constants.PUT]:
headers = headers or {}
headers.update({constants.HEADER_CONTENT_TYPE: 'application/json'})
req = requests.Request(method, url, json=json, headers=headers)
prepped_req = self._session.prepare_request(req)
res = self._session.send(prepped_req, timeout=self._timeout)
res.raise_for_status()
LOG.debug('UFM responses -> %(method)s %(url)s, code: %(code)s, '
'content:: %(content)s',
{'method': method, 'url': url, 'code': res.status_code,
'content': res.text})
return res
|
[
"iampurse@vip.qq.com"
] |
iampurse@vip.qq.com
|
bf3d53245cc918b53d410fb4e30485d53f1b055f
|
98d34935bfa9b709c07df539267daa6f3a6db880
|
/kikar_hamedina/mks/factories/member_factory.py
|
b99cce744d9c25d90690e045e9aff5fd62a4e562
|
[] |
no_license
|
hasadna/kikar-hamedina
|
c4a0e939fdafb1f8d187db1be35aba5fde2350be
|
d08e9231fd4c91c4024ced26b760b87f93bb8607
|
refs/heads/dev
| 2020-12-25T18:04:22.817008
| 2019-03-30T15:27:13
| 2019-03-30T15:27:13
| 18,186,117
| 12
| 36
| null | 2019-03-30T15:28:06
| 2014-03-27T18:11:01
|
Python
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
import factory
from mks import models
from party_factory import PartyFactory
class MemberFactory(factory.DjangoModelFactory):
class Meta:
model = models.Member
name = factory.sequence(lambda n: u"Name {}".format(n))
name_en = factory.sequence(lambda n: u"Name {}".format(n))
current_party = factory.SubFactory(PartyFactory)
is_current = True
|
[
"yotammanor@gmail.com"
] |
yotammanor@gmail.com
|
92faa3cc6c9b46d19d31350aa17bf68325786ac2
|
d1c3a9a4289b1aa262285b5de5084f3074893703
|
/games/forms.py
|
2465c9d8ef21a1a536c51afe892d02c167bedc86
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Code-Institute-Submissions/stream-three-project-1
|
a9ce00a608b5d12d0c4ef48546c265f0110fb55e
|
96a5718a22f57b908ea5eb76298ceffdb1f17c8b
|
refs/heads/master
| 2020-03-21T11:13:19.873566
| 2018-06-24T14:57:31
| 2018-06-24T14:57:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
from django import forms
from datetime import datetime
# Form to select a season when viewing the league standings. Just a simple select field to choose a year.
class SeasonSelectForm(forms.Form):
SEASON_OPTIONS = (
(year, year) for year in range(2000, datetime.now().year+1)
)
season = forms.ChoiceField(initial=datetime.now().year, choices=SEASON_OPTIONS)
|
[
"andrew@andrewstead.co.uk"
] |
andrew@andrewstead.co.uk
|
f43a8030903ababf84b372f2a71583c30458595b
|
49273a7e6e0d4726f38fab1c430b86dbfc4b2345
|
/leetcode/p49.py
|
ffbeedf32f08e603a01ecbb7f7ff820bb024d919
|
[] |
no_license
|
adqz/interview_practice
|
d16d8c56436dde1f7fa96dc0d8dcc827295e0ff0
|
f55fb9c0a39c2482c98cc452c185a938a59ad57c
|
refs/heads/master
| 2023-01-11T01:30:03.353498
| 2023-01-03T14:48:08
| 2023-01-03T14:48:08
| 207,520,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
'''
@author: adnan
Problem No. 98. Validate Binary Search Tree (Medium)
Runtime: 40 ms, faster than 96.02% of Python3 online submissions for Validate Binary Search Tree.
Memory Usage: 15.1 MB, less than 100.00% of Python3 online submissions for Validate Binary Search Tree.
'''
from typing import List
import tree_visualizer
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isValidBST(self, root: TreeNode, Min=None, Max=None) -> bool:
if root:
if Min!=None and root.val <= Min:
return False
if Max!=None and root.val >= Max:
return False
if root.left and not(self.isValidBST(root.left, Min, root.val)):
return False
if root.right and not(self.isValidBST(root.right, root.val, Max)):
return False
return True
if __name__ == '__main__':
sol = Solution()
root = tree_visualizer.deserialize('[5,1,4,null,null,3,6]')
ans = sol.isValidBST(root)
print(f'ans = {ans}')
root = tree_visualizer.deserialize('[2,1,3]')
ans = sol.isValidBST(root)
print(f'ans = {ans}')
root = tree_visualizer.deserialize('[1,null,1]')
ans = sol.isValidBST(root)
print(f'ans = {ans}')
tree_visualizer.drawtree(root)
|
[
"ashahpur@eng.ucsd.edu"
] |
ashahpur@eng.ucsd.edu
|
07a49fac0cb7ec2461404a59bf4502820bac0d55
|
fcccdb133bf5611c69781080fdbcbb9f4b70c222
|
/input/parameters/lithium-ion/experimental_functions/electrolyte_conductivity.py
|
40a418d3a98ec2576698a3f0b4f8afde213c94fe
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
galvanic653960572/PyBaMM
|
d04036e9e0fec12ceb1d9b4b50cfb3bcfe25f3f1
|
4869d358b3452c7ca627d713823a67fdfdafa4bd
|
refs/heads/master
| 2020-04-09T05:04:28.951580
| 2018-12-03T15:20:44
| 2018-12-03T15:21:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
def lfp_cond(c):
"""
Conductivity of LiPF6 in EC:DMC as in Newman's Dualfoil code. This
function is in dimensional form.
Parameters
----------
c: double
lithium-ion concentration
"""
c = c / 1000
sigma_e = 0.0911 + 1.9101 * c - 1.052 * c ** 2 + 0.1554 * c ** 3
return sigma_e
|
[
"valentinsulzer@hotmail.com"
] |
valentinsulzer@hotmail.com
|
e08622954352b9e1b6540769c6217dd480ef770c
|
7ab41799fd38489c93282f1beb3b20e7ef8ff165
|
/python/111.py
|
ec32bd42ef24b2c21f576226dc84e6a8613f5d9a
|
[] |
no_license
|
scturtle/leetcode-sol
|
86c4095df6b31a9fcad683f2d63669ce1691633c
|
e1a9ce5d9b8fe4bd11e50bd1d5ba1933de845db7
|
refs/heads/master
| 2020-04-23T00:01:37.016267
| 2015-11-21T04:15:27
| 2015-11-21T04:15:27
| 32,385,573
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
class Solution(object):
def minDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
if root.left is None:
return 1 + self.minDepth(root.right)
if root.right is None:
return 1 + self.minDepth(root.left)
return 1 + min(self.minDepth(root.left), self. minDepth(root.right))
|
[
"scturtle@gmail.com"
] |
scturtle@gmail.com
|
740243c8c2e06db15307652ccdf16cb6d4e8ecf1
|
79e0f8d64cb928ccc6a026b32dcbb3c8fcefa2de
|
/script/compile-coffee.py
|
179931a5db0a31bf159caccbe6107c6d847aaeef
|
[
"MIT"
] |
permissive
|
mapbox/atom-shell
|
939bca5dec3f6cf7460a3b34d0517e78ed0da928
|
2125a0be826170c3a84af1e75572b06402f3add9
|
refs/heads/master
| 2023-06-05T10:46:29.802386
| 2014-08-04T16:05:26
| 2014-08-04T16:05:26
| 22,614,165
| 1
| 6
|
MIT
| 2023-04-09T15:03:19
| 2014-08-04T17:58:00
| null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
#!/usr/bin/env python
import os
import subprocess
import sys
SOURCE_ROOT = os.path.dirname(os.path.dirname(__file__))
WINDOWS_NODE_PATHs = [
'C:/Program Files/nodejs/node.exe',
'C:/Program Files (x86)/nodejs/node.exe',
]
def main():
input_file = sys.argv[1]
output_dir = os.path.dirname(sys.argv[2])
coffee = os.path.join(SOURCE_ROOT, 'node_modules', 'coffee-script', 'bin',
'coffee')
if sys.platform in ['win32', 'cygwin']:
node = find_node()
if not node:
print 'Node.js is required for building atom-shell'
return 1
subprocess.check_call(['node', coffee, '-c', '-o', output_dir, input_file],
executable=node)
else:
subprocess.check_call(['node', coffee, '-c', '-o', output_dir, input_file])
def find_node():
for path in WINDOWS_NODE_PATHs:
if os.path.exists(path):
return path
return None
if __name__ == '__main__':
sys.exit(main())
|
[
"zcbenz@gmail.com"
] |
zcbenz@gmail.com
|
f33c5c5eacd1cacf4b21708ba3c979e5958862da
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py
|
1a90b9ca274de0d0f5104b2b0f1741b2778accc7
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
# Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
[
"chenyong84@huawei.com"
] |
chenyong84@huawei.com
|
63de0a760734ad6f525a1164cff32a69663a6382
|
7d43ba52d958537905cfdde46cc194a97c45dc56
|
/WEB/Networks/Organizations/Registrars/RIR.py
|
332105180c0a42d4b6aa3d37dd9dc1d0d59402e4
|
[] |
no_license
|
Koshmatova/workbook
|
3e4d1f698a01f2be65c1abc83ee251ebc8a6bbcd
|
902695e8e660689a1730c23790dbdc51737085c9
|
refs/heads/master
| 2023-05-01T02:30:46.868027
| 2021-05-10T03:49:08
| 2021-05-10T03:49:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
RIR
#regional internet Registry
РЕГИСТРАТОРЫ
#получают блоки и номера автономных сисм у IANA
#выдают блоки LIR
ARIN
#обслуживает
Северную Америку
Багамы
Пуэро-Рико
Ямайку
APNIC
#обслуживает
Южную Азию
Восточную Азию
Юго-Восточную Азию
Австралию
Океанию
AfriNIC
#обслуживает
Африку
Страны Индийского Океана
LACNIC
#обслуживает
Южную Америку
Страны бассейна Карибского моря
RIPE NCC
#обслуживает
Европу
Центральную Азию
Ближний восток
|
[
"mkone112@gmail.com"
] |
mkone112@gmail.com
|
3d5ca3e81b309d799bafa57120c402ad3bbbaa20
|
ad16b0c0178e4543d0c44ad3d90f90c6beeb4f5a
|
/di2.py
|
cb2a72a47cbef1e402302ce534f3005b82c52464
|
[] |
no_license
|
timmonspatrick/HemoDub
|
09cb61e8e33ee8b64c9e6011d4ae8679d07950d9
|
4e6cceb44456c498cc1d6d55f8369099d0d5d947
|
refs/heads/master
| 2021-04-27T09:34:40.935684
| 2018-05-31T08:29:04
| 2018-05-31T08:29:04
| 122,491,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,278
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 14 15:02:19 2017
@author: Patrick
"""
from __future__ import print_function
import numpy as np
from residue_distribution import alphabets
conjoint_letters = ["A", "I", "Y", "H", "R", "D", "C"]
aa_letters = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
di_letters = ["%s%s" % (a, b) for a in aa_letters for b in aa_letters]
di_conjoint_letters = ["%s%s" % (a, b) for a in conjoint_letters for b in conjoint_letters]
di3_letters = ["%s%s%s" % (a, b, c) for a in conjoint_letters for b in conjoint_letters for c in conjoint_letters]
def counter(string_list):
'''
A function for counting the number of letters present.
Returns a list of (letter, #occurances) tuples.
string_list eg. ["HW", "WA", "AL", "LS", ...]
'''
l = max(1, len(string_list))
d = {i : 0 for i in di_letters}
for s in string_list:
try:
d[s] += 1.0
except KeyError:
d[s] = 1.0
d = {k : d[k]/(l) for k in d}
return d
def counter3(string):
'''
A function for counting the number of letters present.
Returns a list of (letter, #occurances) tuples.
'''
l = max(1, len(string))
d = {i : 0 for i in di3_letters}
for s in string:
try:
d[s] += 1.0
except KeyError:
d[s] = 1.0
d = {k : d[k]/(l) for k in d}
return d
def residue_distribution2(all_residues, alphabet):
'''
Takes as arguments a string with letters, and the type of sequence represented.
Returns an alphabetically ordered string of relative frequencies, correct to three decimal places.
'''
d = counter(all_residues)
di2_counts = list(sorted([(i, d[i]) for i in alphabet ])) ##Removes ambiguous letters
r_c = [i[1] for i in di2_counts]
dis = np.array([r_c,])
return dis
def residue_distribution3(all_residues):
'''
Takes as arguments a string with letters, and the type of sequence represented.
Returns an alphabetically ordered string of relative frequencies, correct to three decimal places.
'''
d = counter3(all_residues)
di3_counts = list(sorted([(i, d[i]) for i in di3_letters ])) ##Removes ambiguous letters
r_c = [i[1] for i in di3_counts]
dis = np.array([r_c,])
return dis
def di2(seq, alphabet="aa"):
'''
A function to return all the di2s for a sequence.
Eg. ABCDEF --> AD, BE, CF
'''
l = []
for a in range(len(seq)):
try:
x = "%s%s" % (seq[a], seq[a + 3 ])
l.append(x)
except IndexError:
pass
return residue_distribution2(l, alphabets[alphabet][2])
def di3(seq):
'''
A function to return all the di3s for a sequence.
Eg. ABCDEFGHI --> ADG, BEH, CFI
'''
l = []
for a in range(len(seq)):
try:
x = "%s%s%s" % (seq[a], seq[a + 3 ], seq[a + 6])
l.append(x)
except IndexError:
pass
return residue_distribution3(l)
|
[
"timmons.patrick@outlook.com"
] |
timmons.patrick@outlook.com
|
9e47a121f146030bb57a04733eda6fd89bd415c2
|
512f48fdcfa78e322526cf47163110009b84bf73
|
/test/test_page_of_policy_asset.py
|
b94a86aae703c7f17bf5ff820a90730167232177
|
[
"MIT"
] |
permissive
|
confluentinc/vm-console-client-python
|
9a0f540c0113acf68ee9dc914715bc255e4d99f4
|
ccbd944a0e0333c73e098b769fe4c82755d29874
|
refs/heads/master
| 2023-07-18T10:33:58.909287
| 2021-09-02T20:52:20
| 2021-09-02T20:52:20
| 402,559,283
| 0
| 0
|
MIT
| 2021-09-02T20:49:56
| 2021-09-02T20:49:56
| null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
# coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.page_of_policy_asset import PageOfPolicyAsset # noqa: E501
from swagger_client.rest import ApiException
class TestPageOfPolicyAsset(unittest.TestCase):
"""PageOfPolicyAsset unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPageOfPolicyAsset(self):
"""Test PageOfPolicyAsset"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.page_of_policy_asset.PageOfPolicyAsset() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"zachary_youtz@rapid7.com"
] |
zachary_youtz@rapid7.com
|
93f00f64a3499f2739b654603d11c63bd28647f0
|
c36bd73ddbf668b25908df4ed2d4729d3ea792a7
|
/venv/lib/python3.5/site-packages/facebook_business/adobjects/favoriterequest.py
|
29354ce89e9dd9e997aa5312a28ef6b0f0f1c467
|
[] |
no_license
|
Suraj-KD/AbsentiaVR_Task
|
1e236f88063b97666c8e188af7fddc0fd7ea3792
|
1de364e0464ac79cefc26077318021570993d713
|
refs/heads/master
| 2022-12-10T20:40:31.396380
| 2019-01-31T19:19:36
| 2019-01-31T19:19:36
| 168,398,946
| 0
| 0
| null | 2022-12-08T01:34:30
| 2019-01-30T19:08:22
|
Python
|
UTF-8
|
Python
| false
| false
| 5,371
|
py
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class FavoriteRequest(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isFavoriteRequest = True
super(FavoriteRequest, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
api_version = 'api_version'
description = 'description'
graph_path = 'graph_path'
hash = 'hash'
http_method = 'http_method'
id = 'id'
post_params = 'post_params'
query_params = 'query_params'
class HttpMethod:
get = 'GET'
post = 'POST'
delete = 'DELETE'
class ApiVersion:
unversioned = 'unversioned'
v1_0 = 'v1.0'
v2_0 = 'v2.0'
v2_1 = 'v2.1'
v2_2 = 'v2.2'
v2_3 = 'v2.3'
v2_4 = 'v2.4'
v2_5 = 'v2.5'
v2_6 = 'v2.6'
v2_7 = 'v2.7'
v2_8 = 'v2.8'
v2_9 = 'v2.9'
v2_10 = 'v2.10'
v2_11 = 'v2.11'
v2_12 = 'v2.12'
v3_0 = 'v3.0'
v3_1 = 'v3.1'
v3_2 = 'v3.2'
v4_0 = 'v4.0'
def api_delete(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='DELETE',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=FavoriteRequest,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'api_version': 'string',
'description': 'string',
'graph_path': 'string',
'hash': 'string',
'http_method': 'HttpMethod',
'id': 'string',
'post_params': 'list<Object>',
'query_params': 'list<Object>',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['HttpMethod'] = FavoriteRequest.HttpMethod.__dict__.values()
field_enum_info['ApiVersion'] = FavoriteRequest.ApiVersion.__dict__.values()
return field_enum_info
|
[
"surajdubey302@gmail.com"
] |
surajdubey302@gmail.com
|
a5a67527ca645c09a5eda7d574e9c5751ba8d7c7
|
bcc04939aa70675c9be19c0bf4a9642877db46b1
|
/qa/admin.py
|
fb3490662c6887235e08bad516912b69be9dcd6b
|
[
"MIT"
] |
permissive
|
zkeshtkar/gapbug
|
164398e2ddd8f952d5851eab19e34f9f84a080e1
|
eec5baf9b4346aef26bcb10e48ddcb358140d708
|
refs/heads/main
| 2023-06-20T07:39:50.084126
| 2021-07-16T13:31:10
| 2021-07-16T13:31:10
| 387,550,452
| 0
| 0
|
MIT
| 2021-07-19T17:53:20
| 2021-07-19T17:53:19
| null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from django.contrib import admin
from .models import Question
class QuestionAdmin(admin.ModelAdmin):
list_display = ("title", "user")
admin.site.register(Question, QuestionAdmin)
|
[
"mshirdel@gmail.com"
] |
mshirdel@gmail.com
|
1b961f70e65bde6e2cf6a2d5479f8a1db3b842ef
|
7ecc7092c70d28cfbc4229aca95267673f3b2b57
|
/quru/server/mq_client.py
|
cab3ad58929c75dac7fb68c377ee1f2f1f96f8ad
|
[
"MIT"
] |
permissive
|
ShawnHan1993/quru
|
403bd6499a18901a02378eae82da73f828090107
|
6b103a54d8228e4e2d44b06cc068c60a44b02d67
|
refs/heads/main
| 2023-07-17T19:38:43.855085
| 2021-09-03T01:40:25
| 2021-09-03T14:07:11
| 400,223,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,594
|
py
|
import asyncio
import time
import typing
import aio_pika
import pika
import aiormq
from ..quru_logger import logger
from ..env import (BROADCAST_EXCHANGE_NAME, MAIN_EXCHANGE_NAME, MQ_HOST,
MQ_PORT, MQ_RETRY, RABBITMQ_PASSWORD, RABBITMQ_USERNAME,
RPC_EXCHANGE_NAME)
class BaseMqClient:
def __init__(self,
mq_host=MQ_HOST,
mq_port=MQ_PORT,
mq_username=RABBITMQ_USERNAME,
mq_password=RABBITMQ_PASSWORD,
retry=MQ_RETRY):
self._mq_host = mq_host
self._mq_port = mq_port
self._mq_username = mq_username
self._mq_password = mq_password
self._URL = 'amqp://{}:{}@{}:{}'.format(
self._mq_username,
self._mq_password,
self._mq_host,
self._mq_port)
self._param = pika.URLParameters(self._URL)
self._retry = retry
def connect(self) -> pika.BlockingConnection:
if self._retry == 0:
upper_bound = float('inf')
else:
upper_bound = self._retry
counter = 0
while counter < upper_bound:
try:
connection = pika.BlockingConnection(self._param)
break
except Exception:
time.sleep(10)
counter += 1
else:
raise TimeoutError('connect failed.')
if connection is None:
raise ConnectionError
logger.info("Succeded_in_connecting_MQ.")
return connection
class AsyncMqClient(BaseMqClient):
'''Async MQ logic wrapper.
'''
EXCHANGE_PROPERTY = {
MAIN_EXCHANGE_NAME: {
"type": aio_pika.ExchangeType.DIRECT
},
RPC_EXCHANGE_NAME: {
"type": aio_pika.ExchangeType.DIRECT
},
BROADCAST_EXCHANGE_NAME: {
"type": aio_pika.ExchangeType.TOPIC
},
}
def __init__(self,
loop,
mq_host=MQ_HOST,
mq_port=MQ_PORT,
mq_username=RABBITMQ_USERNAME,
mq_password=RABBITMQ_PASSWORD,
retry=MQ_RETRY):
super().__init__(mq_host=MQ_HOST,
mq_port=MQ_PORT,
mq_username=RABBITMQ_USERNAME,
mq_password=RABBITMQ_PASSWORD,
retry=MQ_RETRY)
self._connection = None
self._loop = loop
self._q_pool = {}
async def setup(self):
await self._async_connect()
self._pub_channel = \
await self._connection.channel(publisher_confirms=False)
async def _async_connect(self) -> aio_pika.RobustConnection:
if self._retry == 0:
upper_bound = float('inf')
else:
upper_bound = self._retry
counter = 0
while counter < upper_bound:
try:
self._connection = aio_pika.RobustConnection(
self._URL,
loop=self._loop
)
await self._connection.connect()
break
except Exception:
await asyncio.sleep(10)
counter += 1
else:
raise TimeoutError('connect failed.')
if self._connection is None:
raise ConnectionError
logger.info("Succeded_in_connecting_MQ.")
return self._connection
async def publish(self, **kwargs):
err = None
for i in range(3):
try:
await self._pub_channel.channel.basic_publish(**kwargs)
break
except Exception as e:
err = e
await self.setup()
else:
raise err
async def declare_exchange(self,
name,
type,
arguments=None,
bind_exchange=None,
routing_key=None,
channel=None):
'''A broker function to declare an exchange. This function abstract out
a lot details of communicating with the mq server.
'''
if channel is None:
channel: aio_pika.Channel = await self._connection.channel(
publisher_confirms=False)
exchange = await channel.declare_exchange(
name=name, type=type,
arguments=arguments)
if bind_exchange is not None:
assert routing_key is not None
await channel.declare_exchange(
name=bind_exchange,
**self.EXCHANGE_PROPERTY[bind_exchange])
await exchange.bind(bind_exchange, routing_key=routing_key)
return exchange, channel
async def declare_queue(
        self,
        name,
        bind_exchange,
        routing_key,
        callback,
        prefetch_count,
        arguments=None,
        auto_delete=False,
        exclusive=False,
        no_ack=True,
        channel=None,
        consumer_tag=None
) -> typing.Tuple[aio_pika.Queue, aio_pika.Channel, str]:
    '''A broker function to declare a queue. This function abstract out
    a lot details of communicating with the mq server.

    Declares (or reuses from self._q_pool) the queue *name*, attaches
    *callback* as its consumer, and binds it to *bind_exchange* with
    *routing_key*.  Returns the (queue, channel, consumer_tag) triple.
    '''
    if name in self._q_pool:
        # Reuse the previously declared queue/channel/consumer triple.
        queue, channel, consumer_tag = self._q_pool[name]
    else:
        if channel is None:
            channel: aio_pika.Channel = await self._connection.channel(
                publisher_confirms=False)
        if arguments is None:
            arguments = {}
        arguments["x-max-length"] = 30000  # cap queue depth server-side
        queue: aio_pika.Queue = await channel.declare_queue(
            name=name,
            arguments=arguments,
            auto_delete=auto_delete,
            exclusive=exclusive)
        # NOTE(review): stores the bare consumer_tag here, then the whole
        # triple is written below — this first write looks redundant; confirm.
        self._q_pool[queue.name] = consumer_tag
    try:
        await channel.set_qos(prefetch_count=prefetch_count)
        consumer_tag = await queue.consume(
            callback, no_ack=no_ack,
            consumer_tag=consumer_tag)
    except aiormq.exceptions.DuplicateConsumerTag:
        # Already consuming with this tag; keep the existing consumer.
        pass
    self._q_pool[queue.name] = (queue, channel, consumer_tag)
    await queue.bind(bind_exchange, routing_key=routing_key)
    return queue, channel, consumer_tag
async def close(self):
    """Close the underlying connection, if one was ever opened."""
    if self._connection is not None:
        await self._connection.close()
|
[
"shawnhan1029@gmail.com"
] |
shawnhan1029@gmail.com
|
2d7c95b9adc03370debcc3242e5f0acf53ab7a6f
|
020fbf1db497520abcb30cd3889cfe61c601723f
|
/practice/readmodclass.py
|
491a456b4862d0be7e5870f5afe2748e2fdea45c
|
[] |
no_license
|
bluecrt/first-project
|
bc53a49ae23c3cc6beb0ede4d00d94e4ad568771
|
7f5f376adcaa0d48caf4540db6613f843d40e173
|
refs/heads/master
| 2023-06-21T19:16:46.644489
| 2021-07-22T13:13:15
| 2021-07-22T13:13:15
| 386,676,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
import classcars

# Build a car and describe it to the user.
my_car = classcars.Cars('red', 'big')
print("your car's color is:{},shape is:{},now you can drive it。".format(my_car.color, my_car.shape))
|
[
"email"
] |
email
|
a6fb58c89014ee062dd4f0919c71670b7ccf61ec
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/audio/FastSpeech/src/dataset.py
|
e72e66213e867feab300003b59edf5352fb51bbb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,247
|
py
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Data preprocessing."""
import os
from pathlib import Path
import numpy as np
from mindspore import Tensor
from mindspore import dtype as mstype
from src.cfg.config import config as hp
from src.text import text_to_sequence
from src.utils import pad_1d_tensor
from src.utils import pad_2d_tensor
from src.utils import process_text
def get_data_to_buffer():
    """
    Put data to memory, for faster training.

    Reads the training indices, then loads each sample's mel-spectrogram
    target, alignment durations and tokenized text into a list of dicts.
    """
    with Path(hp.dataset_path, 'train_indices.txt').open('r') as file:
        # i[:-1] strips the trailing newline from each index line.
        train_part = np.array([i[:-1] for i in file.readlines()], np.int32)
    train_part.sort()
    buffer = list()
    raw_text = process_text(os.path.join(hp.dataset_path, "metadata.txt"))
    for i in train_part:
        # Mel files on disk are 1-indexed; indices in train_part are 0-based.
        mel_gt_name = os.path.join(hp.dataset_path, 'mels', "ljspeech-mel-%05d.npy" % (i+1))
        mel_gt_target = np.load(mel_gt_name)
        duration = np.load(os.path.join(hp.dataset_path, 'alignments', str(i)+".npy"))
        character = raw_text[i][: len(raw_text[i])-1]  # drop trailing newline
        character = np.array(text_to_sequence(character, hp.text_cleaners))
        buffer.append(
            {
                "text": character,
                "duration": duration,
                "mel_target": mel_gt_target
            }
        )
    return buffer
def reprocess_tensor(data_dict):
    """
    Prepare a single sample for training.

    Pads text, duration and mel target to the fixed maximum lengths
    required by the static graph, and builds 1-based position indices
    (zero-padded on the right) for both the character and mel axes.

    Args:
        data_dict (dict): Dictionary of np.array type data.

    Returns:
        dict: Padded data ready for training (np.array values).
    """
    character_seq = data_dict["text"]
    mel_spectrogram = data_dict["mel_target"]
    durations = data_dict["duration"]

    max_len = hp.character_max_length
    n_chars = int(character_seq.shape[0])
    src_pos = np.pad([idx + 1 for idx in range(n_chars)],
                     (0, max_len - n_chars), 'constant')

    max_mel_len = hp.mel_max_length
    n_frames = int(mel_spectrogram.shape[0])
    mel_pos = np.pad([idx + 1 for idx in range(n_frames)],
                     (0, max_mel_len - n_frames), 'constant')

    return {
        "text": pad_1d_tensor(character_seq),        # (hp.character_max_length)
        "src_pos": src_pos,                          # (hp.character_max_length)
        "mel_pos": mel_pos,                          # (hp.mel_max_length)
        "duration": pad_1d_tensor(durations),        # (hp.character_max_length)
        "mel_target": pad_2d_tensor(mel_spectrogram),  # (hp.mel_max_length, hp.num_mels)
        "mel_max_len": max_mel_len,
    }
def preprocess_data(buffer):
    """
    Pad and type-convert every raw sample so it is ready for training.

    Args:
        buffer (list): Raw data inputs.

    Returns:
        list: One tuple per sample of
            (text, src_pos, mel_pos, duration, mel_target, mel_max_len).
    """
    return [
        (
            padded["text"].astype(np.float32),
            padded["src_pos"].astype(np.float32),
            padded["mel_pos"].astype(np.float32),
            padded["duration"].astype(np.int32),
            padded["mel_target"].astype(np.float32),
            padded["mel_max_len"],
        )
        for padded in map(reprocess_tensor, buffer)
    ]
class BufferDataset:
    """
    Dataloader.

    Eagerly pads/converts the raw buffer once at construction time;
    items are returned as ready-to-train tuples.
    """
    def __init__(self, buffer):
        self.length_dataset = len(buffer)
        self.preprocessed_data = preprocess_data(buffer)

    def __len__(self):
        return self.length_dataset

    def __getitem__(self, idx):
        return self.preprocessed_data[idx]
def get_val_data(data_url):
    """Get validation data.

    Returns a list of [padded_sequence, padded_src_pos, wav_path] triples,
    one per line of validation.txt.
    """
    data_list = list()
    with Path(data_url, 'validation.txt').open('r') as file:
        data_paths = file.readlines()
    root_wav_path = os.path.join(data_url, 'wavs')
    # Each metadata line has the form "<wav id>|<transcript>".
    wav_paths = [root_wav_path + '/' + raw_path.split('|')[0] + '.wav' for raw_path in data_paths]
    val_txts = [raw_path.split('|')[1][:-1] for raw_path in data_paths]  # strip trailing newline
    for orig_text, wav_path in zip(val_txts, wav_paths):
        sequence = text_to_sequence(orig_text, hp.text_cleaners)
        sequence = np.expand_dims(sequence, 0)
        src_pos = np.array([i + 1 for i in range(sequence.shape[1])])
        src_pos = np.expand_dims(src_pos, 0)
        # Pad both tensors to the fixed character_max_length the model expects.
        sequence = Tensor([np.pad(sequence[0], (0, hp.character_max_length - sequence.shape[1]))], mstype.float32)
        src_pos = Tensor([np.pad(src_pos[0], (0, hp.character_max_length - src_pos.shape[1]))], mstype.float32)
        data_list.append([sequence, src_pos, wav_path])
    return data_list
|
[
"a.denisov@expasoft.tech"
] |
a.denisov@expasoft.tech
|
891767424910c6312d21ec2883cabb1ee174ed30
|
094a82883b0f4490dbca6c042e129faf0593d7bc
|
/thingflow/filters/timeout.py
|
714092327ac119541bf3e1f1efb166cdb5bf6395
|
[
"Apache-2.0"
] |
permissive
|
kesking82/thingflow-python
|
904495aa370fb0fdef5e1eb162f0553a37bd7271
|
4c00deafd1bf425ec90ef2159fc5f3ea2553ade8
|
refs/heads/master
| 2020-04-21T13:05:57.615247
| 2019-02-28T09:59:13
| 2019-02-28T09:59:13
| 169,587,091
| 0
| 0
|
Apache-2.0
| 2019-02-07T14:44:45
| 2019-02-07T14:44:44
| null |
UTF-8
|
Python
| false
| false
| 4,506
|
py
|
# Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Timeout-related output things and filters.
"""
from thingflow.base import OutputThing, DirectOutputThingMixin, FunctionFilter,\
FatalError, filtermethod
class Timeout(OutputThing, DirectOutputThingMixin):
    """An output thing that can schedule timeouts for itself. When a
    timeout occurs, an event is sent on the default port.
    The timeout_thunk is called to get the actual event.
    """
    def __init__(self, scheduler, timeout_thunk):
        super().__init__()
        self.scheduler = scheduler
        self.timeout_thunk = timeout_thunk
        self.cancel = None  # cancellation callable for the pending timeout, if any

    def start(self, interval):
        """(Re)arm the timeout to fire after *interval*, cancelling any pending one."""
        if self.cancel:
            self.cancel()
        self.cancel = self.scheduler.schedule_later_one_time(self, interval)

    def clear(self):
        """Cancel any pending timeout."""
        if self.cancel:
            self.cancel()
        self.cancel = None

    def _observe(self):
        """If this gets called, we hit the timeout; emit the thunk's event."""
        self.cancel = None
        self._dispatch_next(self.timeout_thunk())
class EventWatcher:
    """Watch the event stream and then produce an event for a timeout
    when asked. This can be subclassed to implement different
    policies.  The defaults are no-ops.
    """
    def on_next(self, x):
        pass # we get a regular event

    def produce_event_for_timeout(self):
        return None # return the timeout event

    def close(self): # called for on_completed or on_error
        pass
class SupplyEventWhenTimeout(FunctionFilter):
    """This filter sits in a chain and passes incoming events through to
    its output. It also passes all events to the on_next() method of the
    event watcher. If no event arrives on the input after the interval has
    passed since the last event, event_watcher.produce_event_for_timeout()
    is called to get a dummy event, which is passed upstream.
    """
    def __init__(self, previous_in_chain, event_watcher, scheduler, interval):
        self.event_watcher = event_watcher
        self.timeout_thing = \
            Timeout(scheduler, self.event_watcher.produce_event_for_timeout)
        self.interval = interval
        # The three local functions below become this filter's event handlers
        # via FunctionFilter's constructor (they take self explicitly).
        def on_next(self, x):
            self.event_watcher.on_next(x)
            # reset the timer
            self.timeout_thing.start(self.interval)
            self._dispatch_next(x)
        def on_completed(self):
            self.event_watcher.close()
            self.timeout_thing.clear()
            self._dispatch_completed()
        def on_error(self, e):
            self.event_watcher.close()
            self.timeout_thing.clear()
            self._dispatch_error(e)
        super().__init__(previous_in_chain, on_next=on_next,
                         on_completed=on_completed, on_error=on_error,
                         name='supply_event_when_timeout')
        # pass the timeout_thing's timeout events to my on_timeout_next()
        # method
        self.timeout_thing.connect(self,
                                   port_mapping=('default','timeout'))
        # We start the timeout now - if we don't get a first event from the
        # input within the timeout, we should supply a timeout event. This
        # timeout won't start counting down until we start the scheduler.
        self.timeout_thing.start(interval)

    def on_timeout_next(self, x):
        """This method is connected to the Timeout thing's output. If it
        gets called, the timeout has fired. We need to reschedule the timeout
        as well, so that we continue to produce events in the case of multiple
        consecutive timeouts.
        """
        self.timeout_thing.start(self.interval)
        self._dispatch_next(x)

    def on_timeout_error(self, e):
        """This won't get called, as the Timeout thing does not republish any
        errors it receives.
        """
        raise FatalError("%s.on_timeout_error should not be called" % self)

    def on_timeout_completed(self):
        """This won't get called, as the timeout thing does not propagate
        any completions. We just use the primary event stream to figure out when
        things are done and clear any pending timeouts at that time.
        """
        raise FatalError("%s.on_timeout_completed should not be called" % self)
@filtermethod(OutputThing)
def supply_event_when_timeout(this, event_watcher, scheduler, interval):
    """Fluent wrapper: attach a SupplyEventWhenTimeout filter to *this*."""
    return SupplyEventWhenTimeout(this, event_watcher, scheduler, interval)
|
[
"jeff@data-ken.org"
] |
jeff@data-ken.org
|
958691096444cbd3d3c96fb700112e2199f368ca
|
e86851297175203451374021595659adbd516b59
|
/tools/convert2txt.py
|
eecab18170bcdafe344373ef87bedb7667c58484
|
[
"MIT"
] |
permissive
|
stcolumbas/free-church-psalms
|
f0417d07af449300a5ada758dc95e153712b0e9e
|
0eee5faa19306a79d77a55019ff82fcba72fc9b4
|
refs/heads/master
| 2022-12-16T15:31:44.907547
| 2017-12-08T22:53:40
| 2017-12-08T22:53:40
| 28,723,518
| 2
| 0
| null | 2022-12-07T23:51:49
| 2015-01-02T19:23:24
|
Elm
|
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
import os
from utils import (load_scottish_psalter, load_sing_psalms, make_output_folder,
remove_folder, remove_markup, zip_folder)
def write_text_file(psalm, output_folder, fname):
    """Write one psalm as <output_folder>/<fname>.txt.

    Bug fix: remove_markup() was called but its return value discarded —
    Python strings are immutable, so the call had no effect on *text*.
    The cleaned text is now what actually gets written.
    """
    fname += ".txt"
    with open(os.path.join(output_folder, fname), 'w') as f:
        text = psalm['name'] + "\r\n"  # use windows compat. line breaks
        text += psalm['metre'] + "\r\n\r\n"
        text += "\r\n\r\n".join(psalm['stanzas'])
        if psalm['copyright'] is not None:
            text += "\r\n\r\n© " + psalm['copyright']
        # NOTE(review): assumes remove_markup returns the cleaned string —
        # the original dropped this result entirely.
        text = remove_markup(text)
        f.write(text)
def convert2txt():
    """Convert both sets of Psalms to text files and
    save in output/plain_text, then zip the result and remove the folder.
    """
    sections = (
        ("Sing Psalms", load_sing_psalms),
        ("Scottish Psalter", load_scottish_psalter),
    )
    for subfolder, loader in sections:
        output_folder = make_output_folder(["PlainText", subfolder])
        for psalm in loader():
            write_text_file(psalm, output_folder, psalm['file_name'])
    # Both subfolders share the same parent ("PlainText"); archive and clean it.
    zip_folder(os.path.dirname(output_folder))
    remove_folder(os.path.dirname(output_folder))
if __name__ == '__main__':
    # Script entry point: run the full conversion.
    convert2txt()
|
[
"montgomery.dean97@gmail.com"
] |
montgomery.dean97@gmail.com
|
60995d970bc68dc1ec94fb35ac1deb625a4a25b0
|
648f742d6db2ea4e97b83c99b6fc49abd59e9667
|
/common/vault/oas/models/v1_release.py
|
390e9e08ca27c8e6365b47a2aae883c73a864c3e
|
[] |
no_license
|
jmiller-tm/replit
|
c56ce63718f6eb2d9b53bd09d3f7b3ef3496cb86
|
c8e6af3268c4ef8da66516154850919ea79055dc
|
refs/heads/main
| 2023-08-30T00:49:35.738089
| 2021-11-16T23:09:08
| 2021-11-16T23:09:08
| 428,809,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,968
|
py
|
# coding: utf-8
"""
vault/kernel/core_api/proto/v1/accounts/core_api_account_schedule_tags.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1Release(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Swagger model for a funds release: an amount in a denomination moving
    between a target account and an internal account.
    """

    # Attribute name -> declared Swagger type.
    swagger_types = {
        'amount': 'str',
        'denomination': 'str',
        'target_account_id': 'str',
        'internal_account_id': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'amount': 'amount',
        'denomination': 'denomination',
        'target_account_id': 'target_account_id',
        'internal_account_id': 'internal_account_id'
    }

    def __init__(self, amount=None, denomination=None, target_account_id=None, internal_account_id=None):  # noqa: E501
        """V1Release - a model defined in Swagger"""  # noqa: E501
        self._amount = None
        self._denomination = None
        self._target_account_id = None
        self._internal_account_id = None
        self.discriminator = None
        # Assign through the setters only for supplied values so omitted
        # properties keep their None backing fields.
        if amount is not None:
            self.amount = amount
        if denomination is not None:
            self.denomination = denomination
        if target_account_id is not None:
            self.target_account_id = target_account_id
        if internal_account_id is not None:
            self.internal_account_id = internal_account_id

    @property
    def amount(self):
        """The amount released.  # noqa: E501

        :rtype: str
        """
        return self._amount

    @amount.setter
    def amount(self, amount):
        """Set the amount released (str).  # noqa: E501"""
        self._amount = amount

    @property
    def denomination(self):
        """The instruction release denomination.  # noqa: E501

        :rtype: str
        """
        return self._denomination

    @denomination.setter
    def denomination(self, denomination):
        """Set the instruction release denomination (str).  # noqa: E501"""
        self._denomination = denomination

    @property
    def target_account_id(self):
        """The instruction `target_account_id`.  # noqa: E501

        :rtype: str
        """
        return self._target_account_id

    @target_account_id.setter
    def target_account_id(self, target_account_id):
        """Set the instruction `target_account_id` (str).  # noqa: E501"""
        self._target_account_id = target_account_id

    @property
    def internal_account_id(self):
        """The instruction `internal_account_id`.  # noqa: E501

        :rtype: str
        """
        return self._internal_account_id

    @internal_account_id.setter
    def internal_account_id(self, internal_account_id):
        """Set the instruction `internal_account_id` (str).  # noqa: E501"""
        self._internal_account_id = internal_account_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        # Mirrors generated-code behavior for dict-derived models.
        if issubclass(V1Release, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, V1Release) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"jmiller@jmiller-tm00769-mbp.nomad.thomac.net"
] |
jmiller@jmiller-tm00769-mbp.nomad.thomac.net
|
24eec6af70636c91b4a73525876f8dd6d1baaa4a
|
d932716790743d0e2ae7db7218fa6d24f9bc85dc
|
/net/data/verify_certificate_chain_unittest/expired-root/generate-chains.py
|
93d5bb72522e7d46dc7a0d48bd93b24afe02747a
|
[
"BSD-3-Clause"
] |
permissive
|
vade/chromium
|
c43f0c92fdede38e8a9b858abd4fd7c2bb679d9c
|
35c8a0b1c1a76210ae000a946a17d8979b7d81eb
|
refs/heads/Syphon
| 2023-02-28T00:10:11.977720
| 2017-05-24T16:38:21
| 2017-05-24T16:38:21
| 80,049,719
| 19
| 3
| null | 2017-05-24T19:05:34
| 2017-01-25T19:31:53
| null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
#!/usr/bin/python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with a root, intermediate and target. The root has a
smaller validity range than the other certificates, making it easy to violate
just its validity.
Root: 2015/03/01 -> 2015/09/01
Intermediate: 2015/01/01 -> 2016/01/01
Target: 2015/01/01 -> 2016/01/01
"""
import sys
sys.path += ['..']  # make the shared `common` test helper importable
import common
# Self-signed root certificate.
root = common.create_self_signed_root_certificate('Root')
root.set_validity_range(common.MARCH_1_2015_UTC, common.SEPTEMBER_1_2015_UTC)
# Intermediate certificate.
intermediate = common.create_intermediate_certificate('Intermediate', root)
intermediate.set_validity_range(common.JANUARY_1_2015_UTC,
                                common.JANUARY_1_2016_UTC)
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediate)
target.set_validity_range(common.JANUARY_1_2015_UTC, common.JANUARY_1_2016_UTC)
# Leaf-first order; the module docstring above is embedded into the output
# file by write_chain, so it must stay in sync with the validity ranges.
chain = [target, intermediate, root]
common.write_chain(__doc__, chain, 'chain.pem')
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
897de58d13bf05f061a944969efdd949a05fd250
|
70d3a0b9592d67627613806361996848cbdf6e81
|
/tribune/urls.py
|
743c5c2d51d89d1e34935298e5819d997537bf86
|
[] |
no_license
|
markmurimi/moringa-tribune
|
38c85bfbde40b0f6540f9c0c33150f8d8cb2672e
|
51ced550760bfcf05c97889cbef5c891ed33c8e1
|
refs/heads/master
| 2020-03-15T11:07:25.408649
| 2018-05-18T05:25:23
| 2018-05-18T05:25:23
| 132,112,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
from django.conf.urls import url,include
from django.contrib import admin
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
# URL routes for the tribune project.  Order matters: Django resolves the
# first matching pattern, and the empty-prefix news include is a catch-all.
urlpatterns = [
    url(r'^api-token-auth/', obtain_auth_token),  # DRF token login endpoint
    url(r'^admin/', admin.site.urls),
    url(r'',include('news.urls')),  # delegates everything else to the news app
    url(r'^accounts/', include('registration.backends.simple.urls')),
    url(r'^logout/$', views.logout, {"next_page": '/'}),  # redirect home on logout
]
|
[
"murimimg180@gmail.com"
] |
murimimg180@gmail.com
|
a8f12a818bd905bf27433ceb1d88cd1adcb6fd03
|
bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd
|
/neekanee/job_scrapers/plugins/org/link/ecri.py
|
454de8cc4ab4aed3dae40d464a9b0baad832bcf6
|
[] |
no_license
|
thayton/neekanee
|
0890dd5e5cf5bf855d4867ae02de6554291dc349
|
f2b2a13e584469d982f7cc20b49a9b19fed8942d
|
refs/heads/master
| 2021-03-27T11:10:07.633264
| 2018-07-13T14:19:30
| 2018-07-13T14:19:30
| 11,584,212
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,543
|
py
|
import re, urlparse, urllib
from neekanee.jobscrapers.jobscraper import JobScraper
from neekanee.htmlparse.soupify import soupify, get_all_text
from neekanee_solr.models import *
# Static company profile used to seed the scraper with site metadata.
COMPANY = {
    'name': 'Economic Cycle Research Institute - ECRI',
    'hq': 'Plymouth Meeting, PA',
    'home_page_url': 'http://www.ecri.org',
    'jobs_page_url': 'https://careers.ecri.org/',
    'empcnt': [201,500]  # employee-count range bucket
}
class EcriJobScraper(JobScraper):
    """Scrapes job listings from the ECRI careers site."""
    def __init__(self):
        super(EcriJobScraper, self).__init__(COMPANY)

    def scrape_job_links(self, url):
        """Collect Job stubs (title/url/location) from the listing page at *url*."""
        jobs = []
        self.br.open(url)
        s = soupify(self.br.response().read())
        x = {'class': 'JobLink'}
        for a in s.findAll('a', attrs=x):
            job = Job(company=self.company)
            job.title = a.text
            # Listing hrefs are relative; resolve against the current page URL.
            job.url = urlparse.urljoin(self.br.geturl(), a['href'])
            job.location = self.company.location
            jobs.append(job)
        return jobs

    def scrape_jobs(self):
        """Scrape listings, prune delisted jobs, and fetch descriptions for new ones."""
        job_list = self.scrape_job_links(self.company.jobs_page_url)
        self.prune_unlisted_jobs(job_list)
        new_jobs = self.new_job_listings(job_list)
        for job in new_jobs:
            self.br.open(job.url)
            s = soupify(self.br.response().read())
            t = s.find('table', id='CRCareers1_tblJobDescrDetail')
            job.desc = get_all_text(t)  # full job-description text
            job.save()
def get_scraper():
    """Factory used by the scraper framework to obtain this site's scraper."""
    return EcriJobScraper()

if __name__ == '__main__':
    # Allow running this scraper directly as a script.
    job_scraper = get_scraper()
    job_scraper.scrape_jobs()
|
[
"thayton@neekanee.com"
] |
thayton@neekanee.com
|
76cdb6dfccc2ed384bfad32b928caa9558191f83
|
9eee1566e436a883fa3bd0266c6a7375e34ab4d7
|
/notes/cli/commands/document/modify.py
|
14ee5e002dbbd79ebb3e9e1fe2bda613817eda8d
|
[] |
no_license
|
gropax/qiq-django
|
aa87fa070bf2083aba9043ebc96c2287be2cf7e5
|
31f8c6ad717994bd9b37fcdde3fec8549be5aec1
|
refs/heads/master
| 2020-07-09T05:19:13.117991
| 2017-01-10T16:54:52
| 2017-01-10T16:54:52
| 65,904,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
import os
from core.cli.command import Command, command
from notes.cli.utils import Utils
from notes.cli.commands.document import DocumentCommand
@command('modify', DocumentCommand)
class ModifyCommand(Command, Utils):
    """CLI command that renames, re-describes, or re-syncs a document."""
    aliases = ('mod',)

    def add_arguments(self, parser):
        """Register the command-line arguments for `document modify`."""
        parser.add_argument('name_or_id', type=str, help='the name or the id of the document')
        parser.add_argument('-d', '--description', type=str,
                            help='the description of the document')
        parser.add_argument('-n', '--new-name', type=str,
                            help='the new name of the document')
        parser.add_argument('-f', '--file', type=str,
                            help='synchronize document with file')

    def action(self, args):
        """Apply the requested modifications, saving only if something changed."""
        name_or_id = args.name_or_id
        doc = self.find_document_by_name_or_id_or_error(name_or_id)
        old_name, desc_mod, file_mod = None, None, None
        new_name = args.new_name
        if new_name and new_name != doc.name:
            # Validate before mutating so an invalid name leaves doc untouched.
            self.check_document_name_is_valid(new_name)
            old_name = doc.name
            doc.name = new_name
        desc = args.description
        if desc and desc != doc.description:
            desc_mod = True
            doc.description = desc
        f = None
        if args.file:
            f = self.absolute_path(args.file)
            if f != doc.file:
                if os.path.isfile(f):
                    if not self.ask('File `%s` already exists. Synchronize it anyway ?' % f, default='no'):
                        # NOTE(review): execution appears to continue after this
                        # warning — presumably warning_operation_aborted()
                        # raises or exits; confirm.
                        self.warning_operation_aborted()
                file_mod = True
                doc.file = f
                self.synchronize_document(doc)
        if old_name or desc_mod or file_mod:
            doc.save()
            self.success_document_modified(doc, old_name, desc_mod or file_mod)
        else:
            self.warning_nothing_to_do()
|
[
"maximedelaudrin@gmail.com"
] |
maximedelaudrin@gmail.com
|
308c563f76e19aceed010d3587c7917ad4876a05
|
ccfe4eb1a0df72da1ffb35d082ce4eedcf3a55e0
|
/grade/views.py
|
63fda731f48957eefbc2fed69edd739dd1f5d9ec
|
[] |
no_license
|
chydream/djangoDemo
|
a807a8f93f59dee4ecde031388a2cdb5172b3e84
|
94df813dcf3877a46dad572d5421e33862100a0d
|
refs/heads/master
| 2022-07-17T16:34:04.343259
| 2020-05-20T03:29:10
| 2020-05-20T03:29:10
| 258,780,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
from django.db.models import Sum, Max, Count
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from grade.models import Grade, Student
def page_count(request):
    """Demo view: annotate each Student with their total score and return 'ok'.

    The commented lines are earlier aggregation experiments kept for reference.
    """
    # num = Grade.objects.filter(student_name= '张三').aggregate(total=Sum('score'))
    # print(num)
    # max_num = Grade.objects.filter(subject_name='语文').aggregate(high_score=Max('score'))
    # print(max_num)
    # sum_num = Grade.objects.values_list('student_name').annotate(Sum('score'))
    # print(sum_num)
    sum_num = Student.objects.all().annotate(Sum('stu_grade__score'))
    print(sum_num)
    for item in sum_num:
        print(item.student_name, item.stu_grade__score__sum)
    zs = Student.objects.get(pk=1)  # student with primary key 1
    list = zs.stu_grade.all()  # NOTE(review): shadows the builtin `list`
    print(list)
    return HttpResponse('ok')
|
[
"yong.chen@doone.com.cn"
] |
yong.chen@doone.com.cn
|
ee5485a406a84015222484f56a780b6e480a68cd
|
37ba62db61fc4ec62634638763a984cbfbe40fe3
|
/day40/聊天/client1.py
|
774d118ed33968502dfa2d9a623bffb169cd56ad
|
[] |
no_license
|
lt910702lt/python
|
ca2768aee91882c893a9bc6c1bdd1b455ebd511f
|
c6f13a1a9461b18df17205fccdc28f89854f316c
|
refs/heads/master
| 2020-05-09T22:54:22.587206
| 2019-09-17T09:02:53
| 2019-09-17T09:02:53
| 181,485,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
import socket
import time

# Toy chat client: connects to the local server and sends "hi" once per
# second, printing each reply.
sk = socket.socket()
sk.connect(('127.0.0.1', 8080))
while True:
    sk.send(b'hi')
    ret = sk.recv(1024).decode('utf-8')
    print(ret)
    time.sleep(1)
sk.close()  # NOTE(review): unreachable — the loop above never exits
|
[
"1103631738@qq.com"
] |
1103631738@qq.com
|
f2921abb6806d16d0f7ddc14a97f9baeff9ea3f2
|
f0856e60a095ce99ec3497b3f27567803056ac60
|
/Dacon/01newstopic_4_StratifiedKFold.py
|
4133bad05dab0a9658e9397d51e417687e2a2d98
|
[] |
no_license
|
hjuju/TF_Study-HAN
|
dcbac17ce8b8885f5fb7d7f554230c2948fda9ac
|
c0faf98380e7f220868ddf83a9aaacaa4ebd2c2a
|
refs/heads/main
| 2023-09-04T09:13:33.212258
| 2021-10-27T08:00:49
| 2021-10-27T08:00:49
| 384,371,952
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,418
|
py
|
import numpy as np
import re
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from icecream import ic
from sklearn.metrics import accuracy_score,log_loss
from sklearn.model_selection import StratifiedKFold
import datetime
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Embedding, LSTM, Dropout, Bidirectional
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.utils import plot_model, to_categorical
from tensorflow.keras.optimizers import Adam
# Load the Dacon news-topic competition CSVs.
path = './Dacon/_data/newstopic/'
train = pd.read_csv(path + 'train_data.csv',header=0, encoding='UTF8')
test = pd.read_csv(path + 'test_data.csv',header=0, encoding='UTF8')
submission = pd.read_csv(path + 'sample_submission.csv')
topic_dict = pd.read_csv(path + 'topic_dict.csv')
# drop null rows (earlier experiment, kept for reference)
# datasets_train = datasets_train.dropna(axis=0)
# datasets_test = datasets_test.dropna(axis=0)
# x = datasets_train.iloc[:, -2]
# y = datasets_train.iloc[:, -1]
# x_pred = datasets_test.iloc[:, -1]
train['doc_len'] = train.title.apply(lambda words: len(words.split()))  # word count per title
x_train = np.array([x for x in train['title']])
x_predict = np.array([x for x in test['title']])
y_train = np.array([x for x in train['topic_idx']])
def text_cleaning(docs):
    """Keep only Hangul characters and spaces in each document.

    Bug fix: the original assigned ``re.sub(...)`` to the loop variable and
    then returned *docs* unchanged, so no cleaning ever took effect.  The
    cleaned documents are now collected and returned.

    Args:
        docs: iterable of strings.

    Returns:
        list[str]: documents with every non-Hangul, non-space char removed.
    """
    return [re.sub("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]", "", doc) for doc in docs]
x = text_cleaning(x_train)
x_predict = text_cleaning(x_predict)
# ic(x.shape) ic| x.shape: (45654,)
# remove stopwords and special characters (unfinished experiment, kept for reference)
# import string
# def define_stopwords(path):
#     sw = set()
#     for i in string.punctuation:
#         sw.add(i)
#     with open(path, encoding='utf-8') as f:
#         for word in f:
#             sw.add(word)
#     return sw
# x = define_stopwords(x)
from tensorflow.keras.preprocessing.text import Tokenizer
# Fit the vocabulary on the training titles only, then index both splits.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(x)
sequences_train = tokenizer.texts_to_sequences(x)
sequences_test = tokenizer.texts_to_sequences(x_predict)
# remove empty lists --> caused an error with the bidirectional model
# sequences_train = list(filter(None, sequences_train))
# sequences_test = list(filter(None, sequences_test))
# check sequence lengths
# x1_len = max(len(i) for i in sequences_train)
# ic(x1_len) # ic| x1_len: 11
# x_pred = max(len(i) for i in sequences_test)
# ic(x_pred) # ic| x_pred: 9
xx = pad_sequences(sequences_train, padding='pre', maxlen = 14)
# ic(xx.shape) ic| xx.shape: (42477, 11)
yy = pad_sequences(sequences_test, padding='pre', maxlen=14)
y = to_categorical(y_train)  # one-hot topic labels
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, LSTM, GRU, Bidirectional
# Earlier single-direction LSTM architecture, kept for reference:
# model = Sequential()
# model.add(Embedding(input_dim=101082, output_dim=77, input_length=11))
# model.add(LSTM(128, activation='relu'))
# model.add(Dense(64, activation= 'relu'))
# model.add(Dropout(0.2))
# model.add(Dense(32, activation= 'relu'))
# model.add(Dense(7, activation='softmax'))
# Stacked bidirectional LSTM classifier over the padded title sequences;
# 7 output classes (news topics).
model = Sequential([Embedding(101082, 200, input_length =14),
                    tf.keras.layers.Bidirectional(LSTM(units = 32, return_sequences = True, activation='relu')),
                    tf.keras.layers.Dropout(0.2),
                    tf.keras.layers.Bidirectional(LSTM(units = 16, return_sequences = True, activation='relu')),
                    tf.keras.layers.Dropout(0.2),
                    tf.keras.layers.Bidirectional(LSTM(units = 8, activation='relu')),
                    Dense(7, activation='softmax')])
import datetime
import time
optimizer = Adam(learning_rate=0.0001)
model.compile(loss= 'categorical_crossentropy', optimizer= optimizer, metrics = ['acc'])
date = datetime.datetime.now()
date_time = date.strftime('%m%d_%H%M')
# Checkpoint path pattern: <dir>/test_<timestamp>_<epoch>_<val_loss>.hdf5
cvpath = './Dacon/_save/skfoldmcp/'
info = '{epoch:02d}_{val_loss:.4f}'
filepath = ''.join([cvpath, 'test', '_', date_time, '_', info, '.hdf5'])
history = model.fit(xx, y, epochs=10, batch_size=512, validation_split= 0.2)
n_fold = 5
cv = StratifiedKFold(n_splits = n_fold, shuffle=True, random_state=66)
# 테스트데이터의 예측값 담을 곳 생성
test_y = np.zeros((yy.shape[0], 7))
# 조기 종료 옵션 추가
es = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=5, verbose=1, mode='min', baseline=None, restore_best_weights=True)
cp = ModelCheckpoint(monitor='val_loss', save_best_only=True, mode='auto', verbose=1, filepath=filepath)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', verbose=1, patience=10, mode='auto', factor=0.1 )
start_time = time.time()
for i, (i_trn, i_val) in enumerate(cv.split(xx, y_train), 1):
print(f'training model for CV #{i}')
model.fit(xx[i_trn],
to_categorical(y_train[i_trn]),
validation_data=(xx[i_val], to_categorical(y_train[i_val])),
epochs=5,
batch_size=512,
callbacks=[es,cp]) # 조기 종료 옵션
test_y += model.predict(yy) / n_fold
topic = []
for i in range(len(test_y)):
topic.append(np.argmax(test_y[i]))
end_time = time.time() - start_time
submission['topic_idx'] = topic
ic(submission.shape)
date_time = datetime.datetime.now().strftime("%y%m%d_%H%M")
submission.to_csv('./Dacon/_save/csv/predict' + date_time + '.csv', index=False)
|
[
"tkackeowjs@naver.com"
] |
tkackeowjs@naver.com
|
26c8a0b64e7d7cd923089885b32824ea3c70e05b
|
3d2f5c005bbf4b4194fc105b9c2492d2bd09109c
|
/dynamic_scraper/utils/processors.py
|
bd0c67b62fb18e5f462ab21b7c780165b1adcc79
|
[] |
no_license
|
mtaziz/django-dynamic-scraper
|
e9a51a3b95a84767412df5edb9806dae5bdb69e1
|
87ae65ec97a405e03e1c2493637581bfe2545410
|
refs/heads/master
| 2021-01-22T01:43:55.587001
| 2016-01-07T09:43:51
| 2016-01-07T09:43:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,393
|
py
|
#Stage 2 Update (Python 3)
from __future__ import unicode_literals
from builtins import str
import datetime
from scrapy import log
def string_strip(text, loader_context):
    """Strip the configured character set (default whitespace) from both ends.

    Non-string input is stringified first; the characters to strip come
    from the 'string_strip' context key.
    """
    chars = loader_context.get('string_strip', ' \n\t\r')
    value = text if isinstance(text, str) else str(text)
    return value.strip(chars)
def pre_string(text, loader_context):
    """Prepend the 'pre_string' context value (default empty) to *text*."""
    prefix = loader_context.get('pre_string', '')
    return prefix + text
def post_string(text, loader_context):
    """Append the 'post_string' context value (default empty) to *text*."""
    suffix = loader_context.get('post_string', '')
    return text + suffix
def pre_url(text, loader_context):
    """Prepend the 'pre_url' base to a (possibly relative) URL.

    An already-absolute URL is returned unchanged; a duplicate slash at
    the join point is collapsed.

    Bug fix: the old check only recognized 'http://' — and only when the
    base itself started with 'http://' — so absolute 'https://' links (or
    absolute links combined with a relative base) were wrongly prefixed,
    producing broken URLs like 'http://base/https://site/page'.
    """
    base = loader_context.get('pre_url', '')
    if text.startswith(('http://', 'https://')):
        # Absolute URL: never prepend the base.
        return text
    if base.endswith('/') and text.startswith('/'):
        # Avoid a double slash where base and path meet.
        base = base[:-1]
    return base + text
def replace(text, loader_context):
    """Discard the scraped value and return the 'replace' context value."""
    return loader_context.get('replace', '')
def static(text, loader_context):
    """Ignore the scraped value and return the 'static' context value."""
    return loader_context.get('static', '')
def date(text, loader_context):
    """Normalize *text* to an ISO 'YYYY-MM-DD' date string.

    Understands the German/English relative words gestern/yesterday,
    heute/today and morgen/tomorrow; any other value is parsed with the
    strptime format given under the 'date' context key.  Returns None
    (after logging through the spider) when parsing fails.
    """
    cformat = loader_context.get('date')
    # Relative keywords mapped to their day offset from today.
    offsets = {'gestern': -1, 'yesterday': -1,
               'heute': 0, 'today': 0,
               'morgen': 1, 'tomorrow': 1}
    try:
        keyword = text.lower()
        if keyword in offsets:
            parsed = datetime.date.today() + datetime.timedelta(offsets[keyword])
        else:
            parsed = datetime.datetime.strptime(text, cformat)
    except ValueError:
        loader_context.get('spider').log('Date could not be parsed ("%s", Format string: "%s")!' % (text, cformat), log.ERROR)
        return None
    return parsed.strftime('%Y-%m-%d')
def time(text, loader_context):
    """Normalize *text* to 'HH:MM:SS' using the 'time' strptime format.

    Returns None (after logging through the spider) when parsing fails.
    """
    cformat = loader_context.get('time')
    try:
        parsed = datetime.datetime.strptime(text, cformat)
    except ValueError:
        loader_context.get('spider').log('Time could not be parsed ("%s", Format string: "%s")!' % (text, cformat), log.ERROR)
        return None
    return parsed.strftime('%H:%M:%S')
def ts_to_date(ts_str, loader_context):
    """Convert a Unix-timestamp string to a local 'YYYY-MM-DD' date.

    Returns None (after logging through the spider) when the value is not
    an integer or the timestamp cannot be converted.
    """
    try:
        moment = datetime.datetime.fromtimestamp(int(ts_str))
        return moment.strftime('%Y-%m-%d')
    except ValueError:
        loader_context.get('spider').log('Timestamp could not be parsed ("%s")!' % ts_str, log.ERROR)
        return None
def ts_to_time(ts_str, loader_context):
    """Convert a Unix-timestamp string to a local 'HH:MM:SS' time.

    Returns None (after logging through the spider) when the value is not
    an integer or the timestamp cannot be converted.
    """
    try:
        moment = datetime.datetime.fromtimestamp(int(ts_str))
        return moment.strftime('%H:%M:%S')
    except ValueError:
        loader_context.get('spider').log('Timestamp could not be parsed ("%s")!' % ts_str, log.ERROR)
        return None
def _breakdown_time_unit_overlap(time_str, limit):
time_list = time_str.split(':')
first = int(time_list[0])
if first >= limit:
time_list[0] = str(first % limit)
time_list.insert(0, str(first // limit))
else:
if(len(time_list[0]) == 1):
time_list[0] = '0' + time_list[0]
time_list.insert(0, '00')
time_str = ':'.join(time_list)
return time_str
def duration(text, loader_context):
    """Normalize a duration string to 'HH:MM:SS'.

    The 'duration' context key names the format the raw value arrives in
    ('%H:%M', '%M', '%M:%S' or '%S').  Shorter formats are first completed
    (a bare integer gets ':00' appended) and overflowing units (e.g. 75
    minutes) are broken down via _breakdown_time_unit_overlap before the
    final strptime validation.  Returns None (after logging through the
    spider) when parsing fails.
    """
    cformat = loader_context.get('duration')
    #Value completion in special cases
    # text_int stays None unless the whole value is a bare integer.
    text_int = None
    try:
        text_int = int(text)
    except ValueError:
        pass
    if(cformat == '%H:%M'):
        if text_int:
            # A bare hour count: append zero minutes.
            text += ':00'
    if(cformat == '%M'):
        # Minutes may exceed 59; fold the overflow into hours.
        text = _breakdown_time_unit_overlap(text, 60)
        cformat = '%H:%M'
    if(cformat == '%M:%S'):
        if text_int:
            # A bare minute count: append zero seconds.
            text += ':00'
        text = _breakdown_time_unit_overlap(text, 60)
        cformat = '%H:%M:%S'
    if(cformat == '%S'):
        if text_int:
            if text_int >= 3600:
                # An hour or more of seconds: split whole hours off first,
                # then break the remaining seconds into minutes:seconds.
                hours_str = str(text_int // 3600) + ':'
                secs_under_hour_str = str(text_int % 3600)
                text = hours_str + _breakdown_time_unit_overlap(secs_under_hour_str, 60)
                cformat = '%H:%M:%S'
            else:
                text = _breakdown_time_unit_overlap(text, 60)
                cformat = '%M:%S'
    try:
        duration = datetime.datetime.strptime(text, cformat)
    except ValueError:
        loader_context.get('spider').log('Duration could not be parsed ("%s", Format string: "%s")!' % (text, cformat), log.ERROR)
        return None
    return duration.strftime('%H:%M:%S')
|
[
"Holger.Drewes@googlemail.com"
] |
Holger.Drewes@googlemail.com
|
8e45a5d0f33b6ef3c3ec1f9a9cbe1fafc2635b40
|
aef4faef04d851fe24f2b3f28ae98daa2152e543
|
/spikestim/negbin_bayes.py
|
c4b32098dcf07116803003630d8ad78e1e479204
|
[] |
no_license
|
dattalab/spikestim
|
b1580eec250e0a6b03796b200dfe22b15112228b
|
631152b3f173dc6c8fa2601ad917f899dc1210b9
|
refs/heads/master
| 2021-01-21T08:44:00.379078
| 2015-12-04T13:53:51
| 2015-12-04T13:53:51
| 45,796,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
from __future__ import division
import numpy as np
from pybasicbayes.distributions import NegativeBinomial
'''
The code in this file provides utilities for Bayesian estimation of negative
binomial parameters through MCMC methods provided by pybasicbayes.
The main function to use is get_posterior_samples(data, num_samples).
The NB class sets the prior to use sensible defaults, namely
p ~ Beta(alpha=1., beta=1.)
r ~ Gamma(k=1., theta=1)
That is, the prior on p is uniform on [0,1] and the prior on r is exponential
with rate 1.
'''
class NB(NegativeBinomial):
    """Negative binomial distribution with sensible default priors.

    As described in the module docstring, the defaults are
        p ~ Beta(alpha=1., beta=1.)   (uniform on [0, 1])
        r ~ Gamma(k=1., theta=1.)     (exponential with rate 1)
    """
    def __init__(self, r=None, p=None, alpha=1., beta=1., k=1., theta=1.):
        # Map the friendlier prior-parameter names onto pybasicbayes'
        # *_0 hyperparameter naming convention.
        super(NB, self).__init__(
            r=r, p=p, alpha_0=alpha, beta_0=beta, k_0=k, theta_0=theta)
def get_posterior_samples(data, num_samples):
    """Draw MCMC samples of (r, p) from the negative binomial posterior.

    Parameters
    ----------
    data : array-like of counts, converted to a C-contiguous array.
    num_samples : int
        Number of Gibbs resampling sweeps; one (r, p) pair is recorded
        after each sweep.

    Returns
    -------
    list of (r, p) tuples, one per sample.
    """
    distn = NB()
    data = np.require(data, requirements='C')
    samples = []
    # Bug fix: `xrange` is a NameError on Python 3; `range` behaves
    # equivalently here on both Python 2 and 3.
    for _ in range(num_samples):
        distn.resample(data)
        samples.append((distn.r, distn.p))
    return samples
# these next two functions are redundant with negbin_maxlike.py, but use
# pybasicbayes implementations instead
def negbin_loglike(r, p, x):
    """Log-likelihood of *x* under NegativeBinomial(r, p), via pybasicbayes."""
    distn = NB(r=r, p=p)
    return distn.log_likelihood(x)
def negbin_sample(r, p, size):
    """Draw *size* samples from NegativeBinomial(r, p), via pybasicbayes."""
    distn = NB(r=r, p=p)
    return distn.rvs(size)
|
[
"mattjj@csail.mit.edu"
] |
mattjj@csail.mit.edu
|
e1f6613cc9681c42b7d3e9225e62499c04ee0236
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2/VelvetTie/pancakes.py
|
c27cdec091f729b20528e32b3b22c5d62d391cef
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,853
|
py
|
import sys
def flip(to_flip):
    """Flip a pancake sub-stack: reverse the order and invert every sign.

    >>> flip('-')
    '+'
    >>> flip('+')
    '-'
    >>> flip('--+')
    '-++'
    >>> flip('-+')
    '-+'
    """
    # Inverting signs and reversing commute, so do them in either order.
    inverted = to_flip.translate(str.maketrans('+-', '-+'))
    return inverted[::-1]
def count_flips(pancakes):
    """Return how many maximal-prefix flips make every pancake '+'-side up.

    >>> count_flips('+')
    0
    >>> count_flips('-')
    1
    >>> count_flips('+-')
    2
    >>> count_flips('--+-')
    3
    """
    # Already all happy: nothing to do.
    if '-' not in pancakes:
        return 0
    # All sad: one whole-stack flip finishes the job.
    if '+' not in pancakes:
        pancakes = flip(pancakes)
        return 1
    # Mixed stack: flip the maximal uniform prefix, then recurse.
    ref_sign = pancakes[0]
    prefix_len = 0
    while prefix_len < len(pancakes) and pancakes[prefix_len] == ref_sign:
        prefix_len += 1
    to_flip = pancakes[:prefix_len]
    print("to_flip={}".format(to_flip), file=sys.stderr)
    new_stack = flip(to_flip) + pancakes[prefix_len:]
    print("new_stack={}".format(new_stack), file=sys.stderr)
    return 1 + count_flips(new_stack)
def read_input():
    """Read the case count, then one pancake stack per line; print answers."""
    num_test_cases = int(input())
    for case_no in range(1, num_test_cases + 1):
        answer = count_flips(input())
        print("Case #{}: {}".format(case_no, answer))


if __name__ == "__main__":
    read_input()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
b6d9a025d803dd7983357c17927e7543de6c6a3d
|
c2415d039d12cc3b1d587ce497527ff62867df41
|
/authentication/config.py
|
618701e23120609d0033d92185799d74b676d777
|
[
"MIT"
] |
permissive
|
admin-dashboards/django-dashboard-light
|
dc207c07d1987b7b4af52c449502ccd797d4d979
|
96142cf7f9e807e575a1d444e1bb291d3f956652
|
refs/heads/master
| 2022-09-04T16:48:08.087092
| 2022-08-23T07:37:03
| 2022-08-23T07:37:03
| 232,156,115
| 1
| 0
|
MIT
| 2020-01-06T18:02:01
| 2020-01-06T18:02:00
| null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.apps import AppConfig
class AuthConfig(AppConfig):
    """Django application configuration for the authentication app.

    NOTE(review): the app is registered under the label 'authcfg' while
    the source file lives in an 'authentication' package -- confirm
    INSTALLED_APPS references the same label.
    """
    name = 'authcfg'
|
[
"developer@rosoftware.ro"
] |
developer@rosoftware.ro
|
a42a79d97b8e29b148476dc6d40564bfd92ecc97
|
d644b6cabb4fa88cf900c59799a2897f5a0702d8
|
/tests/base_tests/polygon_tests/test_triangulate.py
|
47fe7d21c053c0b7be0a50fce9abd3e709ea7763
|
[
"MIT"
] |
permissive
|
lycantropos/gon
|
c3f89a754c60424c8e2609e441d7be85af985455
|
177bd0de37255462c60adcbfcdf76bfdc343a9c1
|
refs/heads/master
| 2023-07-06T01:11:57.028646
| 2023-06-26T20:47:14
| 2023-06-27T00:30:06
| 194,597,548
| 15
| 1
|
MIT
| 2023-06-27T00:30:07
| 2019-07-01T04:06:06
|
Python
|
UTF-8
|
Python
| false
| false
| 546
|
py
|
from functools import reduce
from operator import or_
from hypothesis import given
from gon.base import (Polygon,
Triangulation)
from . import strategies
@given(strategies.polygons)
def test_basic(polygon: Polygon) -> None:
    # Property: triangulating any polygon yields a Triangulation instance.
    result = polygon.triangulate()
    assert isinstance(result, Triangulation)
@given(strategies.polygons)
def test_round_trip(polygon: Polygon) -> None:
    # Property: the union of all triangles reconstructs the original polygon.
    result = polygon.triangulate()
    assert (reduce(or_, [Polygon(contour) for contour in result.triangles()])
            == polygon)
|
[
"azatibrakov@gmail.com"
] |
azatibrakov@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.