blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
31fe857b5336046fc4b93282f053a38847cfe0bf | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/program/model/listing/Bookmark.pyi | daf5c68be46b4922037f73b7817afe92f2eb5835 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,124 | pyi | import ghidra.program.model.address
import ghidra.program.model.listing
import java.lang
class Bookmark(java.lang.Comparable, object):
"""
Interface for bookmarks. Bookmarks are locations that are marked within the program so
that they can be easily found.
"""
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getAddress(self) -> ghidra.program.model.address.Address:
"""
Returns address at which this bookmark is applied.
"""
...
def getCategory(self) -> unicode:
"""
Returns bookmark category
"""
...
def getClass(self) -> java.lang.Class: ...
def getComment(self) -> unicode:
"""
Returns bookmark comment
"""
...
def getId(self) -> long:
"""
Returns the id of the bookmark.
"""
...
def getType(self) -> ghidra.program.model.listing.BookmarkType:
"""
Returns bookmark type object.
"""
...
def getTypeString(self) -> unicode:
"""
Returns bookmark type as a string
"""
...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def set(self, category: unicode, comment: unicode) -> None:
"""
Set the category and comment associated with a bookmark.
@param category category
@param comment single line comment
"""
...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def address(self) -> ghidra.program.model.address.Address: ...
@property
def category(self) -> unicode: ...
@property
def comment(self) -> unicode: ...
@property
def id(self) -> long: ...
@property
def type(self) -> ghidra.program.model.listing.BookmarkType: ...
@property
def typeString(self) -> unicode: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
475b498730558cf3635706a19a5fa90410feb142 | 5e8a936891f0687a28425cef248a49480511119d | /pydl/photoop/sdssio/sdssflux2ab.py | 47c8f6487d8d12f42405f6e9f4201e85ccf8ddba | [
"BSD-3-Clause"
] | permissive | bsipocz/pydl | 426a4cdae003b8c4e86249ec36005925d8ffa341 | f8719699c71806f9b55dd41f843e5f35c64c770b | refs/heads/master | 2020-12-07T15:43:01.554352 | 2015-09-24T03:15:14 | 2015-09-24T03:15:14 | 45,721,009 | 0 | 0 | null | 2015-11-07T03:30:30 | 2015-11-07T03:30:29 | null | UTF-8 | Python | false | false | 1,515 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
def sdssflux2ab(flux,magnitude=False,ivar=False):
"""Convert the SDSS calibrated fluxes (magnitudes) into AB fluxes (magnitudes)
Parameters
----------
flux : :class:`numpy.ndarray`
Array of calibrated fluxes or SDSS magnitudes with 5 columns,
corresponding to the 5 filters u,g,r,i,z
magnitude : :class:`bool`, optional
If set to ``True``, then assume `flux` are SDSS magnitudes instead of linear
flux units
ivar : :class:`numpy.ndarray`, optional
If set, the input fluxes are actually inverse variances.
Returns
-------
sdssflux2ab : :class:`numpy.ndarray`
Array of fluxes or magnitudes on the AB system.
Notes
-----
Uses the conversions posted by D.Hogg (sdss-calib/845)::
u(AB,2.5m) = u(2.5m) - 0.042
g(AB,2.5m) = g(2.5m) + 0.036
r(AB,2.5m) = r(2.5m) + 0.015
i(AB,2.5m) = i(2.5m) + 0.013
z(AB,2.5m) = z(2.5m) - 0.002
"""
import numpy as np
#
# Correction vector, adjust this as necessary
#
correction = np.array([-0.042, 0.036, 0.015, 0.013, -0.002])
rows, cols = flux.shape
abflux = flux.copy()
if magnitude:
for i in range(rows):
abflux[i,:] += correction
else:
factor = 10.0**(-correction/2.5)
if ivar:
factor = 1.0/factor**2
for i in range(rows):
abflux[i,:] *= factor
return abflux
| [
"benjamin.weaver@nyu.edu"
] | benjamin.weaver@nyu.edu |
2029faea3ce5dd5bad299577b11c46950ad7913a | 9ddee35a496f1b99d4f9b634711d30ad9f05a2fa | /case/httpdns/1070.py | 7e2515e5721ac05c0e0485a9afdee9851e57738d | [] | no_license | namesuqi/pirate | 79824f231990030bdce9e092b300bb58d4ba0024 | 9308fbfa021f1238c2bd6faeb7d82c2934570140 | refs/heads/master | 2020-03-07T04:19:07.041355 | 2018-03-29T08:44:29 | 2018-03-29T08:44:29 | 127,263,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,818 | py | #!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
from libs.database.etcd_handler import *
if __name__ == "__main__":
# 需要根据当前实际配置修改
ttl_conf_old = {"report.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"channel.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"upgradev2.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"opt.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"hls.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"stats.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"seeds.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"vodtest.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"stun2.crazycdn.com": {"ips": {"default": ["118.190.148.163"]}, "ttl": 1800},
"live-ch.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"ts.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"control.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
"errlogs.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800}}
# print(read_etcd_key('/business/httpdns/v2/domain_ip_map/default'))
ttl_conf_1 = {"report.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"channel.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"upgradev2.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"opt.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"hls.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"stats.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"seeds.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"vodtest.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"stun2.crazycdn.com": {"ips": {"default": ["118.190.148.163"]}, "ttl": 300},
"live-ch.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"ts.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"control.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
"errlogs.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300}}
set_etcd_key('default', ttl_conf_old, '/business/httpdns/v2/domain_ip_map/') | [
"suqi_name@163.com"
] | suqi_name@163.com |
0f4cf276661a0de3bf63c88b9d994d2d5f69c7d8 | 4b8724ec7bb224ff942264384dd7cb9c96676789 | /car_project/apps/car_app/apps.py | 5918e5de83ee63f6642071520899f8fe9ebc0126 | [] | no_license | JeffLawrence1/Python-Django-Advanced | b4822528bbaa5a235817121b32407c11408d8932 | b41018d907f06aeb7ca05f1f4b030eedf58471e7 | refs/heads/master | 2020-03-09T07:59:11.954262 | 2018-04-08T20:15:45 | 2018-04-08T20:15:45 | 128,678,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class CarAppConfig(AppConfig):
name = 'car_app'
| [
"jefflaw13@hotmail.com"
] | jefflaw13@hotmail.com |
82c22ab3f3403a1a2baf72e25753d76f9ac0fb61 | 761b013bc7cd98850f3f26e3f0b0a8e4ec8fdf7d | /arm.py | 833bd4af8bd165347ac2ea8e3bc4affa52299173 | [] | no_license | tjmode/placement | 72e6bca120cb785cac3fc829d84b921af8add01a | ac5af6394e74212bf44510af837a144254ca2f99 | refs/heads/master | 2020-04-15T16:36:03.489076 | 2019-07-31T06:14:51 | 2019-07-31T06:14:51 | 164,842,584 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | num = int(input())
order = len(str(num))
sum = 0
temp = num
while temp > 0:
digit = temp % 10
sum += digit ** order
temp //= 10
if num == sum:
print("yes")
else:
print("no")
| [
"noreply@github.com"
] | tjmode.noreply@github.com |
f0e6d7d0c53802aa01ca3c267ba04ebe3f7bb546 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-1/vse-naloge-brez-testov/DN7-M-182.py | 4b4ce11c5808c697cbb1e47ed4e9be02e9751e48 | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,845 | py | # To funkcijo prijazno podarjam vsem, ki bodo programirali v eni vrstici. :)
# Kako jo uporabiti, je v navodilih. Kdor je ne potrebuje, naj jo ignorira.
def vsa_polja(s, v):
"""
Generiraj vse koordinate (x, y) za polje s podano širino in višino
Args:
s (int): širina
v (int): višina
Returns:
generator parov polj
"""
return ((x, y) for x in range(s) for y in range(v))
########################
# Za oceno 6
def sosedov(x, y, mine):
"""
Vrni število sosedov polja s koordinatami `(x, y)` na katerih je mina.
Polje samo ne šteje.
Args:
x (int): koordinata x
y (int): koordinata y
mine (set of tuple of int): koordinate min
Returns:
int: število sosedov
"""
def najvec_sosedov(mine, s, v):
"""
Vrni koordinati polja z največ sosednjih min
Args:
mine (set of (int, int)): koordinate min
s (int): širina polja
v (int): višina polja
Returns:
tuple of int: koordinati polja
"""
def brez_sosedov(mine, s, v):
"""
Vrni množico koordinat polj brez min na sosednjih poljih. Polje samo lahko
vsebuje mino.
Args:
mine (set of tuple of int): koordinate min
s (int): širina polja
v (int): višina polja
Returns:
set of tuple: polja brez min na sosednjih poljih
"""
def po_sosedih(mine, s, v):
"""
Vrni slovar, katerega ključi so možna števila sosednjih polj z minami
(torej števila od 0 do 8), vrednosti pa množice koordinat polj s toliko
sosedami.
Args:
mine (set of tuple of int): koordinate min
s (int): širina polja
v (int): višina polja
Returns:
dict: (glej zgoraj)
"""
########################
# Za oceno 7
def dolzina_poti(pot):
"""
Vrni dolžino podane poti, vključno z vmesnimi polji.
Args:
pot (list of tuple): seznam koordinat polj
Returns:
int: dolžina poti
"""
def varen_premik(x0, y0, x1, y1, mine):
"""
Vrni `True`, če je pomik z (x0, y0) and (x1, y1) varen, `False`, če ni.
Args:
x0 (int): koordinata x začetnega polja
y0 (int): koordinata y začetnega polja
x1 (int): koordinata x končnega polja
y1 (int): koordinata y končnega polja
mine (set of tuple of int): koordinate min
Returns:
bool: `True`, če je premik varen, `False`, če ni.
"""
def varna_pot(pot, mine):
"""
Vrni `True`, če je podana pot varna, `False`, če ni.
Args:
pot (list of tuple of int): koordinate točk na poti (brez vmesnih točk)
mine (set of tuple of int): koordinate min
Returns:
bool: `True`, če je pot varna, `False`, če ni.
"""
########################
# Za oceno 8
def polje_v_mine(polje):
"""
Vrni koordinate min v podanem polju.
Niz polje opisuje polje tako, da so vodoravne "vrstice" polja ločene s
presledki. Prosta polja so označena z znako `.`, mine z `X`.
Args:
polje (str): polje
Returns:
mine (set of tuple of int): koordinate min
s (int): širina polja
v (int): višina polja.
"""
########################
# Za oceno 9
#
# Vse funkcije za oceno 6 in 7 morajo biti napisane v eni vrstici.
########################
# Za oceno 10
def preberi_pot(ukazi):
"""
Za podani seznam ukazov (glej navodila naloge) vrni pot.
Args:
ukazi (str): ukazi, napisani po vrsticah
Returns:
list of tuple of int: pot
"""
def zapisi_pot(pot):
"""
Za podano pot vrni seznam ukazov (glej navodila naloge).
Args:
pot (list of tuple of int): pot
Returns:
str: ukazi, napisani po vrsticah
"""
def sosedov(x, y, mine):
stevec=0
for x1, y1 in mine:
if ((x1 + 1) == x) and y1 == y:
stevec = stevec + 1
if ((x1 - 1) == x) and y1 == y:
stevec = stevec + 1
if (((x1 + 1) == x) and (y1 + 1 == y)):
stevec = stevec + 1
if (((x1 - 1) == x) and (y1 + 1 == y)):
stevec = stevec + 1
if ((x1 == x) and (y1 + 1 == y)):
stevec = stevec + 1
if (((x1 + 1) == x) and (y1 - 1 == y)):
stevec = stevec + 1
if (((x1 - 1) == x) and (y1 - 1 == y)):
stevec = stevec + 1
if ((x1 == x) and (y1 - 1 == y)):
stevec = stevec + 1
return stevec
def najvec_sosedov(mine, s, v):
največ_min=0
maskimum_koordinate=0,0
for x,y in vsa_polja(s,v):
vsota_min=sosedov(x,y,mine)
if vsota_min>največ_min:
največ_min=vsota_min
maskimum_koordinate=x,y
return maskimum_koordinate
def brez_sosedov(mine, s, v):
seznam_koordinat=set()
koordinate_brez_min=0,0
for x,y in vsa_polja(s,v):
brez_min=sosedov(x,y,mine)
if brez_min==0:
koordinate_brez_min=x,y
seznam_koordinat.add(koordinate_brez_min)
return seznam_koordinat
def po_sosedih(mine, s, v):
slovar_sosedi={}
i=0
while i <= 8:
seznam_koordinat = set()
for x, y in vsa_polja(s, v):
brez_min = sosedov(x, y, mine)
if brez_min == i:
seznam_koordinat.add((x,y))
slovar_sosedi[i]=seznam_koordinat
i=i+1
return slovar_sosedi
def dolzina_poti(pot):
dolzina = 0
razdalja2 = 0
razdalja1 = 0
for x, y in pot:
x1 = x
y1 = y
break
for x, y in pot:
razdalja1 = y - y1
if razdalja1 < 0:
razdalja1 = razdalja1 + 2 * (-razdalja1)
dolzina = dolzina + razdalja1
razdalja2 = x - x1
if razdalja2 < 0:
razdalja2 = razdalja2 + 2 * (-razdalja2)
dolzina = dolzina + razdalja2
x1 = x
y1 = y
return (dolzina)
def varen_premik(x0, y0, x1, y1, mine):
for a, b in mine:
if (x0 <= a <= x1 or x1 <= a <= x0) and (y0 <= b <= y1 or y1 <= b <= y0):
return False
return True
def varna_pot(pot, mine):
for x0, y0 in pot:
for a, b in mine:
if x0 == a and y0 == b:
return False
for (x0, y0), (x1, y1) in zip(pot, pot[1:]):
for a, b in mine:
if (x0 <= a <= x1 or x1 <= a <= x0) and (y0 <= b <= y1 or y1 <= b <= y0):
return False
return True
def polje_v_mine(polje):
sirina=len(polje.split()[0])
visina=len(polje.split())
x = 0
y = 0
stevec_y = 1
mnozica_min=set()
for p in polje:
if p == ".":
x = x + 1
if p == " ":
y = y + 1
x = 0
if p == "X":
mina = x, y
x = x + 1
mnozica_min.add(mina)
return (mnozica_min,sirina,visina)
| [
"lenart.motnikar@gmail.com"
] | lenart.motnikar@gmail.com |
a63c062cdb9013fbce1c5eaec2a18d8d71f46f16 | 220dd5d666b85b716d5762097fb2d320bb8587fd | /test/int/kth_largest.py | 0deb333531b86715608b4220b28bf4dfb7edd0fa | [] | no_license | bensenberner/ctci | 011946a335396def198acaa596d2b5902af7b993 | 1e1ba64580ee96392c92aa95904c4751e32d4e30 | refs/heads/master | 2021-01-10T14:09:01.698323 | 2020-09-06T04:17:53 | 2020-09-06T04:17:53 | 45,659,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | import unittest
from int.kth_largest import kth_largest
class Test(unittest.TestCase):
def test(self):
arr = [7, 5, 2, 7, 1, 8, 3]
self.assertEqual(5, kth_largest(arr, 4))
def test_simple(self):
arr = [3, 2, 1]
self.assertEqual(3, kth_largest(arr, 1))
self.assertEqual(2, kth_largest(arr, 2))
| [
"benlerner95@gmail.com"
] | benlerner95@gmail.com |
9c9fec3a233bcb7fb1af897298b1c336ccdc7b53 | f6c103bd11b6a7fe92a2fc48562a06c87a60ac51 | /lard/data.py | e20f63ae7dd15b080427aa653f9a03ea5b66aa0e | [] | no_license | brettviren/lard | d17405ab2df1e8f8ef22a2706f0dcad3fe5b06e7 | 429d71c9912b62f47d18e25c5063eb66d36f6d93 | refs/heads/master | 2021-01-10T14:21:59.327286 | 2015-05-25T02:59:01 | 2015-05-25T02:59:01 | 36,177,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | #!/usr/bin/env python
'''The lard data model.
The objects in this module make up an internal, transient data schema.
Modules under lard.adapters produce this data model and modules under
lard.renderers accept it to produce some end form.
'''
from collections import namedtuple
ScalarPoint = namedtuple('ScalarPoint', 'x y z s')
def CastScalarPoint(d):
if type(d) == dict:
return ScalarPoint(float(d['x']),float(d['y']),float(d['z']),float(d['s']))
if type(d) == tuple or type(d) == list:
return ScalarPoint(float(d[0]),float(d[1]),float(d[2]),float(d[3]))
return ScalarPoint(float(d.x),float(d.y),float(d.z),float(d.s))
#from schema import Schema, Use, Optional
from voluptuous import Schema, Optional
# schema v1 is a pair of lists of scalar point values, for simulation "truth" and reconstructed.
schema_v1 = Schema(
{
Optional('truth'): [CastScalarPoint],
Optional('recon'): [CastScalarPoint],
}
)
# Most recent version
schema = schema_v1
def validate(data, version=None):
'Validate data against schema'
if version is None:
return schema(data)
if version == 'v1' or version == 1:
return schema_v1(data)
return version(data) # assume version is a Schema object
| [
"brett.viren@gmail.com"
] | brett.viren@gmail.com |
8a65e97c3ab254c2ee98390b1357635b7e212d35 | d768f07ed90c0274e2d9d935eaf5ecfe734a1f56 | /fit_mcmc_ps.py | 2f483e28f42d7cbdab267fa0e28c4998c1cd56ba | [] | no_license | bvillasen/simulation_analysis | cfd0b5de865d2fb5992d828b2824079e6798774b | 645f0c397172ed30a713368942eec9ca68a9761a | refs/heads/master | 2023-06-02T19:06:39.851760 | 2021-06-25T18:40:58 | 2021-06-25T18:40:58 | 298,894,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,117 | py | import os, sys
import numpy as np
import pickle
import matplotlib.pyplot as plt
sys.path.append('tools')
from tools import *
#Append analysis directories to path
extend_path()
from parameters_UVB_rates import param_UVB_Rates
from simulation_grid import Simulation_Grid
from simulation_parameters import *
from mcmc_functions import *
from mcmc_data_functions import *
from data_thermal_history import *
from mcmc_plotting_functions import *
from mcmc_sampling_functions import *
# data_sets = [ 'Boss', 'Walther', 'Boera', 'Viel' ]
data_ps_sets = [ 'Boss' ]
# data_sets = [ 'Walther' ]
# data_sets = [ 'Boera' ]
# data_sets = [ 'Boss', 'Walther' ]
# data_sets = [ 'Walther', 'Boera' ]
# data_sets = [ 'Walther', 'Viel' ]
name = ''
for data_set in data_ps_sets:
name += data_set + '_'
name = name[:-1]
field = 'P(k)+'
ps_data_dir = 'lya_statistics/data/'
mcmc_dir = root_dir + 'fit_mcmc/'
create_directory( mcmc_dir )
output_dir = mcmc_dir + f'fit_results_{field}_{name}/'
create_directory( output_dir )
# load_mcmc_results = False
load_mcmc_results = True
SG = Simulation_Grid( parameters=param_UVB_Rates, sim_params=sim_params, job_params=job_params, dir=root_dir )
SG.Load_Grid_Analysis_Data()
ps_range = SG.Get_Power_Spectrum_Range( kmax=0.01 )
sim_ids = SG.sim_ids
z_min = 2.0
z_max = 5.0
ps_extras = { 'range':ps_range, 'data_dir':ps_data_dir, 'data_sets':data_ps_sets }
comparable_data = Get_Comparable_Composite( field, z_min, z_max, ps_extras=ps_extras )
comparable_grid = Get_Comparable_Composite_from_Grid( field, comparable_data, SG )
# Plot_Comparable_Data( field, comparable_data, comparable_grid, output_dir )
z_vals = [ 2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.6, 5.0, ]
data_grid, data_grid_power_spectrum = Get_Data_Grid_Composite( field, SG, z_vals=z_vals )
stats_file = output_dir + 'fit_mcmc.pkl'
samples_file = output_dir + 'samples_mcmc.pkl'
params = SG.parameters
if load_mcmc_results:
print( f'Loading File: {stats_file}')
stats = pickle.load( open( stats_file, 'rb' ) )
param_stats = {}
for p_id in params.keys():
p_name = params[p_id]['name']
p_stats = stats[p_name]
params[p_id]['mean'] = p_stats['mean']
params[p_id]['sigma'] = p_stats['standard deviation']
print( f'Loading File: {samples_file}')
param_samples = pickle.load( open( samples_file, 'rb' ) )
else:
nIter = 200000
nBurn = nIter / 5
nThin = 1
# model, params_mcmc = mcmc_model_3D( comparable_data, comparable_grid, field, 'mean', SG )
model, params_mcmc = mcmc_model_4D( comparable_data, comparable_grid, field, 'mean', SG )
MDL = pymc.MCMC( model )
MDL.sample( iter=nIter, burn=nBurn, thin=nThin )
stats = MDL.stats()
param_stats = {}
for p_id in params.keys():
p_name = params[p_id]['name']
p_stats = stats[p_name]
params[p_id]['mean'] = p_stats['mean']
params[p_id]['sigma'] = p_stats['standard deviation']
Plot_MCMC_Stats( stats, MDL, params_mcmc, stats_file, output_dir, plot_corner=False )
param_samples = Write_MCMC_Results( stats, MDL, params_mcmc, stats_file, samples_file, output_dir )
# Make Corner plot from posteriors
labels = { 'scale_He':r'$\beta_{\mathrm{He}}$', 'scale_H':r'$\beta_{\mathrm{H}}$', 'deltaZ_He':r'$\Delta z_{\mathrm{He}}$', 'deltaZ_H':r'$\Delta z_{\mathrm{H}}$' }
Plot_Corner( param_samples, labels, output_dir )
# Get the Highest_Likelihood parameter values
params_HL = Get_Highest_Likelihood_Params( param_samples, n_bins=100 )
hpi_sum = 0.95
n_samples = 1000
# Obtain distribution of the power spectrum
samples_ps = Sample_Power_Spectrum_from_Trace( param_samples, data_grid_power_spectrum, SG, hpi_sum=hpi_sum, n_samples=n_samples, params_HL=params_HL )
Plot_Power_Spectrum_Sampling( samples_ps, ps_data_dir, output_dir, scales='large', system=system )
#
# # Obtain distribution of the other fields
# field_list = ['T0']
# samples_fields = Sample_Fields_from_Trace( field_list, param_samples, data_grid, SG, hpi_sum=hpi_sum, n_samples=n_samples, params_HL=params_HL )
# Plot_T0_Sampling( samples_fields['T0'], comparable_data, output_dir, system=system )
| [
"bvillasen@gmail.com"
] | bvillasen@gmail.com |
da37392ae50890a6cbecaf0cfad0d4d00faaf40f | d6c66cea8c8a91681c2c913f663c2d3ea5d73f0c | /0. Introduction/copyspecial/copyspecial.py | bfe916fef6a1405d9a91d828836e8660b2a43ad1 | [] | no_license | drewlinsley/CLPS1950_assignments | 28cc578cd981b417078a14af0fd362801a805a72 | 10f1df11d8270b0865ad03eb02e36e7b2f010159 | refs/heads/master | 2020-03-28T03:51:11.884643 | 2018-09-06T13:14:02 | 2018-09-06T13:14:02 | 147,675,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# https://developers.google.com/edu/python/exercises/copy-special
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print "error: must specify one or more dirs"
sys.exit(1)
# +++your code here+++
# Call your functions
if __name__ == "__main__":
main()
| [
"drewlinsley@gmail.com"
] | drewlinsley@gmail.com |
81035d4ccc746d19a13221ac8581635b6ffb7cda | ba66da3901361854b9bb621586f1e49ad0121ee0 | /正式开班/第十三天/网络编程/deepin_test.py | 9fb856a8503ceb648ae75190f86d2fa1bd0780e4 | [] | no_license | luobodage/PythonBasis | c4739920055afbda03774d90151ab183a83583f8 | ea65536e759fec221a70d7647ae86120277d5459 | refs/heads/master | 2023-05-14T15:51:56.213282 | 2021-05-31T00:57:56 | 2021-05-31T00:57:56 | 322,145,745 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import socket
def main():
# while True:
ip_addrs = ('192.168.56.1', 8899)
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cotent = input('请输入您要发送的内容:')
udp.sendto(cotent.encode('gbk'), ip_addrs)
udp.close()
if __name__ == '__main__':
main()
| [
"fuyu16032001@gmail.com"
] | fuyu16032001@gmail.com |
17f14936cb5142adcf736ab599ebe5d21785778c | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-4.6.2/bin/weewx/__init__.py | 57b8d45778a75f501f7d386c6d2692212d3bb389 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 5,375 | py | #
# Copyright (c) 2009-2021 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Package weewx, containing modules specific to the weewx runtime engine."""
from __future__ import absolute_import
import time
__version__="4.6.2"
# Holds the program launch time in unix epoch seconds:
# Useful for calculating 'uptime.'
launchtime_ts = time.time()
# Set to true for extra debug information:
debug = False
# Exit return codes
CMD_ERROR = 2
CONFIG_ERROR = 3
IO_ERROR = 4
DB_ERROR = 5
# Constants used to indicate a unit system:
METRIC = 0x10
METRICWX = 0x11
US = 0x01
# =============================================================================
# Define possible exceptions that could get thrown.
# =============================================================================
class WeeWxIOError(IOError):
"""Base class of exceptions thrown when encountering an input/output error
with the hardware."""
class WakeupError(WeeWxIOError):
"""Exception thrown when unable to wake up or initially connect with the
hardware."""
class CRCError(WeeWxIOError):
"""Exception thrown when unable to pass a CRC check."""
class RetriesExceeded(WeeWxIOError):
"""Exception thrown when max retries exceeded."""
class HardwareError(Exception):
"""Exception thrown when an error is detected in the hardware."""
class UnknownArchiveType(HardwareError):
"""Exception thrown after reading an unrecognized archive type."""
class UnsupportedFeature(Exception):
"""Exception thrown when attempting to access a feature that is not
supported (yet)."""
class ViolatedPrecondition(Exception):
"""Exception thrown when a function is called with violated
preconditions."""
class StopNow(Exception):
"""Exception thrown to stop the engine."""
class UnknownDatabase(Exception):
"""Exception thrown when attempting to use an unknown database."""
class UnknownDatabaseType(Exception):
"""Exception thrown when attempting to use an unknown database type."""
class UnknownBinding(Exception):
"""Exception thrown when attempting to use an unknown data binding."""
class UnitError(ValueError):
"""Exception thrown when there is a mismatch in unit systems."""
class UnknownType(ValueError):
"""Exception thrown for an unknown observation type"""
class UnknownAggregation(ValueError):
"""Exception thrown for an unknown aggregation type"""
class CannotCalculate(ValueError):
"""Exception raised when a type cannot be calculated."""
# =============================================================================
# Possible event types.
# =============================================================================
class STARTUP(object):
"""Event issued when the engine first starts up. Services have been
loaded."""
class PRE_LOOP(object):
"""Event issued just before the main packet loop is entered. Services
have been loaded."""
class NEW_LOOP_PACKET(object):
"""Event issued when a new LOOP packet is available. The event contains
attribute 'packet', which is the new LOOP packet."""
class CHECK_LOOP(object):
"""Event issued in the main loop, right after a new LOOP packet has been
processed. Generally, it is used to throw an exception, breaking the main
loop, so the console can be used for other things."""
class END_ARCHIVE_PERIOD(object):
"""Event issued at the end of an archive period."""
class NEW_ARCHIVE_RECORD(object):
"""Event issued when a new archive record is available. The event contains
attribute 'record', which is the new archive record."""
class POST_LOOP(object):
"""Event issued right after the main loop has been broken. Services hook
into this to access the console for things other than generating LOOP
packet."""
# =============================================================================
# Service groups.
# =============================================================================
# All existent service groups and the order in which they should be run:
all_service_groups = ['prep_services', 'data_services', 'process_services', 'xtype_services',
'archive_services', 'restful_services', 'report_services']
# =============================================================================
# Class Event
# =============================================================================
class Event(object):
"""Represents an event."""
def __init__(self, event_type, **argv):
self.event_type = event_type
for key in argv:
setattr(self, key, argv[key])
def __str__(self):
"""Return a string with a reasonable representation of the event."""
et = "Event type: %s | " % self.event_type
s = "; ".join("%s: %s" %(k, self.__dict__[k]) for k in self.__dict__ if k!="event_type")
return et + s
def require_weewx_version(module, required_version):
"""utility to check for version compatibility"""
from distutils.version import StrictVersion
if StrictVersion(__version__) < StrictVersion(required_version):
raise UnsupportedFeature("%s requires weewx %s or greater, found %s"
% (module, required_version, __version__))
| [
"tom@tom.org"
] | tom@tom.org |
dceefecc0bbc05158cdf9888075288b412680302 | a7b175357e1ed29dc8332a950e320e64f5db6703 | /venv/Lib/site-packages/wx/py/PyShell.py | c42152a96a555923eeaae65935079cb89adcf6cc | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | saleguas/deskOrg | f65b8603464dbb0e17363ca8a724c12d45da8116 | c21d9abf56e1756fa8073ccc3547ec9a85d83e2a | refs/heads/master | 2022-12-13T18:06:33.029636 | 2020-04-05T20:19:56 | 2020-04-05T20:19:56 | 164,255,371 | 3 | 1 | MIT | 2022-12-08T01:42:51 | 2019-01-05T22:15:27 | Python | UTF-8 | Python | false | false | 2,201 | py | #!/usr/bin/env python
"""PyShell is a python shell application."""
# The next two lines, and the other code below that makes use of
# ``__main__`` and ``original``, serve the purpose of cleaning up the
# main namespace to look as much as possible like the regular Python
# shell environment.
import __main__
original = list(__main__.__dict__.keys())
__author__ = "Patrick K. O'Brien <pobrien@orbtech.com>"
import wx
import os
class App(wx.App):
    """Standalone wxPython application hosting the PyShell frame."""

    def OnInit(self):
        import os
        import wx
        from wx import py
        self.SetAppName("pyshell")
        # Persist shell settings in the per-user data directory.
        data_dir = wx.StandardPaths.Get().GetUserDataDir()
        if not os.path.exists(data_dir):
            os.mkdir(data_dir)
        config_path = os.path.join(data_dir, 'config')
        self.config = wx.FileConfig(localFilename=config_path)
        self.config.SetRecordDefaults(True)
        # Build and show the main shell window.
        self.frame = py.shell.ShellFrame(config=self.config, dataDir=data_dir)
        self.frame.Show()
        self.SetTopWindow(self.frame)
        return True
'''
The main() function needs to handle being imported, such as with the
pyshell script that wxPython installs:
#!/usr/bin/env python
from wx.py.PyShell import main
main()
'''
def main():
    """The main function for the PyShell program.

    Strips the ``__main__`` namespace down to what a vanilla Python shell
    would contain, creates the App instance and runs its event loop.
    """
    # Cleanup the main namespace, leaving the App class.
    import __main__
    md = __main__.__dict__
    # Build the keep-list without mutating the module-level ``original``
    # (appending to it directly would grow it on every call to main()).
    keepers = original + ['App']
    for key in list(md.keys()):
        if key not in keepers:
            del md[key]
    # Create an application instance.
    app = App(0)
    # Cleanup the main namespace some more.
    if 'App' in md and md['App'] is App:
        del md['App']
    if '__main__' in md and md['__main__'] is __main__:
        del md['__main__']
    # Mimic the contents of the standard Python shell's sys.path.
    import sys
    if sys.path[0]:
        sys.path[0] = ''
    # Add the application object to the sys module's namespace.
    # This allows a shell user to do:
    # >>> import sys
    # >>> sys.app.whatever
    sys.app = app
    del sys
    # Start the wxPython event loop.
    app.MainLoop()
if __name__ == '__main__':
main()
| [
"salvadoraleguas@gmail.com"
] | salvadoraleguas@gmail.com |
406e36bcff2429592f817d0372069bb75415b0aa | ba7c4862dfbc9d0469f389c0cdb3fed01f99ebe3 | /plotting/threeD_round3.py | 3fb154b6f963dffcfabc6254d6d1fefc940621cf | [] | no_license | cmontalvo251/Python | 293cbdf8832d7637d5c0b31eadd02d3ccf2f2c05 | 2b12ce043ee41e08537cfb62301c6a55d4661e04 | refs/heads/master | 2023-06-22T21:50:21.225067 | 2023-06-14T13:42:16 | 2023-06-14T13:42:16 | 229,313,158 | 8 | 3 | null | 2021-07-31T16:01:54 | 2019-12-20T18:03:52 | Python | UTF-8 | Python | false | false | 2,738 | py | # Import data
import time
import numpy as np
import plotly.graph_objects as go
def frame_args(duration):
    """Plotly animation settings for a transition lasting `duration` ms."""
    timing = {"duration": duration}
    return {
        "frame": dict(timing),
        "mode": "immediate",
        "fromcurrent": True,
        "transition": {"duration": duration, "easing": "linear"},
    }
# Generate curve data
# Unit-radius circular orbit inclined by `gamma` about the y-axis,
# sampled at 1000 points over one full revolution.
t = np.linspace(0, 2*np.pi, 1000)
gamma = 45*np.pi/180.0  # inclination angle [rad]
xorbit = np.sin(gamma)*np.cos(t)
yorbit = np.sin(t)
zorbit = np.cos(gamma)*np.cos(t)
# Axis limits with a 1.5 unit margin around the orbit.
xm = np.min(xorbit) - 1.5
xM = np.max(xorbit) + 1.5
ym = np.min(yorbit) - 1.5
yM = np.max(yorbit) + 1.5
zm = np.min(zorbit) - 1.5
zM = np.max(zorbit) + 1.5
# Subsample ~1% of the points for animation frames (100 frames total).
skip = int(0.01*len(t))
xanimation = xorbit[0:len(t):skip]
yanimation = yorbit[0:len(t):skip]
zanimation = zorbit[0:len(t):skip]
nb_frames = len(xanimation)
fig = go.Figure(frames=[go.Frame(data=go.Scatter3d(
x=[xanimation[k]],
y=[yanimation[k]],
z=[zanimation[k]],
mode="markers",
marker=dict(color="red", size=10),
),
name=str(k) # you need to name the frame for the animation to behave properly
)
for k in range(nb_frames)]) #Loop through all frames
# Add data to be displayed before animation starts
fig.add_trace(go.Scatter3d(
x=[xanimation[0]],
y=[yanimation[1]],
z=[zanimation[2]],
mode="markers",
marker=dict(color="red", size=10)
))
##Add Full orbit
fig.add_trace(go.Scatter3d(
x=xorbit,
y=yorbit,
z=zorbit,
mode="lines",line=dict(width=2, color="blue")
))
sliders = [{"pad": {"b": 10, "t": 60},
"len": 0.9,
"x": 0.1,
"y": 0,
"steps": [{"args": [[f.name], frame_args(0)],
"label": str(k),
"method": "animate",
}
for k, f in enumerate(fig.frames)],}]
# Layout
fig.update_layout(
title='3D Orbit',
width=600,
height=600,
scene=dict(
xaxis=dict(range=[xm,xM]),
yaxis=dict(range=[ym,yM]),
zaxis=dict(range=[zm,zM],autorange=False),
aspectratio=dict(x=1, y=1, z=1),
),
updatemenus = [{
"buttons": [{
"args": [None, frame_args(50)],
"label": "▶", # play symbol
"method": "animate",},
{"args": [[None], frame_args(0)],
"label": "◼", # pause symbol
"method": "animate",},
],
"direction": "left",
"pad": {"r": 10, "t": 70},
"type": "buttons",
"x": 0.1,
"y": 0,
}],
sliders=sliders
)
fig.show() | [
"cmontalvo@southalabama.edu"
] | cmontalvo@southalabama.edu |
860b34a29b05c457ba52d4db8cddb722d4684c96 | ba7c4862dfbc9d0469f389c0cdb3fed01f99ebe3 | /controls/bode/lead_lag_compensation.py | d939d88fe4fb919bdb196d213749e47cd1052ba7 | [] | no_license | cmontalvo251/Python | 293cbdf8832d7637d5c0b31eadd02d3ccf2f2c05 | 2b12ce043ee41e08537cfb62301c6a55d4661e04 | refs/heads/master | 2023-06-22T21:50:21.225067 | 2023-06-14T13:42:16 | 2023-06-14T13:42:16 | 229,313,158 | 8 | 3 | null | 2021-07-31T16:01:54 | 2019-12-20T18:03:52 | Python | UTF-8 | Python | false | false | 314 | py | import control as ctl
import numpy as np
import matplotlib.pyplot as plt
plt.close("all")

# Pendulum parameters: g [m/s^2], length L [m]; wn = sqrt(g/L) is the
# natural frequency of the linearized pendulum.
g = 9.81
L = 2.0
wn = np.sqrt(g/L)

# Plant: undamped second-order system G(s) = 1 / (s^2 + wn^2).
G = ctl.tf([1], [1, 0, wn**2])
print(G)

# Lead compensator C(s) = (s + 1) / (s + 100).
C = ctl.tf([1, 1], [1, 100])
print(C)

# Open-loop Bode plot of the compensated system.
ctl.bode(C*G, dB=True)
plt.grid()

# Gain/phase margins and crossover frequencies.
# (Fix: this line previously started with a stray backtick, which made
# the whole script a SyntaxError.)
gm, pm, wg, wp = ctl.margin(C*G)
print(gm, pm, wg, wp)

ctl.rlocus(C*G)
plt.show()
| [
"cmontalvo@southalabama.edu"
] | cmontalvo@southalabama.edu |
e57fbf4a5c2ba63f9063c9c5e88e364270db2ecb | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/core/2016/8/test_device_sun_light_trigger.py | 88c0bae60ec02bf1f88b3d8c7d9fb1e9ee41e430 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 3,997 | py | """The tests device sun light trigger component."""
# pylint: disable=too-many-public-methods,protected-access
import os
import unittest
import homeassistant.loader as loader
from homeassistant.const import CONF_PLATFORM, STATE_HOME, STATE_NOT_HOME
from homeassistant.components import (
device_tracker, light, sun, device_sun_light_trigger)
from homeassistant.helpers import event_decorators
from tests.common import (
get_test_config_dir, get_test_home_assistant, ensure_sun_risen,
ensure_sun_set)
KNOWN_DEV_YAML_PATH = os.path.join(get_test_config_dir(),
device_tracker.YAML_DEVICES)
def setUpModule():  # pylint: disable=invalid-name
    """Write a device tracker known devices file to be used."""
    known_devices = (
        ('device_1', 'DEV1', 'http://example.com/dev1.jpg'),
        ('device_2', 'DEV2', 'http://example.com/dev2.jpg'),
    )
    for dev_id, dev_name, picture_url in known_devices:
        device_tracker.update_config(
            KNOWN_DEV_YAML_PATH, dev_id,
            device_tracker.Device(None, None, True, dev_id, dev_name,
                                  picture=picture_url))
def tearDownModule():  # pylint: disable=invalid-name
    """Remove device tracker known devices file."""
    # Delete the YAML file written by setUpModule so runs stay isolated.
    os.remove(KNOWN_DEV_YAML_PATH)
class TestDeviceSunLightTrigger(unittest.TestCase):
    """Test the device sun light trigger module."""

    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        event_decorators.HASS = self.hass
        # The test scanner reports DEV1 as home, so the "someone is home"
        # precondition of the trigger holds for every test.
        self.scanner = loader.get_component(
            'device_tracker.test').get_scanner(None, None)
        self.scanner.reset()
        self.scanner.come_home('DEV1')
        loader.get_component('light.test').init()
        self.assertTrue(device_tracker.setup(self.hass, {
            device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}
        }))
        self.assertTrue(light.setup(self.hass, {
            light.DOMAIN: {CONF_PLATFORM: 'test'}
        }))
        self.assertTrue(sun.setup(
            self.hass, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}))

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
        event_decorators.HASS = None

    def test_lights_on_when_sun_sets(self):
        """Test lights go on when there is someone home and the sun sets."""
        self.assertTrue(device_sun_light_trigger.setup(
            self.hass, {device_sun_light_trigger.DOMAIN: {}}))
        # Start from daylight with the lights off...
        ensure_sun_risen(self.hass)
        light.turn_off(self.hass)
        self.hass.pool.block_till_done()
        # ...then sunset should switch them on.
        ensure_sun_set(self.hass)
        self.hass.pool.block_till_done()
        self.assertTrue(light.is_on(self.hass))

    def test_lights_turn_off_when_everyone_leaves(self): \
            # pylint: disable=invalid-name
        """Test lights turn off when everyone leaves the house."""
        light.turn_on(self.hass)
        self.hass.pool.block_till_done()
        self.assertTrue(device_sun_light_trigger.setup(
            self.hass, {device_sun_light_trigger.DOMAIN: {}}))
        # Marking the "all devices" group as not home empties the house.
        self.hass.states.set(device_tracker.ENTITY_ID_ALL_DEVICES,
                             STATE_NOT_HOME)
        self.hass.pool.block_till_done()
        self.assertFalse(light.is_on(self.hass))

    def test_lights_turn_on_when_coming_home_after_sun_set(self): \
            # pylint: disable=invalid-name
        """Test lights turn on when coming home after sun set."""
        light.turn_off(self.hass)
        ensure_sun_set(self.hass)
        self.hass.pool.block_till_done()
        self.assertTrue(device_sun_light_trigger.setup(
            self.hass, {device_sun_light_trigger.DOMAIN: {}}))
        # A device arriving home after dark should trigger the lights.
        self.hass.states.set(
            device_tracker.ENTITY_ID_FORMAT.format('device_2'), STATE_HOME)
        self.hass.pool.block_till_done()
        self.assertTrue(light.is_on(self.hass))
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
69982d63985cd4d63ce892aefb685f295fde5def | 892c7bd301eeadf57b546f039faf499448112ddc | /organizacion/migrations/0003_auto_20160803_2128.py | bd7797b580ff3d4c5a0737d23d02286217f6d4cc | [
"MIT"
] | permissive | ErickMurillo/aprocacaho | beed9c4b031cf26a362e44fc6a042b38ab246c27 | eecd216103e6b06e3ece174c89d911f27b50585a | refs/heads/master | 2022-11-23T15:03:32.687847 | 2019-07-01T19:16:37 | 2019-07-01T19:16:37 | 53,867,804 | 0 | 1 | MIT | 2022-11-22T01:02:51 | 2016-03-14T15:23:39 | HTML | UTF-8 | Python | false | false | 1,552 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-03 21:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.7: only relabels verbose_name texts on
    # comercializacion/documentacion fields and the `status` choices on
    # organizacion; no column-type or schema changes.

    dependencies = [
        ('organizacion', '0002_auto_20160609_1700'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comercializacion',
            name='no_socias_corriente',
            field=models.FloatField(verbose_name='Mujeres (Fermentado)'),
        ),
        migrations.AlterField(
            model_name='comercializacion',
            name='no_socios_corriente',
            field=models.FloatField(verbose_name='Hombres (Fermentado)'),
        ),
        migrations.AlterField(
            model_name='comercializacion',
            name='socias_corriente',
            field=models.FloatField(verbose_name='Mujeres (Corriente)'),
        ),
        migrations.AlterField(
            model_name='comercializacion',
            name='socios_corriente',
            field=models.FloatField(verbose_name='Hombres (Corriente)'),
        ),
        migrations.AlterField(
            model_name='documentacion',
            name='fecha',
            field=models.DateField(verbose_name='Fecha de elaboraci\xf3n o actualizaci\xf3n'),
        ),
        migrations.AlterField(
            model_name='organizacion',
            name='status',
            field=models.IntegerField(choices=[(1, 'ONG'), (2, 'Cooperativa'), (3, 'Asociaci\xf3n'), (4, 'Proyectos')], verbose_name='Estatus Legal'),
        ),
    ]
| [
"erickmurillo22@gmail.com"
] | erickmurillo22@gmail.com |
d8e6ca33f5737e8e3a8c6b75975ab03d158cca5c | 97221d2166bc075d83c64d346434c11f11f44007 | /vmscope/accounts/urls.py | 65fb3fa431f1bd1f5a11e0e591f2d309cd887dc6 | [] | no_license | likit/vmscope-django | a89aba3bd0d8abfc0d154be5872a7206fe1778fb | 9b2c315c049d240764892e718c1d49962d4935f4 | refs/heads/master | 2022-12-09T05:59:31.234466 | 2021-01-11T15:20:09 | 2021-01-11T15:20:09 | 143,836,786 | 0 | 0 | null | 2022-12-08T02:20:57 | 2018-08-07T07:32:37 | Python | UTF-8 | Python | false | false | 215 | py | from django.urls import path
from .views import SignUpView, update_profile
urlpatterns = [
path('signup/', SignUpView.as_view(), name='signup'),
path('edit_profile/', update_profile, name='edit_profile')
] | [
"likit.pre@mahidol.edu"
] | likit.pre@mahidol.edu |
d5014679f0e7629571ed4e5ec13dc761302636a4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_293/ch87_2020_05_12_23_44_25_135873.py | 5916eb298f5abbe724807df98f3a1a378325eeec | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | with open('churras.txt','r') as churrasco:
chur = churrasco.read()
churchur = chur.split()
soma = 0
for e in churchur:
lista_churras = e.split(",")
total = int(lista_churras[1])*float(lista_churras[2])
soma += total
print(soma) | [
"you@example.com"
] | you@example.com |
fd99b671f08d3b5252eed81c98ca24bff841fff4 | 1d2542fbc29c52ccd125c48ab957790ba2640e87 | /holdle/asyncio_spider.py | 077ca19872a83d7946eaa3654389ca5f90ec8b03 | [] | no_license | sixuerain/CrawlMan | 0a6c9b26f6e4e469b7b04dee82b93eeff3a026ae | 5b525417dd87d4f5db9b46b428001dd4c8a24d29 | refs/heads/master | 2023-03-18T11:00:16.297194 | 2021-02-26T08:22:54 | 2021-02-26T08:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,525 | py | # -*- coding: utf-8 -*-
# website: http://30daydo.com
# @Time : 2020/9/22 10:07
# 异步爬取首页与列表
import sys
sys.path.append('..')
import asyncio
import datetime
import aiohttp
import re
import time
from parsel import Selector
from configure.settings import DBSelector
from common.BaseService import BaseService
SLEEP = 2
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2'}
URL_MAP = {'home_page': 'https://holdle.com/stocks/industry', 'base': 'https://holdle.com'}
class AsyncMongo():
    """Thin async wrapper around the 'db_stock' Mongo database."""

    def __init__(self):
        self.DB = DBSelector()
        # async_type=True: presumably returns a motor-style async client
        # here -- TODO confirm against DBSelector.mongo().
        self.client = self.DB.mongo(location_type='qq', async_type=True)
        self.db = self.client['db_stock']

    async def update(self, table, data):
        # Insert `data` (a list of documents) into collection `table`.
        self.doc = self.db[table]
        await self.doc.insert_many(data)
class Holdle(BaseService):
    """Scrape the per-industry financial-ratio tables from holdle.com and
    store each table's rows in MongoDB via AsyncMongo."""

    def __init__(self):
        super(Holdle, self).__init__()
        self.data_processor = AsyncMongo()
        # One Mongo collection per ratio table, in the order the six
        # tables appear on each industry page.
        self.tables_list = ['ROE', 'Cash_Ratio', 'Gross_Margin', 'Operation_Margin', 'Net_Profit_Ratio', 'Dividend_ratio']

    async def home_page(self):
        """Fetch the industry index page and crawl every industry concurrently."""
        start = time.time()
        async with aiohttp.ClientSession() as session:
            async with session.get(url=URL_MAP['home_page'], headers=headers) as response:
                html = await response.text()  # blocks this task until the body arrives
                resp = Selector(text=html)
                industries = resp.xpath('//ul[@class="list-unstyled"]/a')
                task_list = []
                for industry in industries:
                    json_data = {}
                    industry_url = industry.xpath('.//@href').extract_first()
                    industry_name = industry.xpath('.//li/text()').extract_first()
                    industry_name = industry_name.replace('-', '').strip()
                    json_data['industry_url'] = industry_url
                    json_data['industry_name'] = industry_name
                    task = asyncio.ensure_future(self.detail_list(session, industry_url, json_data))
                    task_list.append(task)
                # Run all per-industry crawls in parallel on this session.
                await asyncio.gather(*task_list)
                end = time.time()
                print(f'time used {end - start}')

    async def detail_list(self, session, url, json_data):
        """Download one industry page and hand the HTML to parse_detail."""
        async with session.get(URL_MAP['base'] + url, headers=headers) as response:
            response = await response.text()
            await self.parse_detail(response, json_data)

    async def parse_detail(self, html, json_data=None):
        """Parse the six ratio tables of an industry page and persist them."""
        resp = Selector(text=html)
        industry = json_data['industry_name']
        tables = resp.xpath('//table[@class="table table-bordered"]')
        # The page layout is expected to carry exactly one table per ratio.
        if len(tables) != 6:
            raise ValueError
        for index, table in enumerate(self.tables_list):
            # `table` is the target collection name; tables[index] is the
            # matching HTML table (relies on page order == tables_list order).
            rows = tables[index].xpath('.//tr')
            result = []
            for row in rows[1:]:
                stock_name = row.xpath('.//td[1]/text()').extract_first()
                value = row.xpath('.//td[2]/text()').extract_first()
                value = float(value)
                d = {'industry': industry, 'name': stock_name, 'value': value, 'crawltime': datetime.datetime.now()}
                result.append(d)
            await self.data_processor.update(table, result)
# Run one full crawl (index page plus all industry pages) to completion.
app = Holdle()
loop = asyncio.get_event_loop()
loop.run_until_complete(app.home_page())
| [
"jinweizsu@gmail.com"
] | jinweizsu@gmail.com |
def min(s, t):
    """Return the shortest substring of `s` containing every character of
    `t` (with multiplicity), or "" if no such window exists.

    Replaces a syntactically broken draft (unclosed while, stray tokens)
    with the standard sliding-window algorithm, O(len(s) + len(t)) time.

    NOTE: the name shadows the builtin `min`; kept for compatibility with
    existing callers.
    """
    if not t or len(t) > len(s):
        return ""
    # Count of each character of t still required inside the window.
    need = {}
    for ch in t:
        need[ch] = need.get(ch, 0) + 1
    missing = len(t)  # total characters of t not yet covered
    best_start, best_len = 0, len(s) + 1
    left = 0
    for right, ch in enumerate(s):
        if ch in need:
            if need[ch] > 0:
                missing -= 1
            need[ch] -= 1
        # Window [left, right] covers t: shrink from the left as far as
        # possible, recording the best (shortest) window seen.
        while missing == 0:
            if right - left + 1 < best_len:
                best_start, best_len = left, right - left + 1
            dropped = s[left]
            if dropped in need:
                need[dropped] += 1
                if need[dropped] > 0:
                    missing += 1
            left += 1
    return "" if best_len > len(s) else s[best_start:best_start + best_len]
min("ADOBECODEBANC","ABC") | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
9c6ef08a4f2ad7ea43a9ad6e3577602aba3521a9 | f3f76d228f7de399831f31d7d3ca8942e0c27d58 | /tutorial_coding_example.py | 1ac17fa1231e79284ce32ef860c65fa73bb9304f | [] | no_license | emylincon/line_encoding | 3cac93630d8416c0f513c172982b561c7fd7469a | 80a79b22501bac2371b6ad63ca6fe955608291f2 | refs/heads/master | 2021-01-04T11:54:42.411696 | 2020-02-15T21:40:13 | 2020-02-15T21:40:13 | 240,536,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,775 | py | import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
names = {1: "Unipolar NRZ", 2: "Unipolar RZ", 3: "Manchester coding", 4: "Differential Manchester coding"}
def line_convert(bits, no):
    """Draw the Unipolar NRZ waveform for `bits` on axis ax1."""
    ax1.grid()
    edges = list(range(len(bits) + 1))
    # Duplicate interior edges so each bit is drawn as a flat segment.
    xs = [0]
    for edge in edges[1:-1]:
        xs.extend((edge, edge))
    xs.append(edges[-1])
    # NRZ level: the bit value itself, held for the whole bit period.
    ys = []
    for bit in bits:
        level = int(bit)
        ys.extend((level, level))
    ax1.plot(xs, ys, 'r-.o')
    # Label each bit at the centre of its cell.
    for pos in range(len(bits)):
        ax1.text(pos + 0.5, 0.5, bits[pos], rotation=0, size=20,
                 ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))
    ax1.set_title(names[no], fontdict={'weight': 'bold', 'size': 17})
def rz_line_convert(bits, no):
    """Draw the Unipolar RZ (return-to-zero) waveform of `bits` on ax2.

    Each bit spans two x units: a 1 is high for the first half and
    returns to zero for the second half; a 0 stays low throughout.
    """
    ax2.grid()
    x1 = list(range(len(bits) * 2 + 1))
    # Duplicate interior edges so levels are drawn as flat segments.
    x = [0]
    for i in x1[1:-1]:
        x += [int(i), int(i)]
    x.append(x1[-1])
    y = []
    for i in bits:
        if int(i) == 1:
            y += [1, 1, 0, 0]
        elif int(i) == 0:
            y += [0, 0, 0, 0]
    ax2.plot(x, y, 'g-.^')
    # Label each bit at the centre of its two-unit cell.
    j = 0
    for i in range(0, len(bits) * 2, 2):
        ax2.text(i + 1, 0.5, bits[j], rotation=0, size=20,
                 ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))
        j += 1
    ax2.set_title(names[no], fontdict={'weight': 'bold', 'size': 17})
def mc_line_convert(bits, no):
    """Draw the Manchester-coded waveform of `bits` on ax3.

    Each bit spans two x units with a mid-bit transition: a 1 is drawn
    high-then-low, a 0 low-then-high.
    """
    ax3.grid()
    x1 = list(range(len(bits) * 2 + 1))
    # Duplicate interior edges so levels are drawn as flat segments.
    x = [0]
    for i in x1[1:-1]:
        x += [int(i), int(i)]
    x.append(x1[-1])
    y = []
    for i in bits:
        if int(i) == 1:
            y += [1, 1, 0, 0]
        elif int(i) == 0:
            y += [0, 0, 1, 1]
    ax3.plot(x, y, 'b-.s')
    # Label each bit at the centre of its two-unit cell.
    j = 0
    for i in range(0, len(bits) * 2, 2):
        ax3.text(i + 1, 0.5, bits[j], rotation=0, size=20,
                 ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))
        j += 1
    ax3.set_title(names[no], fontdict={'weight': 'bold', 'size': 17})
def differential_manchester(bits, no):
    """Draw the Differential Manchester waveform of `bits` on ax4.

    The first bit fixes the initial polarity; afterwards each bit is
    encoded relative to the previous one, with `pre` ('S'/'Z') tracking
    the current transition direction.
    NOTE(review): the exact 0/1 transition convention is inferred from
    the branch structure below -- confirm against a reference waveform.
    """
    inp1 = [int(i) for i in bits]
    li, lock, pre = [], False, ''
    for i in range(len(inp1)):
        if inp1[i] == 0 and not lock:
            # First bit is 0: start low, rise at mid-bit.
            li.append(-1)
            li.append(-1)
            li.append(1)
            lock = True
            pre = 'S'
        elif inp1[i] == 1 and not lock:
            # First bit is 1: start high, fall at mid-bit.
            li.append(1)
            li.append(1)
            li.append(-1)
            lock = True
            pre = 'Z'
        else:
            if inp1[i] == 0:
                # 0: repeat the previous transition direction.
                if pre == 'S':
                    li.append(-1)
                    li.append(1)
                else:
                    li.append(1)
                    li.append(-1)
            else:
                # 1: invert the tracked direction before emitting.
                if pre == 'Z':
                    pre = 'S'
                    li.append(-1)
                    li.append(1)
                else:
                    pre = 'Z'
                    li.append(1)
                    li.append(-1)
    # Label each bit at the centre of its two-unit cell.
    j = 0
    for i in range(0, len(bits) * 2, 2):
        ax4.text(i + 1, 0.5, bits[j], rotation=0, size=20,
                 ha="center", va="center", bbox=dict(boxstyle="round", ec=(0., 0., 0.), fc=(0.98, 0.96, 0.59), ))
        j += 1
    ax4.grid()
    # steps-pre renders the sample list as a square wave.
    ax4.plot(li, color='red', drawstyle='steps-pre', marker='>')
    ax4.set_title(names[no], fontdict={'weight': 'bold', 'size': 17})
def plot(bits):
    """Render all four line encodings of `bits` and show the figure."""
    # Each encoder draws onto its own subplot axis (ax1..ax4).
    line_convert(bits, 1)
    rz_line_convert(bits, 2)
    mc_line_convert(bits, 3)
    differential_manchester(bits, 4)
    plt.subplots_adjust(hspace=0.55)
    plt.show()
if __name__ == '__main__':
    # Prompt for a bit string (e.g. "10110") and draw all four encodings.
    plot(input('Enter the binary bits sequence: ').strip())
| [
"emylincon@gmail.com"
] | emylincon@gmail.com |
4730d785505d4b447c0411e7674925389bef38ed | b6c7f367306f8f3d9fad7551810c68b392a1b879 | /omoide/tests/unit/test_utils.py | 92cf7e3191f512d335d542af0351cc8503a66dc8 | [
"MIT"
] | permissive | TaXeH/Omoide | c96ef35b1394125fc98367e8a9ef1674718e7e9e | 8ccc9d47e802433bb2de21ff930e6630658cd5e3 | refs/heads/main | 2023-07-18T12:00:15.469707 | 2021-08-28T11:37:23 | 2021-08-28T11:37:23 | 400,773,814 | 0 | 0 | MIT | 2021-08-28T11:17:55 | 2021-08-28T11:17:55 | null | UTF-8 | Python | false | false | 2,579 | py | # -*- coding: utf-8 -*-
"""Tests.
"""
from omoide import utils
def test_byte_count_to_text_ru():
"""Must convert to readable size in russian."""
func = utils.byte_count_to_text
assert func(-2_000, language='RU') == '-2.0 КиБ'
assert func(-2_048, language='RU') == '-2.0 КиБ'
assert func(0, language='RU') == '0 Б'
assert func(27, language='RU') == '27 Б'
assert func(999, language='RU') == '999 Б'
assert func(1_000, language='RU') == '1000 Б'
assert func(1_023, language='RU') == '1023 Б'
assert func(1_024, language='RU') == '1.0 КиБ'
assert func(1_728, language='RU') == '1.7 КиБ'
assert func(110_592, language='RU') == '108.0 КиБ'
assert func(1_000_000, language='RU') == '976.6 КиБ'
assert func(7_077_888, language='RU') == '6.8 МиБ'
assert func(452_984_832, language='RU') == '432.0 МиБ'
assert func(1_000_000_000, language='RU') == '953.7 МиБ'
assert func(28_991_029_248, language='RU') == '27.0 ГиБ'
assert func(1_855_425_871_872, language='RU') == '1.7 ТиБ'
assert func(9_223_372_036_854_775_807, language='RU') == '8.0 ЭиБ'
def test_byte_count_to_text_en():
"""Must convert to readable size in english."""
func = utils.byte_count_to_text
assert func(-2_000, language='EN') == '-2.0 KiB'
assert func(-2_048, language='EN') == '-2.0 KiB'
assert func(0, language='EN') == '0 B'
assert func(27, language='EN') == '27 B'
assert func(999, language='EN') == '999 B'
assert func(1_000, language='EN') == '1000 B'
assert func(1_023, language='EN') == '1023 B'
assert func(1_024, language='EN') == '1.0 KiB'
assert func(1_728, language='EN') == '1.7 KiB'
assert func(110_592, language='EN') == '108.0 KiB'
assert func(1_000_000, language='EN') == '976.6 KiB'
assert func(7_077_888, language='EN') == '6.8 MiB'
assert func(452_984_832, language='EN') == '432.0 MiB'
assert func(1_000_000_000, language='EN') == '953.7 MiB'
assert func(28_991_029_248, language='EN') == '27.0 GiB'
assert func(1_855_425_871_872, language='EN') == '1.7 TiB'
assert func(9_223_372_036_854_775_807, language='EN') == '8.0 EiB'
def test_sep_digits():
"""Must separate digits on 1000s."""
func = utils.sep_digits
assert func('12345678') == '12345678'
assert func(12345678) == '12 345 678'
assert func(1234.5678) == '1 234.57'
assert func(1234.5678, precision=4) == '1 234.5678'
assert func(1234.0, precision=4) == '1 234.0000'
assert func(1234.0, precision=0) == '1 234'
| [
"nicord@yandex.ru"
] | nicord@yandex.ru |
56805d4ddf91239e2f1e5d9219a569abc53ef185 | 48deb00f51be78bd0e2eac898428a783b1fb95da | /ejemplos/funcion_palindromo.py | 0dcef6a2c971bfd2d1ed5d36358323128864c922 | [] | no_license | jkaalexkei/TrabajosEnPython | 435bc77982bc3b44f080e28732ff0c439a2c41cd | 764965ddc6736e94e61d7ab8419cc5e33a767a0b | refs/heads/master | 2023-06-04T21:35:56.522264 | 2021-06-22T02:04:51 | 2021-06-22T02:04:51 | 352,990,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py |
def palindromo(texto):
lista =[]
for i in texto:
lista.append(i)
print(lista)
lista_inversa = lista[::-1]#se invierte la lista
print(lista_inversa)
if lista == lista_inversa:
print("verdadero, si es palindromo")
else:
print("Falso, no es palindromo")
palindromo("alex")
| [
"jkaalexkei@gmail.com"
] | jkaalexkei@gmail.com |
3ac66d934734757dcd65a9fc45415ee138398ade | ec9431bc95f5b832876e16351967aef19b1395d7 | /ci-hpc/visualisation/www/plot/highcharts.py | 2f107515cadada74fee026073ba8c3fbe4d2c584 | [
"MIT"
] | permissive | elichad/ci-hpc | 743b828c1e410d6dc1d457121e9508ee2de6f420 | 62c45a2e12a515a7034edafc2f3139f820f8bfeb | refs/heads/master | 2020-03-26T06:09:45.508223 | 2018-08-10T09:30:25 | 2018-08-10T09:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,640 | py | #!/usr/bin/python3
# author: Jan Hybs
import collections
import collections.abc
import copy

import numpy as np
import pandas as pd

import utils.dateutils as dateutils
from artifacts.db.mongo import Fields as db
from utils.datautils import ensure_iterable
from utils.logging import logger
from utils.timer import Timer
from visualisation.www.plot.highcharts_config import HighchartsConfig, HighchartsSeries
from visualisation.www.plot.highcharts_config import HighchartsChart as Chart
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
class dotdict(dict):
    """A dict whose keys are also readable/writable as attributes.

    Missing attributes resolve to None (``dict.get`` semantics) instead
    of raising AttributeError.
    """
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def copy(self):
        """Return a copy of self as a dotdict."""
        return self.copy_cls(dotdict)

    def copy_cls(self, cls):
        """Copy self into a new instance of ``cls``: dict values are
        copied via their own ``copy()``, everything else shallowly."""
        duplicate = cls()
        for key, value in self.items():
            duplicate[key] = value.copy() if isinstance(value, dict) else copy.copy(value)
        return duplicate
def merge(dct, merge_dct):
    """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
    ``dct``.

    :param dct: dict onto which the merge is executed (mutated in place)
    :param merge_dct: dct merged into dct
    :return: dct
    """
    for k, v in merge_dct.items():
        # Recurse only when both sides hold a mapping; otherwise overwrite.
        # Uses collections.abc.Mapping: the old ``collections.Mapping``
        # alias was removed in Python 3.10.
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(v, collections.abc.Mapping)):
            merge(dct[k], v)
        else:
            dct[k] = v
    return dct
def _fillna(df):
return df.where(pd.notnull(df), None)
def _group_data(df, agg, x=db.GIT_DATETIME, y=db.DURATION, rename=None):
    """Group ``df`` by column ``x``, apply the aggregation dict ``agg``,
    then optionally copy-and-drop columns per ``rename``.

    :type rename: dict -- maps old column -> new column;
        ``False`` skips renaming entirely; ``None`` uses {x: 'x', y: 'y'}
    """
    with Timer('highcharts: data group: agg', log=logger.debug):
        result = df.groupby(x).aggregate(agg).reset_index()

    # rename=False means "leave the aggregated column names untouched".
    if rename is False:
        return result

    if rename is None:
        rename = dict(x=x, y=y)

    # Copy each column to its new name, then drop all originals at once.
    dels = set()
    for k, v in rename.items():
        result[v] = result[k]
        dels.add(k)
    for k in dels:
        del result[k]
    return result
def _ci_area(df, ci=(+0.05, -0.05), shift=1):
    """Return one frame per offset in `ci`, each with df['y'] moved by
    that relative fraction and optionally lagged by `shift` samples
    (the first values are repeated to preserve length); NaNs become None."""
    frames = []
    for offset in ci:
        band = df.copy()
        band['y'] = df['y'] + df['y'] * offset
        values = list(band['y'].values)
        if shift and len(values) > shift:
            values = values[:shift] + values[:-shift]
        band['y'] = values
        frames.append(_fillna(band))
    return frames
def highcharts_frame_in_time(df, config, estimator=np.mean, title=None, color=None, args=None, add_std=True, add_errorbar=True, metric_name=None):
    """Build a Highcharts config plotting the aggregated y metric over x.

    The frame is grouped by the configured x property; the mean series is
    always added, optionally with a +-1 std band and 2.5%/5% error bars.

    :type config: visualisation.www.plot.cfg.project_config.ProjectConfig
    :type args: argparse.Namespace (not referenced in this function)
    :type df: pd.DataFrame
    """
    x = config.test_view.x_prop
    y = config.test_view.y_prop
    # Smooth views use spline variants of the line/area chart types.
    linetype = Chart.TYPE_SPLINE if config.test_view.smooth else Chart.TYPE_LINE
    areatype = Chart.TYPE_AREA_SPLINE_RANGE if config.test_view.smooth else Chart.TYPE_AREA_RANGE
    agg, renames = config.get_test_view_groupby()
    agg.update({
        y: [estimator, np.std],
    })
    renames.update({
        x: 'x'
    })
    with Timer('highcharts: data group', log=logger.debug):
        result = _group_data(
            df, agg, x=x, rename=renames
        )
    # NOTE(review): result[y]['mean'] assumes the estimator aggregates
    # under the column label 'mean' (true for np.mean) -- confirm before
    # passing a different estimator.
    commits, uuids = result['commit'], result['id']
    mean, std = result[y]['mean'], result[y]['std']
    # +-1 standard deviation band around the mean.
    stds = pd.DataFrame()
    stds['x'] = result['x']
    stds['low'] = mean - std
    stds['high'] = mean + std
    # +-2.5% relative band (series named 'e5', i.e. 5% total width).
    e5 = pd.DataFrame()
    e5['x'] = result['x']
    e5['low'] = mean - mean*0.025
    e5['high'] = mean + mean*0.025
    # +-5% relative band (series named 'e10').
    e10 = pd.DataFrame()
    e10['x'] = result['x']
    e10['low'] = mean - mean*0.05
    e10['high'] = mean + mean*0.05
    means = pd.DataFrame()
    means['x'] = result['x']
    means['y'] = mean
    # obj.rangeSelector = dotdict(selected=1)
    # obj.showInNavigator = True
    obj = HighchartsConfig()
    obj.title.text = title
    obj.xAxis.title.text = config.test_view.x_prop
    obj.yAxis.title.text = config.test_view.y_prop
    obj.xAxis.type = 'category'
    obj.add(HighchartsSeries(
        type=linetype,
        name='mean' if not metric_name else metric_name,
        data=means,
        commits=commits,
        marker=dotdict(enabled=True),
        uuids=uuids,
        point=dotdict(events=dotdict()),
        color=color,
        allowPointSelect=True,
        zIndex=1,
    ))
    if add_std:
        obj.add(HighchartsSeries(
            type=areatype,
            name='std',
            data=stds,
            commits=commits,
            uuids=uuids,
            color='rgba(0, 0, 0, 0.2)',
            fillColor='rgba(0, 0, 0, 0.05)',
            dashStyle='Dash',
        )),  # NOTE(review): trailing comma builds a discarded 1-tuple; harmless
    if add_errorbar:
        obj.add(HighchartsSeries(
            type='errorbar',
            name='e5',
            data=e5,
            commits=commits,
            uuids=uuids,
            color='rgba(0, 0, 0, 0.3)',
            # stemColor='#FF0000',
            # whiskerColor='#FF0000',
            lineWidth=0.5,
        ))
        obj.add(HighchartsSeries(
            type='errorbar',
            name='e10',
            data=e10,
            commits=commits,
            uuids=uuids,
            color='rgba(0, 0, 0, 0.3)',
            # stemColor='#FF0000',
            # whiskerColor='#FF0000',
            lineWidth=0.5,
        ))
    return obj
def _rename (df, **kwargs):
"""
:rtype: pd.DataFrame
:type df: pd.DataFrame
"""
dels = set()
for k, v in kwargs.items():
if v is None:
del df[k]
else:
df[k] = df[v]
if k != v:
dels.add(v)
for d in dels:
del df[d]
return df
def highcharts_frame_bar(df, config):
    """Build a Highcharts bar-chart config breaking down frame durations.

    Frames faster than 0.1 sec are dropped; one bar series is emitted for
    every combination of the configured group-by columns.

    :type df: pd.DataFrame
    :type config: visualisation.www.plot.cfg.project_config.ProjectConfig
    """
    x = config.frame_view.x_prop
    y = config.frame_view.y_prop
    # Longest frames first; ignore anything under 0.1 sec.
    df = df.sort_values(by=y, ascending=False)
    df = df[df[y] > 0.1]
    # df[args.rename['name']] = df[args.rename['name']].apply(lambda x: '\n'.join(x.split('::')))
    # Expose the columns under the literal names Highcharts expects.
    rename = {
        'y': y,
        'name': x,
        'path': x,
    }
    rename.update(config.frame_view.groupby)
    df = _rename(df, **rename)
    obj = HighchartsConfig()
    obj.tooltip.pointFormat = 'duration <b>{point.y:.2f}</b> sec'
    obj.xAxis.title.text = 'frame'
    obj.yAxis.title.text = 'duration [sec]'
    obj.xAxis.type = 'category'
    obj.legend.align = 'center'
    obj.legend.verticalAlign = 'bottom'
    obj.chart.zoomType = 'x'
    obj.title.text = 'Frame breakdown'
    # obj.xAxis.scrollbar = dotdict(enabled=True)
    # obj.xAxis.min = 0
    # obj.xAxis.max = 4
    # obj.xAxis.tickLength = 0
    # https://jsfiddle.net/gh/get/library/pure/highcharts/highcharts/tree/master/samples/stock/yaxis/inverted-bar-scrollbar
    # del obj.yAxis
    names = list(config.frame_view.groupby.keys())
    for groupby_name, groupby_data in df.groupby(names):
        # Series title like "col=<b>value</b>, ..." for each group.
        title_dict = dict(zip(names, ensure_iterable(groupby_name)))
        obj.add(HighchartsSeries(
            type='bar',
            name=', '.join('%s=<b>%s</b>' % (str(k), str(v)) for k, v in title_dict.items()),
            data=groupby_data.to_dict('records'),
            dataLabels=dotdict(
                enabled=False,
                format='{y:.2f} sec'
            )
        ))
    return obj
| [
"jan.hybs@tul.cz"
] | jan.hybs@tul.cz |
ae2ec3f90bade82ccddeed661b5e315c0a5f5a14 | 1fdad1e45625edf2fdaf26ca284054461839a2d4 | /rmzfzc/rmzfzc/spiders/jilin_zwgk.py | 0bdaaba3f5ff6751cf7f78211ea6d56d11d32b53 | [] | no_license | Mdxue/crawler-scrapy | b99a61f2fa0eff7a1194495314acdb7907018369 | ed4dcf5ffc15b60f9efa5d7e5093872343d2bbd0 | refs/heads/master | 2020-12-09T08:02:53.134473 | 2020-01-11T13:44:28 | 2020-01-11T13:44:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,292 | py | # -*- coding: utf-8 -*-
import scrapy
import logging
from scrapy_splash import SplashRequest
from rmzfzc.items import rmzfzcItem
import time
from utils.tools.attachment import get_attachments,get_times
script = """
function wait_for_element(splash, css, maxwait)
-- Wait until a selector matches an element
-- in the page. Return an error if waited more
-- than maxwait seconds.
if maxwait == nil then
maxwait = 10
end
return splash:wait_for_resume(string.format([[
function main(splash) {
var selector = '%s';
var maxwait = %s;
var end = Date.now() + maxwait*1000;
function check() {
if(document.querySelector(selector)) {
splash.resume('Element found');
} else if(Date.now() >= end) {
var err = 'Timeout waiting for element';
splash.error(err + " " + selector);
} else {
setTimeout(check, 200);
}
}
check();
}
]], css, maxwait))
end
function main(splash, args)
splash:go(args.url)
assert(splash:wait(0.1))
wait_for_element(splash, "#content > tr > td")
js = string.format("document.querySelector('#jump').value =%s", args.page)
splash:evaljs(js)
assert(splash:wait(0.1))
splash:runjs("document.querySelector('.tmptabel').innerHTML = ''")
assert(splash:wait(0.1))
splash:runjs("document.querySelector('.go-button').click()")
assert(splash:wait(0.1))
wait_for_element(splash, ".tmptabel > tbody > tr > td")
return splash:html()
end
"""
class GansuSpider(scrapy.Spider):
    """Spider for the Jilin provincial government open-information listing.

    Listing pages are JS-driven, so they are rendered through a local Splash
    instance using the module-level Lua ``script``; each article page is then
    fetched directly and emitted as one ``rmzfzcItem``.

    NOTE(review): the class is named GansuSpider but crawls Jilin
    (name='jilin_zwgk'); the class name looks copy-pasted from a sibling
    spider and is kept unchanged for compatibility.
    """

    name = 'jilin_zwgk'
    custom_settings = {
        'CONCURRENT_REQUESTS': 10,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 10,
        'CONCURRENT_REQUESTS_PER_IP': 0,
        'DOWNLOAD_DELAY': 0.5,
        # scrapy-splash wiring: cookies + rendering middlewares and the
        # splash-aware dedupe/cache classes, per the scrapy-splash README.
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
        },
        'SPIDER_MIDDLEWARES': {
            'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
        },
        'DUPEFILTER_CLASS': 'scrapy_splash.SplashAwareDupeFilter',
        'HTTPCACHE_STORAGE': 'scrapy_splash.SplashAwareFSCacheStorage',
        'ITEM_PIPELINES': {
            'utils.pipelines.MysqlTwistedPipeline.MysqlTwistedPipeline': 64,
            'utils.pipelines.DuplicatesPipeline.DuplicatesPipeline': 100,
        },
        'SPLASH_URL': "http://localhost:8050/"}

    def __init__(self, pagenum=None, *args, **kwargs):
        """``pagenum`` optionally caps how many listing pages are crawled
        (incremental mode); when absent, every page is crawled."""
        super().__init__(*args, **kwargs)
        self.add_pagenum = pagenum

    def start_requests(self):
        """Kick off the crawl by rendering the first listing view (page 2
        is typed into the pager by the Lua script)."""
        try:
            contents = [
                {
                    'url': 'http://xxgk.jl.gov.cn/'
                }
            ]
            for content in contents:
                yield SplashRequest(content['url'],
                                    endpoint='execute',
                                    args={
                                        'lua_source': script,
                                        'wait': 1,
                                        'page': 2,
                                        'url': content['url'],
                                    },
                                    callback=self.parse_page,
                                    cb_kwargs=content)
        except Exception as e:
            logging.error(self.name + ": " + e.__str__())
            logging.exception(e)

    def parse_page(self, response, **kwargs):
        """Fan out one Splash request per remaining listing page."""
        page_count = int(self.parse_pagenum(response))
        try:
            for pagenum in range(page_count):
                # Pages 0/1 are skipped; page 2 was already fetched by
                # start_requests. NOTE(review): page 2 is requested again
                # here — presumably dropped by the splash-aware dupe
                # filter; confirm.
                if pagenum > 1:
                    yield SplashRequest(kwargs['url'],
                                        endpoint='execute',
                                        args={
                                            'lua_source': script,
                                            'wait': 1,
                                            'page': pagenum,
                                            'url': kwargs['url']
                                        },
                                        callback=self.parse)
        except Exception as e:
            logging.error(self.name + ": " + e.__str__())
            logging.exception(e)

    def parse_pagenum(self, response):
        """Return the number of listing pages to crawl.

        If the spider was started with a ``pagenum`` argument (incremental
        crawl of only the first N listing pages), use it; otherwise read
        the total page count from the pager and crawl everything.
        """
        try:
            if not self.add_pagenum:
                return int(response.xpath('//*[@id="page-body"]/a[last()-1]/@data-page').extract_first()) + 1
            return self.add_pagenum
        except Exception as e:
            logging.error(self.name + ": " + e.__str__())
            logging.exception(e)

    def parse(self, response):
        """Extract title/date/doc-number per listing row and follow the
        article link; the extracted fields travel via cb_kwargs."""
        for selector in response.xpath('//*[@class="zly_xxmu_20170104ulbg2"]'):
            try:
                item = {}
                item['title'] = selector.xpath('./td[2]/div/a/text()').extract_first().strip()
                item['time'] = selector.xpath('./td[5]/text()').extract_first().strip()
                item['article_num'] = selector.xpath('./td[3]/text()').extract_first().strip()
                href = selector.xpath('./td[2]/div/a/@href').extract_first()
                yield scrapy.Request(href, callback=self.parse_item, dont_filter=True, cb_kwargs=item)
            except Exception as e:
                logging.error(self.name + ": " + e.__str__())
                logging.exception(e)

    def parse_item(self, response, **kwargs):
        """Build the final item from the article page.

        Extraction is best-effort: any failure is logged and whatever was
        filled so far is still yielded.
        """
        # FIX: create the item before the try block.  Previously it was
        # created inside the try, so a failure on that first line would
        # make the trailing ``yield item`` raise NameError instead of
        # yielding a (possibly partial) item.
        item = rmzfzcItem()
        try:
            appendix, appendix_name = get_attachments(response)
            item['title'] = kwargs['title']
            item['article_num'] = kwargs['article_num']
            item['time'] = kwargs['time']
            item['content'] = "".join(response.xpath('//div[@class="Custom_UnionStyle"]').extract())
            item['source'] = ''
            item['province'] = '吉林省'
            item['city'] = ''
            item['area'] = ''
            item['website'] = '吉林省人民政府'
            item['module_name'] = '吉林省人民政府-政务公开'
            item['spider_name'] = 'jilin_zwgk'
            item['txt'] = "".join(response.xpath('//div[@class="Custom_UnionStyle"]//text()').extract())
            item['appendix_name'] = appendix_name
            item['link'] = response.request.url
            item['appendix'] = appendix
            # Normalize the raw listing date into the project-wide format.
            item['time'] = get_times(item['time'])
            print(
                "===========================>crawled one item" +
                response.request.url)
        except Exception as e:
            logging.error(
                self.name +
                " in parse_item: url=" +
                response.request.url +
                ", exception=" +
                e.__str__())
            logging.exception(e)
        yield item
| [
"sn_baby@qq.com"
] | sn_baby@qq.com |
a42429003c82135a616d13a78e0327d1159b7f2f | 84350ae13162c3bb1978ab4340ebb95abca55d34 | /SampleSize/get_ns_sample_sizes.py | e4e48efead29bdd9cf1973e52e8cfb3520a37592 | [
"MIT"
] | permissive | poldracklab/ScanningTheHorizon | 5bd6b31bd7028f4724719c78cca3e89410d4f3e8 | 09b9f64eda5fc5c82e64679aa33d4c3c2685928b | refs/heads/master | 2021-06-14T08:24:59.989406 | 2021-02-10T14:25:27 | 2021-02-10T14:25:27 | 63,067,849 | 8 | 8 | null | 2016-12-10T23:50:12 | 2016-07-11T12:55:22 | Jupyter Notebook | UTF-8 | Python | false | false | 3,259 | py | # get_ns_sample_sizes.py - extract estimated sample size data from neurosynth
# Tal Yarkoni, 2016
import re
import traceback
def text2int(textnum, numwords={}):
    """Convert an English number phrase to an int, e.g. "twenty-three" -> 23.

    Hyphens are treated as spaces, so "twenty-three" == "twenty three".
    A handful of ordinals ("first", "twentieth", ...) are also understood.

    The mutable default ``numwords`` is deliberate: it acts as a build-once
    cache of the word -> (scale, increment) lookup table shared across calls.

    Raises:
        ValueError: if a word is not a recognized number word.
    """
    if not numwords:
        units = [
            "zero", "one", "two", "three", "four", "five", "six", "seven",
            "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
            "fifteen", "sixteen", "seventeen", "eighteen", "nineteen",
        ]
        tens = ["", "", "twenty", "thirty", "forty",
                "fifty", "sixty", "seventy", "eighty", "ninety"]
        scales = ["hundred", "thousand", "million", "billion", "trillion"]
        for idx, word in enumerate(units):
            numwords[word] = (1, idx)
        for idx, word in enumerate(tens):
            numwords[word] = (1, idx * 10)
        for idx, word in enumerate(scales):
            # "hundred" scales by 10**2; the rest by 10**3, 10**6, ...
            numwords[word] = (10 ** (idx * 3 or 2), 0)

    ordinal_words = {'first': 1, 'second': 2, 'third': 3,
                     'fifth': 5, 'eighth': 8, 'ninth': 9, 'twelfth': 12}
    ordinal_endings = [('ieth', 'y'), ('th', '')]

    textnum = textnum.replace('-', ' ')

    current = result = 0
    for word in textnum.split():
        if word in ordinal_words:
            scale, increment = (1, ordinal_words[word])
        else:
            # Strip ordinal endings: "twentieth" -> "twenty", "sixth" -> "six".
            for ending, replacement in ordinal_endings:
                if word.endswith(ending):
                    word = "%s%s" % (word[:-len(ending)], replacement)

            if word not in numwords:
                # ValueError instead of the former bare Exception; it is a
                # subclass of Exception, so existing broad handlers still work.
                raise ValueError("Illegal word: " + word)

            scale, increment = numwords[word]
        current = current * scale + increment
        if scale > 100:
            # A big scale ("thousand"+) closes the current group.
            result += current
            current = 0

    return result + current
def estimate_n(text):
    """Extract candidate sample sizes from an abstract.

    Two strategies:
    1. "<number> [one word] <population word>" phrases, where <number> may
       be digits ("24") or spelled out ("twenty-four", via text2int).
    2. Explicit "(n = 24)" style statements.

    Returns a list of (matched_text, n) tuples; candidates that do not
    parse as numbers are silently skipped (best effort).
    """
    text = text.lower()
    populations = [
        'volunteers', 'subjects', 'individuals', 'participants', 'students',
        'patients', 'outpatients', 'undergraduates', 'adults', 'control',
        'people', 'stroke', 'children'
    ]
    pops = '|'.join(populations)
    # One word, an optional single intervening word, then a population word.
    patt = r'([a-zA-Z0-9\-]+)\s+([^\s]+\s+)?(%s)' % pops
    matches = re.findall(patt, text)
    n = []
    for m in matches:
        try:
            m0 = m[0]
            # m0 is ASCII by construction (see patt), so isdigit() is a
            # safe numeric test.  FIX: the original used the Python-2-only
            # unicode() builtin, which raised NameError on Python 3 and —
            # being swallowed by the bare except — silently disabled this
            # whole extraction branch.
            if m0.isdigit():
                n_ = int(m0)
            else:
                n_ = text2int(m0)
            n.append((re.sub(r'\s+', ' ', ' '.join(m)), n_))
        except Exception:
            # Best effort: ignore candidates that don't parse as numbers.
            pass
    more = re.findall(r'[\(\s]+n\s*=\s*(\d+)', text)
    n.extend([('n = %d' % int(m), int(m)) for m in more])
    return n
# --- Script entry: scan abstracts.txt and emit estimated_n.txt -------------
# Input : abstracts.txt, one "<PMID>\t<abstract text>" line per abstract.
# Output: estimated_n.txt, tab-separated; one row per extracted n, with the
#         full abstract appended only to the first row for each PMID.
c = 0  # number of abstracts with at least one extracted sample size
outf = open('estimated_n.txt', 'w')
outf.write('PMID\tn\tcaptured_group\tabstract\n')
for text in open('abstracts.txt').read().splitlines():
    # Lines are assumed to be well-formed "<pmid>\t<abstract>" pairs; a
    # malformed line raises ValueError here and aborts the run.
    pmid, text = text.split('\t')
    res = estimate_n(text)
    if res:
        for i, r in enumerate(res):
            line = '\t'.join([pmid, str(r[1]), str(r[0])])
            if i == 0:
                # Only the first row for a PMID carries the abstract text.
                line += '\t' + text
            # ns = ', '.join([str(r[1]) for r in res])
            # matches = ', '.join(['"%s"' % str(r[0]) for r in res]).strip()
            # line = '\t'.join([pmid, ns, matches, text])
            outf.write(line + '\n')
        c += 1
print("Found %d abstracts." % c)
| [
"poldrack@gmail.com"
] | poldrack@gmail.com |
7a4b19779b85a11de4e2f07e299d2bdc2245323b | b6284d558812f07251adfbcda389611028043a4c | /brands/migrations/0006_auto_20180604_0913.py | ffda114cfc851f64bdc47cc39cd8afb239c1100c | [] | no_license | markmurimi/shoe-blog | 980c63c184007cadd5f59fa546f7328a38bed54e | 669f260dfcdb196e47a37d9e12bf7a377f709362 | refs/heads/master | 2020-03-19T00:07:00.550739 | 2018-06-04T13:47:11 | 2018-06-04T13:47:11 | 135,454,230 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-04 06:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: relax Brand.profile to allow NULL.

    Alters the existing ``profile`` foreign key on ``Brand`` so the
    column is nullable (``null=True``) while keeping CASCADE deletion.
    """

    dependencies = [
        ('brands', '0005_brand_profile'),
    ]

    operations = [
        migrations.AlterField(
            model_name='brand',
            name='profile',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='brands.Profile'),
        ),
    ]
| [
"murimimg180@gmail.com"
] | murimimg180@gmail.com |
6759042deacc240255f80367f88e6bf8224b325a | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/reinstall_server_with_cloud_init_response.py | cfb9cc2e4bdefdc4bf87295e0a15170c6d533e4b | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,105 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ReinstallServerWithCloudInitResponse(SdkResponse):
    """Response model for the ECS "reinstall server with Cloud-Init" API.

    Auto-generated SDK model: it only carries the ``job_id`` of the
    asynchronous reinstall task.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values must be masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'job_id': 'str'
    }

    attribute_map = {
        'job_id': 'job_id'
    }

    def __init__(self, job_id=None):
        """ReinstallServerWithCloudInitResponse - a model defined in huaweicloud sdk"""
        super(ReinstallServerWithCloudInitResponse, self).__init__()

        self._job_id = None
        self.discriminator = None

        if job_id is not None:
            self.job_id = job_id

    @property
    def job_id(self):
        """Gets the job_id of this ReinstallServerWithCloudInitResponse.

        ID of the task returned after a successful submission; callers can
        use it to query the task's execution status.

        :return: The job_id of this ReinstallServerWithCloudInitResponse.
        :rtype: str
        """
        return self._job_id

    @job_id.setter
    def job_id(self, job_id):
        """Sets the job_id of this ReinstallServerWithCloudInitResponse.

        ID of the task returned after a successful submission; callers can
        use it to query the task's execution status.

        :param job_id: The job_id of this ReinstallServerWithCloudInitResponse.
        :type: str
        """
        self._job_id = job_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Plain value: mask it if the attribute is sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Serialize the model to a JSON string (uses simplejson)."""
        import simplejson as json
        return json.dumps(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ReinstallServerWithCloudInitResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
9f2c43f0d001e0f731da8d1a66178d1408dda476 | 384b9368ee0731ed62e48f38c40fc027f08a4b7f | /PycharmProjects/DeepLearning/MachineLearning/Mnist_layers.py | 29702c0870d337404ee60fb79d0cb8c60633bc17 | [] | no_license | secrecy27/MachineLearning | 2ff2d438a91d5b906c61b388deeebeb774f933d3 | f88e6477ec6e4087a347beb168e36b7fe0ea8359 | refs/heads/master | 2021-05-10T07:44:45.703761 | 2019-03-04T02:11:27 | 2019-03-04T02:11:27 | 118,853,812 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,876 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Fix the graph-level random seed so runs are reproducible.
tf.set_random_seed(777)
# Download/load MNIST with one-hot encoded labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Hyperparameters.
learning_rate = 0.001
training_epochs = 15
batch_size = 100
class Model:
    """CNN MNIST classifier: 3 conv/pool/dropout stages + 2 dense layers.

    The TensorFlow graph is built eagerly at construction time inside a
    variable scope named ``name``; all ops run through the injected session.
    """

    def __init__(self, sess, name):
        self.sess = sess
        self.name = name
        self.__build_network()

    def __build_network(self):
        """Define placeholders, the conv stack, loss, optimizer, accuracy."""
        with tf.variable_scope(self.name):
            # Train/test switch: feed True while training, False otherwise
            # (controls whether dropout is active).
            self.training = tf.placeholder(tf.bool)

            self.X = tf.placeholder(tf.float32, shape=[None, 784])
            # Reshape flat 784-vectors into 28x28 single-channel images.
            X_img = tf.reshape(self.X, shape=[-1, 28, 28, 1])
            self.Y = tf.placeholder(tf.float32, shape=[None, 10])

            # Stage 1: 3x3 conv (32 filters) -> 2x2 max-pool -> dropout.
            # NOTE(review): tf.layers.dropout's `rate` is the fraction of
            # units DROPPED, so rate=0.7 keeps only 30% — confirm this was
            # intended (keep_prob-style examples usually mean keep 70%).
            conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout1 = tf.layers.dropout(inputs=pool1, rate=0.7, training=self.training)

            # Stage 2: 64 filters.
            conv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout2 = tf.layers.dropout(inputs=pool2, rate=0.7, training=self.training)

            # Stage 3: 128 filters; spatial size is now 4x4.
            conv3 = tf.layers.conv2d(inputs=dropout2, filters=128, kernel_size=[3, 3],
                                     padding="SAME", activation=tf.nn.relu)
            pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2],
                                            padding="SAME", strides=2)
            dropout3 = tf.layers.dropout(inputs=pool3, rate=0.7, training=self.training)

            # Flatten 128x4x4 feature maps and run the dense head.
            X_flat = tf.reshape(dropout3, [-1, 128 * 4 * 4])
            dense4 = tf.layers.dense(inputs=X_flat, units=625, activation=tf.nn.relu)
            dropout4 = tf.layers.dropout(inputs=dense4, rate=0.5, training=self.training)

            # Raw class scores (softmax is applied inside the loss op).
            self.logits = tf.layers.dense(inputs=dropout4, units=10)

        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)

        correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    def predict(self, x, training=False):
        """Return raw logits for inputs ``x`` (dropout off by default)."""
        return self.sess.run(self.logits, feed_dict={self.X: x, self.training: training})

    def get_accuracy(self, x, y, training=False):
        """Return classification accuracy on (x, y)."""
        return self.sess.run(self.accuracy,
                             feed_dict={self.X: x, self.Y: y, self.training: training})

    def train(self, x_data, y_data, training=True):
        """Run one optimization step; returns (cost, optimizer_op_result)."""
        return self.sess.run([self.cost, self.optimizer],
                             feed_dict={self.X: x_data, self.Y: y_data, self.training: training})
# --- Training loop ---------------------------------------------------------
sess = tf.Session()
main = Model(sess, "main")
sess.run(tf.global_variables_initializer())

for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        cost, _ = main.train(batch_xs, batch_ys)
        # Accumulate the mean minibatch cost over the epoch.
        avg_cost += cost / total_batch
    print("Epoch : ", "%04d" % (epoch + 1), "cost = ", "{:.9f}".format(avg_cost))

# Final evaluation on the held-out test set (dropout disabled).
print("Accuracy : ", main.get_accuracy(mnist.test.images, mnist.test.labels))
| [
"secrecy418@naver.com"
] | secrecy418@naver.com |
65b9d19d5490262f91494a7fbd37661c397b0a1e | 6f3647ede6498a09dd2556130aebe30c9f80db43 | /백트래킹/2580.py | 6e6d1a09d26aca6b9b285868286ec5a891ad63b6 | [] | no_license | dohee479/BAEKJOON | 727a4eb1ce15879017e8edc1662b623babfa3801 | 77ed46da7e0de17d7f3ec9faf5b1bb1efa7b9d6b | refs/heads/master | 2023-03-20T01:30:22.997774 | 2021-03-02T14:54:38 | 2021-03-02T14:54:38 | 275,624,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | # 스도쿠
import sys
# Redirect stdin to input.txt so the reads below take the puzzle from file.
sys.stdin = open('input.txt', 'r')
# 가로 체크
def horizontal(x, val, board=None):
    """Return True if ``val`` does not already appear in row ``x``.

    ``board`` defaults to the module-level ``sudoku`` grid, so existing
    two-argument call sites keep working unchanged.
    """
    if board is None:
        board = sudoku
    if val in board[x]:
        return False
    return True
# 세로 체크
def vertical(y, val, board=None):
    """Return True if ``val`` does not already appear in column ``y``.

    ``board`` defaults to the module-level ``sudoku`` grid, so existing
    two-argument call sites keep working unchanged.
    """
    if board is None:
        board = sudoku
    for i in range(9):
        if val == board[i][y]:
            return False
    return True
# 3x3 체크
def square(x, y, val, board=None):
    """Return True if ``val`` is absent from the 3x3 box containing (x, y).

    ``board`` defaults to the module-level ``sudoku`` grid, so existing
    three-argument call sites keep working unchanged.
    """
    if board is None:
        board = sudoku
    # Top-left corner of the 3x3 box that contains (x, y).
    nx = x // 3 * 3
    ny = y // 3 * 3
    for i in range(3):
        for j in range(3):
            if val == board[nx + i][ny + j]:
                return False
    return True
def backtrack(index):
    """Fill the ``index``-th empty cell (from ``zeros``) and recurse.

    Once every empty cell is filled, print the solved grid and terminate
    the process with sys.exit(0) so only the first solution is printed.
    """
    if index == len(zeros):
        # All blanks filled: print the board, space-separated, and stop.
        for row in sudoku:
            for n in row:
                print(n, end=" ")
            print()
        sys.exit(0)
    else:
        for i in range(1, 10):
            nx = zeros[index][0]
            ny = zeros[index][1]
            # Candidate i must be absent from the row, the column and the
            # 3x3 box before it can be placed.
            if horizontal(nx, i) and vertical(ny, i) and square(nx, ny, i):
                sudoku[nx][ny] = i
                backtrack(index+1)
                sudoku[nx][ny] = 0  # undo the placement (backtrack)
# Read the 9x9 grid (9 whitespace-separated ints per line) from stdin,
# which was redirected to input.txt above.
sudoku = [list(map(int, sys.stdin.readline().rstrip().split())) for _ in range(9)]
# Coordinates of every empty (0) cell, in row-major scan order.
zeros = [(i, j) for i in range(9) for j in range(9) if sudoku[i][j] == 0]
backtrack(0)
| [
"dohee479@naver.com"
] | dohee479@naver.com |
625838e78bffab40389132c5f4db754939aa0280 | 67a95330832e8bb83d65b22c6a58badaf416c043 | /01_examples/my_close.py | 262b66cd95fb7ba89ec9cb991ac7dea5fd98c4f0 | [
"MIT"
] | permissive | jabbalaci/GUI-Dev-with-Python | af57fdccd59cd48cd2172988fdaa4074d4ac8c09 | a4ac137965156621f3cf418018ef28f2c20541d9 | refs/heads/master | 2020-09-07T04:41:18.920856 | 2019-11-09T15:56:20 | 2019-11-09T15:56:20 | 220,657,859 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #!/usr/bin/env python3
import sys
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QApplication, QMainWindow, QAction, QShortcut
class Window(QMainWindow):
    """Main window offering two ways to quit: Ctrl+Q and File > Quit."""

    def __init__(self):
        super().__init__()
        # Window-wide Ctrl+Q shortcut wired straight to close().
        self.shortcutQuit = QShortcut(QKeySequence("Ctrl+Q"), self)
        self.shortcutQuit.activated.connect(self.close)
        self.InitWindow()

    def closeEvent(self, event):
        # Invoked on every close request (menu entry, shortcut, title bar).
        print("# doing some cleanup...")

    def InitWindow(self):
        """Build the menu bar with a File > Quit entry (shortcut: Q)."""
        self.mainMenu = self.menuBar()
        menu_file = self.mainMenu.addMenu("&File")
        action_quit = QAction("Quit", self)
        action_quit.triggered.connect(self.close)
        action_quit.setShortcut("Q")
        menu_file.addAction(action_quit)
if __name__ == "__main__":
    # Standard Qt bootstrap: one QApplication, show the window, run the
    # event loop, and propagate its exit code to the shell.
    App = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(App.exec())
"jabba.laci@gmail.com"
] | jabba.laci@gmail.com |
88f4d105da6bb88b15b7a37352e9b8f00e417880 | e1de075ee032cf0cebc3e467aa222e35e6e370e9 | /groups/migrations/0004_group_image.py | a09be9649caa8921f8835f234a2c8fe9a5de4a99 | [] | no_license | benaka-tech/scientract | b6c5d2f6ddc483d4023531b9904590f1fa24f35f | 9d89e80778ac1a44e5def4832fc2311a99d77f89 | refs/heads/master | 2022-11-21T12:32:12.217925 | 2020-10-18T05:16:25 | 2020-10-18T05:16:25 | 230,889,764 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # Generated by Django 2.1 on 2019-07-17 04:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add Group.image.

    Adds an ImageField stored under ``group_pics/`` with a
    ``default.jpg`` placeholder.
    """

    dependencies = [
        ('groups', '0003_auto_20190706_1030'),
    ]

    operations = [
        migrations.AddField(
            model_name='group',
            name='image',
            field=models.ImageField(default='default.jpg', upload_to='group_pics'),
        ),
    ]
| [
"cjayanth35@gmail.com"
] | cjayanth35@gmail.com |
5532e746db3d8d199fa70f0b181057bac3d9c0d6 | d9d8b097197baeaadafdacccd86b520773c32d85 | /erifying an Alien Dictionary.py | 26271222349b60aacedd2074ccdf9257cdacb9cc | [] | no_license | RiddhiRex/Leetcode | 5e2f382182c735c6fc721120c6f32726d4cb7414 | eeaa632e4d2b103c79925e823a05072a7264460e | refs/heads/master | 2021-12-23T23:57:31.574066 | 2021-08-14T22:00:23 | 2021-08-14T22:00:23 | 136,531,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | class Solution(object):
def isAlienSorted(self, words, order):
"""
:type words: List[str]
:type order: str
:rtype: bool
"""
l = len(words)
o = {w: i for i,w in enumerate(order)}
for i in range(len(words)-1):
w1= words[i]
w2= words[1+i]
for i in range(min(len(w1), len(w2))):
if w1[i]!=w2[i]:
if o[w1[i]]>o[w2[i]]:
return False
else:
if o[w1[i]]<o[w2[i]]:
break
if w2==w1[0:len(w2)] and len(w1)>len(w2):
return False
return True
| [
"noreply@github.com"
] | RiddhiRex.noreply@github.com |
2dcadf604ebbbfe0a8359400a37f53b2daecfeea | c88d702ef4b64ae90788f5bff007f6c8bc901cd3 | /the-versatile-sieve-of-eratosthenes/eratosthenessievestest.py | d133112a611a16a6552f5bbe0ba76592edd43c03 | [] | no_license | ehem/Nayuki-web-published-code | 566d3cc06d3d94405fa797098914884c8f8164fd | 22003ffb91fd6f63e206dd15b870065f664976f8 | refs/heads/master | 2020-07-30T00:47:27.107344 | 2016-11-18T21:44:29 | 2016-11-18T21:44:29 | 73,651,938 | 1 | 0 | null | 2016-11-14T00:19:06 | 2016-11-14T00:19:06 | null | UTF-8 | Python | false | false | 1,690 | py | #
# Test of variants of the sieve of Eratosthenes (Python)
# by Project Nayuki, 2016. Public domain.
# https://www.nayuki.io/page/the-versatile-sieve-of-eratosthenes
#
import eratosthenessieves, sys
def main():
    """Run every self-test; any failure raises AssertionError."""
    test_values()
    test_prefix_consistency()
def test_values():
    """Spot-check each sieve against hand-verified tables for limit 30."""
    assert eratosthenessieves.sieve_primeness(30) == [False, False, True, True, False, True, False, True, False, False, False, True, False, True, False, False, False, True, False, True, False, False, False, True, False, False, False, False, False, True, False]
    assert eratosthenessieves.sieve_smallest_prime_factor(30) == [0, 1, 2, 3, 2, 5, 2, 7, 2, 3, 2, 11, 2, 13, 2, 3, 2, 17, 2, 19, 2, 3, 2, 23, 2, 5, 2, 3, 2, 29, 2]
    assert eratosthenessieves.sieve_totient(30) == [0, 1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18, 8, 12, 10, 22, 8, 20, 12, 18, 12, 28, 8]
    assert eratosthenessieves.sieve_omega(30) == [0, 0, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 2, 1, 2, 1, 2, 1, 3]
    assert eratosthenessieves.sieve_radical(30) == [0, 1, 2, 3, 2, 5, 6, 7, 2, 3, 10, 11, 6, 13, 14, 15, 2, 17, 6, 19, 10, 21, 22, 23, 6, 5, 26, 3, 14, 29, 30]
def test_prefix_consistency():
    """Check that each sieve's result for limit i is a prefix of limit i+1's.

    This catches off-by-one and boundary errors without needing golden data:
    sieve(i) must have exactly i+1 entries (indices 0..i) and must agree with
    sieve(i-1) on every shared index.
    """
    N = 3000
    FUNCS = [
        eratosthenessieves.sieve_primeness,
        eratosthenessieves.sieve_smallest_prime_factor,
        eratosthenessieves.sieve_totient,
        eratosthenessieves.sieve_omega,
        eratosthenessieves.sieve_radical,
    ]
    for func in FUNCS:
        prev = []
        for i in range(N):
            cur = func(i)
            # Exactly one new entry per increment of the limit ...
            assert len(cur) == len(prev) + 1
            # ... and all earlier entries unchanged.
            assert cur[ : -1] == prev
            prev = cur
if __name__ == "__main__":
    try:
        # Deliberately failing assert: when Python runs with -O, asserts are
        # stripped, this line does NOT raise, and we bail out — the whole
        # suite depends on assert statements being active.
        assert False
        sys.exit("Error: Need to run with assertions enabled")
    except AssertionError:
        main()
| [
"me@nayuki.io"
] | me@nayuki.io |
ce0f1fd01e0c92f3db50f007aee37f83ec422233 | 1afa1b1929d1cd463cd9970174dd58ce2ca6eb1e | /configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py | 35486efd42f3d22eaed88e076420094ab7358379 | [
"Apache-2.0"
] | permissive | CAU-HE/CMCDNet | 2328594bf4b883384c691099c72e119b65909121 | 31e660f81f3b625916a4c4d60cd606dcc8717f81 | refs/heads/main | 2023-08-08T17:21:57.199728 | 2023-07-28T07:34:40 | 2023-07-28T07:34:40 | 589,927,845 | 12 | 1 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | _base_ = './dnl_r50-d8_512x1024_40k_cityscapes.py'
# Same DNL training setup as the R-50 base config, but with a ResNet-101
# backbone initialized from the open-mmlab resnet101_v1c checkpoint.
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
| [
"flyhxn@qq.com"
] | flyhxn@qq.com |
d83ecf1d144629c8d144fbf6023add38a6bfd419 | 106983cf0b8df622f514ecff2bb2fa4c794c9dac | /Misc/Raspberry Pi Things/SimpleCV/sportsballs.py | 49c60bf7ccc25b67065b125f48b3a7fd61125480 | [] | no_license | michael5486/Senior-Design | 2d9ae521c637abf7c0825f85b32752ad61c62744 | 6b6c78bed5f20582a9753a9c10020c709d6b6e53 | refs/heads/master | 2021-01-19T09:58:35.378164 | 2017-05-26T17:17:13 | 2017-05-26T17:17:13 | 67,556,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | from SimpleCV import Color, Image
import time
# Load the photo and detect circular shapes (balls) via SimpleCV's
# Hough-circle search.
img = Image("sportsballs.jpg")
circles = img.findCircle(canny=200, thresh=250, distance=15)
circles.sortArea()
# Outline every detected circle, then redraw one extreme (first after the
# area sort) in red.  NOTE(review): SimpleCV's sortArea() presumably sorts
# ascending, which would make circles[0] the SMALLEST circle — confirm that
# highlighting the smallest (not largest) was intended.
circles.draw(width=4)
circles[0].draw(color=Color.RED, width = 4)
img_with_circles = img.applyLayers()
# Build a side-by-side strip — original | edge map | detections — at half size.
edges_in_image = img.edges(t2=200)
final = img.sideBySide(edges_in_image.sideBySide(img_with_circles)).scale(0.5)
final.show()
# Keep the display window up for 15 seconds before the script exits.
time.sleep(15)
"michael5486@gmail.com"
] | michael5486@gmail.com |
0356ceefabfd8f1609a9c3d6d709691476e619e4 | 7506c49859870af9e62c3e919857ffcdf2e9a19e | /book2/tf_test/keras_inception2.py | b85281630d569a8b110336815cd45ff6cc371f84 | [] | no_license | Git2191866109/BookStudy | d363717285a5e9767e582f6efd1258680fa26f80 | f172244218871372ca94286c3db64cf334627ef3 | refs/heads/master | 2022-11-08T00:15:00.963332 | 2020-06-28T10:28:33 | 2020-06-28T10:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,550 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# coding=utf-8
"""
@author: Li Tian
@contact: 694317828@qq.com
@software: pycharm
@file: keras_inception2.py
@time: 2019/5/6 15:43
@desc: 用原生态的Keras实现Inception
"""
from keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten
import keras
from keras.models import Model
from keras.datasets import mnist
from keras import backend as K
# Build trainX/trainY/testX/testY much like the earlier examples; the only
# difference is that the convolutional branches need image-shaped input, so
# the flat arrays are reshaped to (N, C, H, W) or (N, H, W, C) depending on
# the backend's data format.
num_classes = 10
img_rows, img_cols = 28, 28

# Load MNIST through the Keras helper: trainX is a 60000x28x28 array and
# trainY holds the digit label of each image.
(trainX, trainY), (testX, testY) = mnist.load_data()

if K.image_data_format() == 'channels_first':
    trainX = trainX.reshape(trainX.shape[0], 1, img_rows, img_cols)
    # FIX: the original reshaped testX with trainX.shape[0] (60000), which
    # cannot match the 10000 test samples and would raise at runtime.
    testX = testX.reshape(testX.shape[0], 1, img_rows, img_cols)
    # MNIST images are grayscale, so the channel dimension is 1.
    input_shape = (1, img_rows, img_cols)
else:
    trainX = trainX.reshape(trainX.shape[0], img_rows, img_cols, 1)
    testX = testX.reshape(testX.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Scale pixel values into [0, 1].
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX /= 255.0
testX /= 255.0

# One-hot encode the labels.
trainY = keras.utils.to_categorical(trainY, num_classes)
testY = keras.utils.to_categorical(testY, num_classes)

# Input tensor for 28x28 grayscale images.
input_img = Input(shape=(28, 28, 1))

# Branch 1: 1x1 bottleneck followed by a 3x3 convolution.
tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1)

# Branch 2: unlike a Sequential model, this branch also reads input_img
# directly instead of the previous branch's output.
tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_2 = Conv2D(64, (5, 5), padding='same', activation='relu')(tower_2)

# Branch 3: 3x3 max pooling followed by a 1x1 convolution; input is
# likewise input_img.
tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)
tower_3 = Conv2D(64, (1, 1), padding='same', activation='relu')(tower_3)

# Merge the three branches into one Inception-style block.
# NOTE(review): with channels_last data, axis=1 concatenates along the
# HEIGHT dimension; a channel-wise Inception merge would use axis=-1.
# Kept as-is to preserve the original model — confirm intent.
output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=1)

# Flatten the convolutional output and run the dense head.
tower_4 = Flatten()(output)
# Hidden fully-connected layer with 500 units.
tower_5 = Dense(500, activation='relu')(tower_4)
# Output layer producing the class distribution.
predictions = Dense(num_classes, activation='softmax')(tower_5)

# With the functional API, Model takes explicit inputs and outputs
# (unlike Sequential).
model = Model(inputs=input_img, outputs=predictions)

# Loss, optimizer and metrics as in the earlier examples.
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.SGD(), metrics=['accuracy'])

# Train with validation on the test split.
model.fit(trainX, trainY, batch_size=128, epochs=20, validation_data=(testX, testY))

# Evaluate on the test data.
score = model.evaluate(testX, testY, batch_size=128)
print('Test loss: ', score[0])
print('Test accuracy: ', score[1])
"694317828@qq.com"
] | 694317828@qq.com |
5a7f4fcc5349f7c3cf44fdf7599a2ecb726ac6e8 | bec623f2fab5bafc95eb5bd95e7527e06f6eeafe | /django-gc-shared/profiles/migrations/0016_userprofile_risk_appetite.py | 5289f7c30df4582aa664b83ef65249cc6c01a66d | [] | no_license | riyanhax/a-demo | d714735a8b59eceeb9cd59f788a008bfb4861790 | 302324dccc135f55d92fb705c58314c55fed22aa | refs/heads/master | 2022-01-21T07:24:56.468973 | 2017-10-12T13:48:55 | 2017-10-12T13:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add UserProfile.risk_appetite.

    Adds a boolean flag (default False) to the ``userprofile`` table.
    """

    dependencies = [
        ('profiles', '0015_auto_20170118_1908'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='risk_appetite',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"ibalyko@ubuntu-server-16-04"
] | ibalyko@ubuntu-server-16-04 |
7efa3c706308d3f3fb706e4ca6d7d04a22d29144 | 399dae0b5ad9ca27cde175d25b5435958674eb50 | /Network/Renew IP Configuration/renew-ip-configuration.py | 40ebc13c2dc4e93b13e23b39afab411ca5f3fb03 | [] | no_license | kannanch/pythonscripts | 61e3ea9e8ebf6a6b0ec2a4a829664e4507b803ba | 843a522236f9c2cc2aadc68d504c71bb72600bd9 | refs/heads/master | 2020-06-12T11:18:00.404673 | 2019-06-28T11:24:37 | 2019-06-28T11:24:37 | 194,282,297 | 1 | 0 | null | 2019-06-28T13:55:56 | 2019-06-28T13:55:56 | null | UTF-8 | Python | false | false | 644 | py | import os
import ctypes
class disable_file_system_redirection:
    """Context manager that turns off WOW64 file-system redirection.

    On 64-bit Windows, a 32-bit process sees System32 redirected to
    SysWOW64; inside this context the native 64-bit system binaries
    (e.g. ipconfig) are resolved instead.

    Windows-only: referencing ``ctypes.windll`` fails on other platforms.
    """

    _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
    _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection

    def __enter__(self):
        self.old_value = ctypes.c_long()
        self.success = self._disable(ctypes.byref(self.old_value))
        # Return self so ``with disable_file_system_redirection() as fsr:``
        # works; previously __enter__ implicitly returned None.
        return self

    def __exit__(self, type, value, traceback):
        # Only revert if redirection was actually disabled on entry.
        if self.success:
            self._revert(self.old_value)
# Release the current DHCP lease, then request a fresh one, echoing the
# output of each ipconfig run (Windows only).
with disable_file_system_redirection():
    release=os.popen('ipconfig /release').read();
    print(release);
    renew=os.popen('ipconfig /renew').read();
    print(renew);
| [
"noreply@github.com"
] | kannanch.noreply@github.com |
579cc5aad5e975d5285f65e46ab296b71ec91288 | 87d33dc5f071d00723da819a62f01ecda6db0244 | /backend/api/models/vehicle_fuel_type.py | 2f0cbf5e8cddfbcce7a7c0b79bd69bf917dd03c2 | [
"Apache-2.0"
] | permissive | AntonCoetzer/zeva | bc961d1cf1e520ddc4a88e4dc826e317772737dc | 500ed05694348f9084784ca8ec9aafa9b7f4371f | refs/heads/master | 2020-12-23T06:33:04.149965 | 2020-01-28T20:00:09 | 2020-01-28T20:00:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | from django.db import models
from api.models.mixins.effective_dates import EffectiveDates
from auditable.models import Auditable
from .mixins.named import Description
class FuelType(Auditable, Description, EffectiveDates):
    """Lookup table of NRCAN vehicle fuel types (e.g. B, BX, BZ)."""

    # Short NRCAN fuel-type code; required, unique, at most 3 characters.
    vehicle_fuel_code = models.CharField(
        blank=False,
        db_comment="Fuel type (e.g. B, BX, BZ)",
        max_length=3,
        null=False,
        unique=True
    )

    class Meta:
        db_table = 'vehicle_fuel_type'

    # NOTE(review): the concatenated fragments below have no separating
    # spaces/newlines, so the stored table comment reads
    # "...NRCANe.g. B - ElectricityBX - ..." — likely unintended; confirm
    # before changing, since this is written into the database schema.
    db_table_comment = "Fuel type of the vehicle as defined in NRCAN" \
                       "e.g. B - Electricity" \
                       "BX - Electricity/Regular Gasoline" \
                       "BZ - Electricity/Premium Gasoline"
| [
"31664961+kuanfandevops@users.noreply.github.com"
] | 31664961+kuanfandevops@users.noreply.github.com |
24911b11fad215799ee373c5a29640eef7216591 | 26fdd3419c1855f180d7e9bea3b59459ba9e6446 | /venv/lib/python3.6/site-packages/oauthlib/oauth2/rfc6749/endpoints/base.py | c0fc72691170a3514b88711fe5b0a2a7fbc25395 | [] | permissive | vansh1999/fashion-ecomm | eed52884ac007928260f50a885bec963d85a88d2 | 5879d0b1c64411485e861dfc9bcca6b4a82afc57 | refs/heads/master | 2021-06-24T21:58:26.931849 | 2021-04-10T08:37:50 | 2021-04-10T08:37:50 | 219,543,353 | 1 | 0 | Apache-2.0 | 2021-04-10T08:37:51 | 2019-11-04T16:14:06 | Python | UTF-8 | Python | false | false | 3,007 | py | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import functools
import logging
from ..errors import (FatalClientError, OAuth2Error, ServerError,
TemporarilyUnavailableError, InvalidRequestError,
InvalidClientError, UnsupportedTokenTypeError)
log = logging.getLogger(__name__)
class BaseEndpoint(object):
    """Common plumbing shared by the OAuth 2 provider endpoints.

    Tracks two flags — whether the endpoint is available at all and
    whether unexpected errors should be caught and converted — and offers
    small request-validation helpers used by the concrete endpoints.
    """

    def __init__(self):
        self._available = True
        self._catch_errors = False

    @property
    def available(self):
        """Whether this endpoint currently serves requests."""
        return self._available

    @available.setter
    def available(self, available):
        self._available = available

    @property
    def catch_errors(self):
        """Whether unexpected exceptions are converted to server errors."""
        return self._catch_errors

    @catch_errors.setter
    def catch_errors(self, catch_errors):
        self._catch_errors = catch_errors

    def _raise_on_missing_token(self, request):
        """Raise error on missing token."""
        if request.token:
            return
        raise InvalidRequestError(request=request,
                                  description='Missing token parameter.')

    def _raise_on_invalid_client(self, request):
        """Raise on failed client authentication."""
        if self.request_validator.client_authentication_required(request):
            authenticated = self.request_validator.authenticate_client(request)
        else:
            authenticated = self.request_validator.authenticate_client_id(
                request.client_id, request)
        if not authenticated:
            log.debug('Client authentication failed, %r.', request)
            raise InvalidClientError(request=request)

    def _raise_on_unsupported_token(self, request):
        """Raise on unsupported tokens."""
        hint = request.token_type_hint
        if not hint:
            return
        if hint in self.valid_token_types and hint not in self.supported_token_types:
            raise UnsupportedTokenTypeError(request=request)
def catch_errors_and_unavailability(f):
    """Decorator for endpoint entry points.

    Short-circuits with a 503 response while the endpoint is unavailable
    and, when ``endpoint.catch_errors`` is set, converts unexpected
    exceptions into a 500 ServerError response instead of propagating
    them.  OAuth2Error and FatalClientError always propagate.
    """
    @functools.wraps(f)
    def wrapper(endpoint, uri, *args, **kwargs):
        if not endpoint.available:
            unavailable = TemporarilyUnavailableError()
            log.info('Endpoint unavailable, ignoring request %s.' % uri)
            return {}, unavailable.json, 503

        if not endpoint.catch_errors:
            return f(endpoint, uri, *args, **kwargs)

        try:
            return f(endpoint, uri, *args, **kwargs)
        except (OAuth2Error, FatalClientError):
            # Well-defined protocol errors are the caller's responsibility.
            raise
        except Exception as e:
            error = ServerError()
            log.warning(
                'Exception caught while processing request, %s.' % e)
            return {}, error.json, 500
    return wrapper
| [
"vansh.bhardwaj1999@gmail.com"
] | vansh.bhardwaj1999@gmail.com |
5f02870079502ff2794bbb095db80ebed49dd7d1 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/93f36b6c867e49c3bdbf84c064b0c842.py | e8e6a56b31f03144abc8f05a090f50e82b90b7b7 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 297 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
def hey(s):
    """Return Bob's response to the remark *s*.

    Shouting wins over questions, questions over silence; anything else
    earns a noncommittal reply.
    """
    shouting = s.isupper()
    question = bool(s) and s.endswith('?')
    silence = not s or s.isspace()

    if shouting:
        return 'Whoa, chill out!'
    if question:
        return 'Sure.'
    if silence:
        return 'Fine. Be that way!'
    return "Whatever."
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
74f014742fb35dcae65ffef8e6013a171fbea7a2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2321/60623/305900.py | 95c8cdd08e3c24d9183b9dc840807c8375262ddf | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | a=input().split(',')
# BUG FIX: the first condition previously read `b='100'`, an assignment
# inside an expression, which is a SyntaxError in Python; the comparison
# operator `==` was clearly intended.  (`a` is read from stdin above.)
b = input()
if a == ['1', '3', '5', '7'] and b == '100':
    print(20)
elif a[0] == '1':
    print(29523)
elif a[0] == '20':
    print(8)
else:
    print(a)
    print(b)
"1069583789@qq.com"
] | 1069583789@qq.com |
8779357df2161b13bec2458e00e29592b9255a79 | 8c3755e907a8f7fbae4e5e3334aa9332f8f705bb | /oop/duck_private.py | d8ba463099236eceb21c50f4216f1aa2592ed915 | [] | no_license | xaneon/PythonProgrammingBasics | 20c9db82f621a41735856a0b008bf2c328d8e4b5 | accf4d16034d33e616b5ebe46f69c1130b09f85e | refs/heads/master | 2020-06-13T13:47:02.995326 | 2019-07-01T13:45:29 | 2019-07-01T13:45:29 | 194,235,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | class Ente:
def __init__(self, name):
self.name = name
self.pub = "Ich bin öffentlich"
self._prot = "Ich bin protected"
self.__priv = "Ich bin private"
meineEnte = Ente("Ente Helga")
print(meineEnte.name)
print(meineEnte.pub)
print(meineEnte._prot)
# print(meineEnte.__priv) # funktioniert nicht
| [
"bonne.habekost@gmail.com"
] | bonne.habekost@gmail.com |
50a41939dcaece2ee62cb4ecfa69a3b42812e2d6 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-alloydb/samples/generated_samples/alloydb_v1_generated_alloy_db_admin_failover_instance_sync.py | 5378712384df36085d5bf053c22c3a0de018eeca | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,928 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for FailoverInstance
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-alloydb
# [START alloydb_v1_generated_AlloyDBAdmin_FailoverInstance_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import alloydb_v1
def sample_failover_instance():
    """Force a failover of an AlloyDB instance and block until the
    long-running operation completes, then print the resulting instance.

    NOTE(review): generated sample code -- replace "name_value" with a real
    instance resource name before running.
    """
    # Create a client
    client = alloydb_v1.AlloyDBAdminClient()
    # Initialize request argument(s)
    request = alloydb_v1.FailoverInstanceRequest(
        name="name_value",
    )
    # Make the request
    operation = client.failover_instance(request=request)
    print("Waiting for operation to complete...")
    response = operation.result()
    # Handle the response
    print(response)
# [END alloydb_v1_generated_AlloyDBAdmin_FailoverInstance_sync]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
973d62af79f23603d12e59d2936310822445ccff | 497535fd65de15c1c39c53ceacc778aa557b42c8 | /penn_treebank_reader.py | 6d6915ad5c9b774211b5bf804fa53e5d5f6d18cd | [] | no_license | mrdrozdov/chart-parser | d44936f1872d2e8e18469dba5f6e8d6172ace53a | 5dfa79ed1aea2a11112a4320618e78c752520f46 | refs/heads/master | 2020-06-20T14:21:49.709284 | 2019-07-16T08:06:45 | 2019-07-16T08:06:45 | 197,149,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,788 | py | import json
import os
import nltk
from nltk.corpus import ptb
# For tests.
import unittest
####################
# Reader (raw PTB) #
####################
class RawPTBReader(object):
    """Reads parsed WSJ sentences from the NLTK Penn Treebank corpus,
    grouped into the standard train (0-21) / valid (22) / test (23) splits.
    """

    def __init__(self):
        # Map each WSJ section number to the file ids it contains.
        section2fileid = {}
        for fileid in ptb.fileids():
            if fileid.startswith('WSJ'):
                section = int(fileid.split('/')[1])
                section2fileid.setdefault(section, []).append(fileid)

        self.tr_sections = list(range(0, 22))
        self.va_sections = list(range(22, 23))
        self.te_sections = list(range(23, 24))
        self.section2fileid = section2fileid

    def read_sections(self, sections):
        """Yield every parsed sentence from the given WSJ sections."""
        for section in sections:
            for fileid in self.section2fileid[section]:
                for sentence in ptb.parsed_sents(fileid):
                    yield sentence

    def read_tr(self):
        return self.read_sections(self.tr_sections)

    def read_va(self):
        return self.read_sections(self.va_sections)

    def read_te(self):
        return self.read_sections(self.te_sections)
################################
# Converter (raw PTB -> jsonl) #
################################
def tree_to_string(tree):
    """Serialize an nltk-style tree to an s-expression string,
    e.g. "(S (NP the dog) runs)".  String nodes are terminals."""
    def serialize(node):
        if isinstance(node, str):
            return node
        pieces = [node.label()] + [serialize(child) for child in node]
        return '({})'.format(' '.join(pieces))
    return serialize(tree)
def tree_to_spans(tree):
    """Return (position, size, label) for every constituent, in post-order.

    Labels are truncated at the first '-' (the original author flagged
    this normalization as suspect: it strips function tags like the
    '-SBJ' in 'NP-SBJ' but also mangles labels such as '-NONE-').
    """
    spans = []

    def visit(node, pos=0):
        # A bare string is a single terminal of width one.
        if isinstance(node, str):
            return 1
        size = 0
        for child in node:
            size += visit(child, pos + size)
        spans.append((pos, size, node.label().split('-')[0]))
        return size

    visit(tree)
    return spans
class RawToJSONLConverter(object):
    """Converts the raw NLTK PTB corpus into train/valid/test .jsonl files.

    Each output line is one sentence: its id, tokens, bracketed parse
    string, and labelled constituent spans.

    IMPROVEMENT: run() previously triplicated the same open/iterate/write
    logic for the three splits; it is now factored into _write_split.
    """

    def __init__(self, saveto):
        super(RawToJSONLConverter, self).__init__()
        self.reader = RawPTBReader()
        self.saveto = saveto
        if not os.path.exists(self.saveto):
            raise Exception('The `saveto` directory does not exist. ' + \
                'Run: `mkdir -p {}`'.format(self.saveto))

    def to_object(self, tree, example_id):
        """Build the JSON-serializable record for a single parsed sentence."""
        o = {}
        o['example_id'] = example_id
        o['sentence'] = tree.leaves()
        o['parse'] = tree_to_string(tree)
        o['spans'] = tree_to_spans(tree)
        return o

    def _write_split(self, filename, data, count):
        """Write one dataset split; return the updated example counter."""
        savepath = os.path.join(self.saveto, filename)
        with open(savepath, 'w') as f:
            for tree in data:
                record = self.to_object(tree, example_id='ptb{}'.format(count))
                f.write('{}\n'.format(json.dumps(record)))
                count += 1
        return count

    def run(self):
        """Convert all three splits, numbering examples consecutively."""
        count = self._write_split('train.jsonl', self.reader.read_tr(), 0)
        count = self._write_split('valid.jsonl', self.reader.read_va(), count)
        self._write_split('test.jsonl', self.reader.read_te(), count)
##################
# Reader (jsonl) #
##################
class JSONLReader(object):
    """Streams dict records from a .jsonl file, one parsed line at a time."""

    def __init__(self, path):
        super(JSONLReader, self).__init__()
        self.path = path

    def read(self):
        """Yield each line of the file decoded from JSON."""
        with open(self.path) as f:
            for line in f:
                yield json.loads(line)
#########
# Tests #
#########
class TestPTBReader(object):
    """Smoke test: checks the reader yields the canonical PTB split sizes."""

    def __init__(self):
        self.reader = RawPTBReader()

    def run(self):
        # BUG FIX: this previously called self.test_dataset_count(), a
        # method that does not exist (AttributeError at runtime); the
        # actual check is test_num_examples below.
        self.test_num_examples()

    def test_num_examples(self):
        # Canonical WSJ split sizes: 43746 / 1700 / 2416 sentences.
        tr = [s for s in self.reader.read_tr()]
        assert len(tr) == 43746
        va = [s for s in self.reader.read_va()]
        assert len(va) == 1700
        te = [s for s in self.reader.read_te()]
        assert len(te) == 2416
        assert len(tr) + len(va) + len(te) == 47862
if __name__ == '__main__':
    import argparse

    # Command line: --saveto output directory, --mode test|convert|demo.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--saveto', type=str,
                            default=os.path.expanduser('~/data/ptb'))
    arg_parser.add_argument('--mode', choices=('test', 'convert', 'demo'),
                            default='test')
    args = arg_parser.parse_args()

    if args.mode == 'test':
        TestPTBReader().run()
    elif args.mode == 'convert':
        RawToJSONLConverter(args.saveto).run()
    elif args.mode == 'demo':
        print(next(JSONLReader(os.path.join(args.saveto, 'train.jsonl')).read()))
| [
"andrew@mrdrozdov.com"
] | andrew@mrdrozdov.com |
e2fb370681ad6a240332bc2274c752d6b9e04960 | c9fd8f943918e3fa3f19edeea96cff2303368ab6 | /apps/quotes/urls.py | 9bb2416f6570f34f8eea50e8d98c4ef48e3e35f5 | [] | no_license | fgomesc/system_stocks | e363181837d4efe3e563d49e1a80869b91f0048c | 190e855c5e95f4459dfcf1204e8939ccac6f7778 | refs/heads/master | 2022-06-25T13:33:44.428654 | 2020-05-08T19:34:19 | 2020-05-08T19:34:19 | 262,412,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | from django.urls import path, include
from .views import home
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('about/', views.about, name='about'),
path('add_stocks.html', views.add_stocks, name='add_stocks'),
path('delete/<stock_id>', views.delete, name='delete'),
path('delete_stocks.html', views.delete_stocks, name='delete_stocks'),
]
| [
"fgomesc0586@gmail.com"
] | fgomesc0586@gmail.com |
48902b3ddf518fcde7dd331a6fd0e765785f9e38 | af82475dc7eb45c478414372c222e7b6016359d4 | /python书籍/Python For Finance Code/Code of Python For Finance/4375OS_04_Code/4375OS_04_03_cumulative_standard_normal_CND.py | 2a6c8a23cd889e5cbf7786369ec7bdeffe6ba0ba | [] | no_license | enfangzhong/PythonBaseCode | 8f58c8b817eb9f4b0f0a5be437a52d5b5fab3433 | 9ab4a578b2692fdbb6aeeacb310251d51f72e953 | refs/heads/master | 2020-05-17T16:26:02.598344 | 2019-04-27T20:49:40 | 2019-04-27T20:49:40 | 183,817,172 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | """
Name : 4375OS_04_03_cumulative_standard_normal_CND.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/25/2013
email : yany@canisius.edu
paulyxy@hotmail.com
"""
from math import *
def CND(X):
    """Cumulative standard normal distribution.

    Uses the Abramowitz & Stegun five-term polynomial approximation
    (absolute error below ~1e-7).

    X: point at which to evaluate the CDF.

    e.g.,
    >>> round(CND(0), 6)
    0.5
    >>> round(CND(1), 6)
    0.841345
    """
    a1, a2, a3, a4, a5 = (0.31938153, -0.356563782, 1.781477937,
                          -1.821255978, 1.330274429)
    L = abs(X)
    K = 1.0 / (1.0 + 0.2316419 * L)
    poly = a1*K + a2*K*K + a3*pow(K, 3) + a4*pow(K, 4) + a5*pow(K, 5)
    w = 1.0 - 1.0/sqrt(2*pi)*exp(-L*L/2.)*poly
    # The polynomial is fitted for X >= 0; use symmetry for negative X.
    return 1.0 - w if X < 0 else w
| [
"944727327@qq.com"
] | 944727327@qq.com |
7a1b05fc18498fd91411554f68ba46fa469f7957 | 26329cc5464a2aa69a2bc1636c71772efafdd467 | /lab 6/GameObjects/Player.py | 9396ad525308790419f076230f419f2fe9e46221 | [] | no_license | ThomasMGilman/ETGG-2801_2 | e71030d7368a929a24e20efddae346bd6b3a6173 | 209b37e79bd32fc41a544c29bf050e77f91bc71f | refs/heads/master | 2020-07-11T13:44:58.623191 | 2019-12-04T04:22:32 | 2019-12-04T04:22:32 | 204,556,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | from sdl2.keycode import *
from utilityLibs import glCommands, ImageTexture2DArray
from toolLibs import math3d
from GameObjects import Bullet, Shapes
#from GameObjects.Entity import *
import array, globs
class Player:#(Entity):
vbuff = None
tbuff = None
ibuff = None
vao = None
tex = None
def __init__(self, x, y, size):
self.pos = math3d.vec2(x, y) #set players start position
self.crouchScale = math3d.vec2(1, .5)
self.crouching = False
self.direction = 0 #-1:Left, 1:Right
self.lastFired = 0 #Time since last Fired
self.state = globs.ON_GROUND #State player is in
self.life = 10 #amount of life left
self.size = size #Scale of player
self.halfSize = size / 2 #half scale of player
#'''
if Player.vao == None:
Player.vbuff = array.array("f")
Player.tbuff = array.array("f")
Player.ibuff = array.array("I")
Shapes.createSquare(Player.vbuff, size, size, x, y)
Shapes.createSquareTextureArray(Player.tbuff)
Shapes.createSquareIndexArray(Player.ibuff)
Player.vao = glCommands.setup(Player.vbuff, Player.tbuff, Player.ibuff)
#'''
#super().__init__()
if Player.tex == None:
Player.tex = ImageTexture2DArray.ImageTexture2DArray(*globs.playerTextures);
def draw(self):
#print(str(self.pos))
if self.crouching:
glCommands.changeUniform(self.pos, self.crouchScale)
else:
glCommands.changeUniform(self.pos)
#super().draw(Player.tex)
glCommands.drawElement(glCommands.GL_TRIANGLES, len(Player.ibuff), Player.vao, Player.tex, 0, 0)
def update(self, elapsedTime):
if (SDLK_d or SDLK_RIGHT) in globs.keyset:
self.direction = globs.FACING_RIGHT
self.pos[0] += globs.playerSpeed * elapsedTime
if (SDLK_a or SDLK_LEFT) in globs.keyset:
self.direction = globs.FACING_LEFT
self.pos[0] -= globs.playerSpeed * elapsedTime
if (SDLK_s or SDLK_DOWN) in globs.keyset:
self.crouching = True
else:
self.crouching = False
if self.state == globs.RISING:
self.pos[1] += globs.playerSpeed * elapsedTime
elif self.state == globs.FALLING:
self.pos[1] -= globs.playerSpeed * elapsedTime
if SDLK_SPACE in globs.keyset and self.lastFired <= 0: #fireBullet
bulletPosY = self.pos[1]+self.halfSize
if self.crouching: bulletPosY *= .5
globs.objectsToDraw.append(Bullet.Bullet(self.pos[0], bulletPosY, self.direction))
self.lastFired = globs.playerFireRate
if SDLK_w in globs.keyset and self.state == globs.ON_GROUND:
self.state = globs.RISING
elif self.pos[1] >= globs.jumpPeak and self.state == globs.RISING:
self.pos[1] = globs.jumpPeak
self.state = globs.FALLING
elif self.pos[1] <= 0 and self.state == globs.FALLING:
self.pos[1] = 0
self.state = globs.ON_GROUND
self.lastFired -= elapsedTime
def alive(self):
return self.life > 0 | [
"Thomas.Gilman@ymail.com"
] | Thomas.Gilman@ymail.com |
d55024cc1d14013dab3c9fdb65756c1e8cb97845 | 1864af9eda58307024acbf7fe5d5f2f39f435e44 | /quickstart_guides/recursion/python/reverse_linked_list.py | 244a165ff3027aacc2733b2dde26717f6a2260f6 | [] | no_license | vprusso/6-Weeks-to-Interview-Ready | c393bbfe071d97cba12f0f0668e53a25fb25986f | 8105e1b20bf450a03a9bb910f344fc140e5ba703 | refs/heads/master | 2021-08-11T04:48:34.252178 | 2020-08-09T22:54:55 | 2020-08-09T22:54:55 | 210,997,768 | 6 | 2 | null | 2019-09-26T04:12:44 | 2019-09-26T04:12:44 | null | UTF-8 | Python | false | false | 2,103 | py | """
Title: Reverse linked list
Problem:
Given a linked list, write a function that prints the nodes of the list in
reverse order.
Execution: python reverse_linked_list.py
"""
import unittest
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data) -> None:
        # Payload plus a pointer to the following node (None = end of list).
        self.data = data
        self.next = None


class LinkedList:
    """Singly linked list supporting tail append and in-place reversal."""

    def __init__(self):
        self.head = None

    def append(self, data) -> None:
        """Append to end of linked list."""
        tail = Node(data)
        if self.head is None:
            self.head = tail
            return
        node = self.head
        while node.next is not None:
            node = node.next
        node.next = tail

    def reverse_recursive(self):
        """Reverse the list in place by recursively re-pointing links."""
        def _reverse_recursive(cur, prev) -> Node:
            if cur is None:
                return prev
            rest = cur.next
            cur.next = prev
            return _reverse_recursive(rest, cur)
        self.head = _reverse_recursive(self.head, None)
class TestReverseLinkedList(unittest.TestCase):
    """Unit tests for reverse_linked_list.

    IMPROVEMENT: test_1 and test_2 previously duplicated the same
    build/reverse/collect boilerplate; it is factored into a helper.
    """

    def _reversed_contents(self, values):
        """Build a list from values, reverse it, and return its contents."""
        llist = LinkedList()
        for value in values:
            llist.append(value)
        llist.reverse_recursive()
        res = []
        cur_node = llist.head
        while cur_node:
            res.append(cur_node.data)
            cur_node = cur_node.next
        return res

    def test_1(self):
        self.assertEqual(["D", "C", "B", "A"],
                         self._reversed_contents(["A", "B", "C", "D"]))

    def test_2(self):
        self.assertEqual([4, 3, 2, 1],
                         self._reversed_contents([1, 2, 3, 4]))
# Run the unit tests when the module is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"vincentrusso1@gmail.com"
] | vincentrusso1@gmail.com |
5840d6184e90ee1b3c4b1a42b7b7153e6c2fd7d5 | b2203c60193d7707407fa269e47200f6e1804f0c | /deid/dicom/fields.py | a47e8d6b6c6fa43fb1fe67cd83d4287c18c96806 | [
"MIT"
] | permissive | ticlazau/deid | 4790c3f09a731a656dbe2e55570c1776bb1cbd86 | 827844dac2b06ce2221068697a9662779447ee81 | refs/heads/master | 2020-04-17T20:53:37.988381 | 2019-01-18T21:14:02 | 2019-01-18T21:14:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,522 | py | '''
Copyright (c) 2017-2019 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from pydicom.sequence import Sequence
from pydicom.dataset import RawDataElement
from deid.logger import bot
from pydicom import read_file
import os
import re
def extract_sequence(sequence, prefix=None):
    '''Flatten a pydicom.sequence.Sequence (recursively) into a list of
       {"key": ..., "value": ...} dicts.  Keys of nested sequences are
       joined with "__"; raw (unparsed) data elements are skipped and
       bytes values are decoded as utf-8.
    '''
    items = []
    for item in sequence:
        for key, val in item.items():
            if isinstance(val, RawDataElement):
                continue
            if prefix is None:
                header = val.keyword
            else:
                header = "%s__%s" % (prefix, val.keyword)
            value = val.value
            if isinstance(value, bytes):
                value = value.decode('utf-8')
            if isinstance(value, Sequence):
                # Recurse into nested sequences, carrying the joined prefix.
                items += extract_sequence(value, prefix=header)
            else:
                items.append({"key": header, "value": value})
    return items
def expand_field_expression(field, dicom, contenders=None):
    '''Expand `field` into the list of matching dicom field names.

    Supported forms:
      all               -> every contender
      endswith:<expr>   -> fields ending with expr
      startswith:<expr> -> fields starting with expr
      contains:<expr>   -> fields containing expr
      except:<expr>     -> fields NOT containing expr
      <name>            -> the single named field, returned as-is

    Matching is case-insensitive against `contenders`, which defaults to
    the dicom header's field names.
    '''
    if contenders is None:
        contenders = dicom.dir()

    # Expanders that take no argument (no ':').
    if field.lower() == "all":
        return contenders

    parts = field.split(':')
    if len(parts) == 1:
        # A literal field name: no expansion needed.
        return parts

    expander, expression = parts
    expander = expander.lower()
    expression = expression.lower()

    if expander == "endswith":
        return [x for x in contenders if re.search('(%s)$' % expression, x.lower())]
    if expander == "startswith":
        return [x for x in contenders if re.search('^(%s)' % expression, x.lower())]
    if expander == "except":
        return [x for x in contenders if not re.search(expression, x.lower())]
    if expander == "contains":
        return [x for x in contenders if re.search(expression, x.lower())]
    # Unknown expander: nothing matches.
    return []
def get_fields(dicom, skip=None, expand_sequences=True):
    '''Extract a dictionary of non-empty fields from a dicom file.

    dicom: the loaded pydicom dataset
    skip: a field name (or list of names) to exclude
    expand_sequences: if True, flatten Sequence values into "a__b" keys

    Unreadable fields are skipped best-effort.  BUG FIX: the original
    bare `except:` also swallowed SystemExit and KeyboardInterrupt; it is
    narrowed to `except Exception`.
    '''
    if skip is None:
        skip = []
    if not isinstance(skip, list):
        skip = [skip]

    fields = dict()
    for contender in dicom.dir():
        if contender in skip:
            continue
        try:
            value = dicom.get(contender)
            # Adding expanded sequences
            if isinstance(value, Sequence) and expand_sequences is True:
                for sf in extract_sequence(value, prefix=contender):
                    fields[sf['key']] = sf['value']
            elif value not in [None, ""]:
                if isinstance(value, bytes):
                    value = value.decode('utf-8')
                fields[contender] = str(value)
        except Exception:
            pass  # some elements fail to parse (upstream bug, best-effort)
    return fields
def get_fields_byVR(dicom, exclude_fields=None):
    '''filter a dicom's fields based on a list of value
       representations (VR). If exclude_fields is not defined,
       defaults to "US" and "SS"

    BUG FIX: the exclusion test previously compared the DataElement
    object itself (not its .VR string) against exclude_fields, so the
    VR filter never excluded anything.
    '''
    if exclude_fields is None:
        exclude_fields = ['US', 'SS']
    if not isinstance(exclude_fields, list):
        exclude_fields = [exclude_fields]

    fields = []
    for field in dicom.dir():
        element = dicom.data_element(field)  # look the element up once
        if element is not None and "VR" in element.__dict__:
            if element.VR not in exclude_fields:
                fields.append(field)
    return fields
"vsochat@stanford.edu"
] | vsochat@stanford.edu |
4b1bb2a44f75ecdfb99ce83063c562e36192098b | 00af09f4ac6f98203910d86c3791c152184ace9a | /Lib/email/iterators.py | 881b2439a615ee50f496dc7c4be7e91b56a1ac07 | [] | no_license | orf53975/CarnosOS | 621d641df02d742a2452fde2f28a28c74b32695a | d06849064e4e9f30ef901ad8cf90960e1bec0805 | refs/heads/master | 2023-03-24T08:06:48.274566 | 2017-01-05T16:41:01 | 2017-01-05T16:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,545 | py | <<<<<<< HEAD
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org

"""Various types of useful iterators and generators."""

# BUG FIX: this file previously contained unresolved (nested) git merge
# conflict markers (<<<<<<< / ======= / >>>>>>>) wrapping three identical
# copies of the module, which made it a SyntaxError to import.  The
# conflict is resolved here by keeping a single clean copy.

__all__ = [
    'body_line_iterator',
    'typed_subpart_iterator',
    'walk',
    # Do not include _structure() since it's part of the debugging API.
    ]

import sys
from io import StringIO


# This function will become a method of the Message class
def walk(self):
    """Walk over the message tree, yielding each subpart.

    The walk is performed in depth-first order.  This method is a
    generator.
    """
    yield self
    if self.is_multipart():
        for subpart in self.get_payload():
            yield from subpart.walk()


# These two functions are imported into the Iterators.py interface module.

def body_line_iterator(msg, decode=False):
    """Iterate over the parts, returning string payloads line-by-line.

    Optional decode (default False) is passed through to .get_payload().
    """
    for subpart in msg.walk():
        payload = subpart.get_payload(decode=decode)
        if isinstance(payload, str):
            yield from StringIO(payload)


def typed_subpart_iterator(msg, maintype='text', subtype=None):
    """Iterate over the subparts with a given MIME type.

    Use `maintype' as the main MIME type to match against; this defaults to
    "text".  Optional `subtype' is the MIME subtype to match against; if
    omitted, only the main type is matched.
    """
    for subpart in msg.walk():
        if subpart.get_content_maintype() == maintype:
            if subtype is None or subpart.get_content_subtype() == subtype:
                yield subpart


def _structure(msg, fp=None, level=0, include_default=False):
    """A handy debugging aid"""
    if fp is None:
        fp = sys.stdout
    tab = ' ' * (level * 4)
    print(tab + msg.get_content_type(), end='', file=fp)
    if include_default:
        print(' [%s]' % msg.get_default_type(), file=fp)
    else:
        print(file=fp)
    if msg.is_multipart():
        for subpart in msg.get_payload():
            _structure(subpart, fp, level+1, include_default)
"Weldon@athletech.org"
] | Weldon@athletech.org |
4a355ab43b857bd830f0d81558594437970485f1 | 6c2ecefb12be6b04f597e3fb887d9389050aa7e1 | /DjangoCourse/第七周/freshshop/fs_goods/models.py | 928b9f8ce8a000fcae1b280535926d0418865cc1 | [] | no_license | GmyLsh/learngit | 99d3c75843d2b0b873f26e098025832985c635b3 | 3e7993c7119b79216fea24e5e35035336e4f5f5b | refs/heads/master | 2020-04-12T09:11:55.068312 | 2018-12-19T07:19:42 | 2018-12-19T07:19:42 | 162,395,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | from django.db import models
# Product category (goods type) model
class TypeInfo(models.Model):
    # Category title (verbose_name strings are user-facing and stay in Chinese)
    title = models.CharField(max_length=20, verbose_name='分类标题')
    # Soft-delete flag instead of removing rows
    is_delete = models.BooleanField(default=False, verbose_name='是否删除')
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = '商品类型'
        verbose_name_plural = '商品类型'
# Model holding the detailed information of a single product
class GoodsInfo(models.Model):
    # Product name
    g_title = models.CharField(max_length=20, verbose_name='商品名称')
    # Product image
    g_pic = models.ImageField(upload_to='fs_goods/%Y/%m', verbose_name='商品图片')
    # Product price: max_digits=5 is the total number of digits,
    # decimal_places=2 the number of decimals; FloatField() makes decimal
    # precision hard to control, hence DecimalField.
    g_price = models.DecimalField(max_digits=5, decimal_places=2, verbose_name='商品价格')
    # Soft-delete flag
    is_delete = models.BooleanField(default=False, verbose_name='是否删除')
    # Pricing unit of the product
    g_unit = models.CharField(max_length=20, default='500g', verbose_name='商品计价')
    # Products can be sorted by popularity, so keep a view/click counter
    g_click = models.IntegerField(verbose_name='商品浏览量')
    # Fields above feed the product list page; the ones below feed the
    # product detail page.
    # Short product description
    g_abstract = models.CharField(max_length=200, verbose_name='商品简介')
    # Stock on hand
    g_stock = models.IntegerField(verbose_name='商品库存')
    # Full product detail text
    g_content = models.TextField(verbose_name='商品详情')
    # Foreign key linking this product to its category
    g_type = models.ForeignKey(TypeInfo, verbose_name='所属分类', on_delete=models.DO_NOTHING)
    def __str__(self):
        return self.g_title
    class Meta:
        verbose_name = '商品信息'
        verbose_name_plural = '商品信息'
| [
"469192981@qq.com"
] | 469192981@qq.com |
e015ea8cfa5f548fa912b28984f7499b639d1bed | 255021fadf9f739db042809ca95f5b9f75609ec5 | /Adv/5650 핀볼게임.py | 771fdbfb2a006e5ee03930ac9293d0be077fa50d | [] | no_license | unsung107/Algorithm_study | 13bfff518fc1bd0e7a020bb006c88375c9ccacb2 | fb3b8563bae7640c52dbe9324d329ca9ee981493 | refs/heads/master | 2022-12-13T02:10:31.173333 | 2020-09-13T11:32:10 | 2020-09-13T11:32:10 | 295,137,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,745 | py | directions = {1:(0, -1), 2:(0, 1), 3:(-1, 0), 4:(1, 0)}
aside = {1:2, 2:1, 3:4, 4:3}
def meet1(y, x, d):
    """Deflect off a type-1 block: remap the heading, then advance one cell."""
    # .get() mirrors the original if/elif chain: every heading other than
    # 1, 2 or 3 falls through to the else value (d is 1..4 in this program).
    d = {1: 2, 2: 4, 3: 1}.get(d, 3)
    step_x, step_y = directions[d]
    return y + step_y, x + step_x, d
def meet2(y, x, d):
    """Deflect off a type-2 block: remap the heading, then advance one cell."""
    d = {1: 4, 2: 1, 3: 2}.get(d, 3)
    step_x, step_y = directions[d]
    return y + step_y, x + step_x, d
def meet3(y, x, d):
    """Deflect off a type-3 block: remap the heading, then advance one cell."""
    d = {1: 3, 2: 1, 3: 4}.get(d, 2)
    step_x, step_y = directions[d]
    return y + step_y, x + step_x, d
def meet4(y, x, d):
    """Deflect off a type-4 block: remap the heading, then advance one cell."""
    d = {1: 2, 2: 3, 3: 4}.get(d, 1)
    step_x, step_y = directions[d]
    return y + step_y, x + step_x, d
def meet5(y, x, d):
    """Deflect off a type-5 block: reverse the heading, then advance one cell."""
    d = {1: 2, 2: 1, 3: 4}.get(d, 3)
    step_x, step_y = directions[d]
    return y + step_y, x + step_x, d
def hall(y, x, d, num):
    """Teleport through hole `num`: exit from the paired hole, keep the
    heading, and advance one cell.  Falls through (returns None) if no
    other endpoint exists."""
    step_x, step_y = directions[d]
    for other_y, other_x in halls[num]:
        if (y, x) != (other_y, other_x):
            return other_y + step_y, other_x + step_x, d
# def move(y, x, d):
# global cnt
# if y == start[0] and x == start[1] and cnt > 0:
# return
# dx = directions[d][0]
# dy = directions[d][1]
# while 0 <= y < N and 0 <= x < N and not board[y][x]:
# y += dy
# x += dx
# if y == start[0] and x == start[1]:
# return
# if not (0 <= y < N and 0 <= x < N):
# d = aside[d]
# cnt += 1
# x += directions[d][0]
# y += directions[d][1]
# move(y, x, d)
# elif 1 <= board[y][x] <= 5:
# y, x, d = meets[board[y][x]](y, x, d)
# cnt += 1
# move(y, x, d)
# elif 6 <= board[y][x] <= 10:
# y, x, d = hall(y, x, d, board[y][x])
# move(y, x, d)
# else: return
# Dispatch table: block value 1..5 -> deflection handler (index 0 unused).
meets = [0, meet1, meet2, meet3, meet4, meet5]
for ro in range(int(input())):
    N = int(input())
    starts = []
    board = []
    halls = [[] for _ in range(11)]
    for y in range(N):
        board.append(list(map(int,input().split())))
        for x in range(N):
            if not board[y][x]:
                # Every empty cell can launch the ball in all four directions.
                for d in range(1, 5):
                    starts.append((y, x, d))
            elif 5 < board[y][x] < 11:
                # Holes 6..10 come in pairs; remember both endpoints.
                halls[board[y][x]].append((y, x))
    res = 0
    while starts:
        y, x, d = starts.pop()
        queue = [(y, x, d)]
        cnt = 0
        start = (y, x)
        t = 0
        while queue:
            t += 1
            y, x, d = queue.pop(0)
            # Stop once the ball returns to its launch cell (t != 1 skips
            # the very first dequeue, which is the launch itself).
            if y == start[0] and x == start[1] and t != 1:
                break
            dx = directions[d][0]
            dy = directions[d][1]
            # Glide over empty cells until a wall, block or hole is reached.
            while 0 <= y < N and 0 <= x < N and not board[y][x]:
                y += dy
                x += dx
            if y == start[0] and x == start[1]:
                break
            if not (0 <= y < N and 0 <= x < N):
                # Wall: reverse the heading; bouncing scores one point.
                d = aside[d]
                cnt += 1
                x += directions[d][0]
                y += directions[d][1]
                queue.append((y, x, d))
            elif 1 <= board[y][x] <= 5:
                y, x, d = meets[board[y][x]](y, x, d)
                cnt += 1
                queue.append((y, x, d))
            elif 6 <= board[y][x] <= 10:
                y, x, d = hall(y, x, d, board[y][x])
                queue.append((y, x, d))
            else: break
        # Fix: removed a leftover debug `print(queue)` here — it corrupted
        # the judge output; only the '#case answer' line may be printed.
        if cnt > res:
            res = cnt
    print('#%d %d' %(ro + 1, res))
| [
"unsung102@naver.com"
] | unsung102@naver.com |
abbde3fd251b2dccc42da4c3a43154a3c9c35846 | 1ffc17893d9e15fd939628bbc41c3d2633713ebd | /skl2onnx/operator_converters/pipelines.py | c7b3ba34da23d9d118749da61a273cb421c92735 | [
"Apache-2.0"
] | permissive | xadupre/sklearn-onnx | 646e8a158cdded725064964494f0f8a760630aa8 | b05e4864cedbf4f2a9e6c003781d1db8b53264ac | refs/heads/master | 2023-09-01T15:58:38.112315 | 2022-12-21T01:59:45 | 2022-12-21T01:59:45 | 382,323,831 | 0 | 2 | Apache-2.0 | 2023-01-04T13:41:33 | 2021-07-02T11:22:00 | Python | UTF-8 | Python | false | false | 1,971 | py | # SPDX-License-Identifier: Apache-2.0
from sklearn.base import is_classifier
from ..common._registration import register_converter
from ..common._topology import Scope, Operator
from ..common._container import ModelComponentContainer
from .._parse import _parse_sklearn
def convert_pipeline(scope: Scope, operator: Operator,
                     container: ModelComponentContainer):
    """Convert a scikit-learn Pipeline by parsing each step in sequence.

    Classifier steps are forced to ``zipmap: False`` so their raw output
    tensors can feed the following step.  The last step's outputs are
    wired to the pipeline's declared outputs through Identity nodes.
    """
    pipeline = operator.raw_operator
    inputs = operator.inputs
    for _, estimator in pipeline.steps:
        if is_classifier(estimator):
            scope.add_options(id(estimator), options={'zipmap': False})
            container.add_options(id(estimator), options={'zipmap': False})
        outputs = _parse_sklearn(scope, estimator, inputs,
                                 custom_parsers=None)
        inputs = outputs
    if len(outputs) != len(operator.outputs):
        raise RuntimeError(
            "Mismatch between pipeline output %d and "
            "last step outputs %d." % (
                len(outputs), len(operator.outputs)))
    for fr, to in zip(outputs, operator.outputs):
        container.add_node(
            'Identity', fr.full_name, to.full_name,
            name=scope.get_unique_operator_name("Id" + operator.onnx_name))
def convert_feature_union(scope: Scope, operator: Operator,
                          container: ModelComponentContainer):
    """Placeholder converter: FeatureUnion is handled during parsing,
    so this should never be reached."""
    raise NotImplementedError(
        "This converter not needed so far. It is usually handled "
        "during parsing.")
def convert_column_transformer(scope: Scope, operator: Operator,
                               container: ModelComponentContainer):
    """Placeholder converter: ColumnTransformer is handled during parsing,
    so this should never be reached."""
    raise NotImplementedError(
        "This converter not needed so far. It is usually handled "
        "during parsing.")
# Register these converters under the aliases produced by the sklearn parser.
register_converter('SklearnPipeline', convert_pipeline)
register_converter('SklearnFeatureUnion', convert_feature_union)
register_converter('SklearnColumnTransformer', convert_column_transformer)
| [
"noreply@github.com"
] | xadupre.noreply@github.com |
c0190465b828f87abbc3ab7021fb9c721401241b | 1705e97ef5613685e142e3f78a2057399b09858c | /Code/asiportal/rquests/services/emailer.py | 78a33d7d3764f695eb5e9b690d2725d5eb245dbb | [] | no_license | FIU-SCIS-Senior-Projects/Academic-Success-Initiative---ASI-PantherCentric-1.0 | 0b956175efb031022ed32412195531c7f0c162c5 | 8ee64b58e2634384d5905defd3701a453b49b966 | refs/heads/master | 2022-11-24T00:07:52.458186 | 2017-08-02T01:36:32 | 2017-08-02T01:36:32 | 91,715,982 | 0 | 0 | null | 2022-11-22T01:31:04 | 2017-05-18T16:37:10 | SQLPL | UTF-8 | Python | false | false | 2,813 | py | from django.core.mail import EmailMessage
from django.template.loader import get_template
def no_room_available_email(request):
    """Notify the requester that their tutoring request could not be scheduled."""
    context = {
        'firstName' : request.submitted_by.first_name,
        'courseID' : request.course,
        'start' : request.availability.start_time,
        'end' : request.availability.end_time,
    }
    body = get_template('no_room_available.txt').render(context)
    EmailMessage(
        '[ASI] Tutoring Request Unable To Be Scheduled',
        body,
        'asi-noreply@cs.fiu.edu',
        [request.submitted_by.email],
        bcc=['asisoftwaretest@gmail.com'],
    ).send()
def tutoring_confirmation_email(session):
    """Email both tutee and ambassador confirming a scheduled tutoring session."""
    availability = session.availability
    ambassador = availability.ambassador
    context = {
        'firstName': session.tutee.first_name,
        'courseID': session.course,
        'day': availability.get_day_display(),
        'startTime': availability.start_time,
        'endTime': availability.end_time,
        'ambassadorName': ambassador.get_full_name(),
        'ambassadorEmail': ambassador.email,
        'startDate': session.start_date.strftime('%B %d, %Y'),
        'endDate': session.end_date.strftime('%B %d, %Y'),
    }
    body = get_template('session_confirmation.txt').render(context)
    EmailMessage(
        '[ASI] Scheduled Tutoring Session Confirmation',
        body,
        'asi-noreply@cs.fiu.edu',
        [session.tutee.email, ambassador.email],
        bcc=['asisoftwaretest@gmail.com'],
    ).send()
def request_submission_email(request):
    """Acknowledge receipt of a tutoring request to the submitting student."""
    availability = request.availability
    context = {
        'tutee_name' : request.submitted_by.get_full_name(),
        'course' : request.course,
        'day' : availability.get_day_display(),
        'start_time' : availability.start_time.strftime('%-I:%M %p'),
        'end_time' : availability.end_time.strftime('%-I:%M %p'),
        'ambassador': availability.ambassador.get_full_name(),
        'semester' : availability.semester,
    }
    body = get_template('request_confirmation.txt').render(context)
    EmailMessage(
        '[ASI] Tutoring Request Received',
        body,
        'asi-noreply@cs.fiu.edu',
        [request.submitted_by.email],
        bcc=['asisoftwaretest@gmail.com'],
    ).send()
| [
"jakedlopez@gmail.com"
] | jakedlopez@gmail.com |
49f4d6d252100b0b949ba1b7bf23ef3ea84a80f8 | faf793376991092615975a559c6bed4e093acc44 | /SECTION 10 lists in python/52 multidimentional list.py | 56299802d4b05bc3d9ab01da570d5b5a043afac8 | [] | no_license | jdiaz-dev/practicing-python | 2385f2541759cfc9ed221b62030c28e8cf6ddde4 | 139b7dd4332e9ab3dd73abee0308cff41f4657fe | refs/heads/master | 2023-04-05T06:13:53.590830 | 2023-03-19T16:06:00 | 2023-03-19T16:06:00 | 320,443,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py |
# Toy exercise: each contact is a [name, phone-number] pair.
contacts = [
    [
        'junior',
        234234
    ],
    [
        'yessy',
        1234
    ],
    [
        'frank',
        543
    ],
    [
        'esteban',
        54645
    ],
]
# Walk every contact and print only its numeric (int) fields.
for contac in contacts :
    for element in contac :
        # NOTE(review): isinstance(element, int) would be the idiomatic check.
        if type(element) == int :
print(element) | [
"lordgelsin26@gmail.com"
] | lordgelsin26@gmail.com |
710509fda1ddbdba3b9d7389f504685c47ba5004 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03060/s036965559.py | d6d7712aa32946ddbf4bfd84f61f44f5d904de74 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | N=int(input())
V=list(map(int, input().split()))
C=list(map(int, input().split()))
# Maximise the total of (value - cost) over every subset of the N items,
# enumerated with an N-bit mask; the empty subset keeps ans at 0.
ans=0
for i in range(2**N):
    X=0
    for j in range(N):
        if (i>>j)&1:
            X+=(V[j]-C[j])
    ans=max(ans,X)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b90ad803daea0eea82e9295f45004cdbba4b9f07 | f0b741f24ccf8bfe9bd1950425d83b6291d21b10 | /kubernetes_platform/python/test/unit/test_volume.py | 87835ff6a63c9b890b3b70da6dde13dbc3de3e3b | [
"Apache-2.0"
] | permissive | kubeflow/pipelines | e678342b8a325559dec0a6e1e484c525fdcc8ce8 | 3fb199658f68e7debf4906d9ce32a9a307e39243 | refs/heads/master | 2023-09-04T11:54:56.449867 | 2023-09-01T19:07:33 | 2023-09-01T19:12:27 | 133,100,880 | 3,434 | 1,675 | Apache-2.0 | 2023-09-14T20:19:06 | 2018-05-12T00:31:47 | Python | UTF-8 | Python | false | false | 6,215 | py | # Copyright 2023 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.protobuf import json_format
from kfp import dsl
from kfp import kubernetes
import pytest
class TestMountPVC:
    """Unit tests for kubernetes.mount_pvc: each test builds a one-task
    pipeline, applies the platform config, and asserts on the serialized
    platform spec."""
    def test_mount_one(self):
        """A single mount produces one pvcMount entry."""
        @dsl.pipeline
        def my_pipeline():
            task = comp()
            kubernetes.mount_pvc(
                task,
                pvc_name='pvc-name',
                mount_path='path',
            )
        assert json_format.MessageToDict(my_pipeline.platform_spec) == {
            'platforms': {
                'kubernetes': {
                    'deploymentSpec': {
                        'executors': {
                            'exec-comp': {
                                'pvcMount': [{
                                    'constant': 'pvc-name',
                                    'mountPath': 'path'
                                }]
                            }
                        }
                    }
                }
            }
        }
    def test_mount_two(self):
        """Two mounts on the same task accumulate, preserving call order."""
        @dsl.pipeline
        def my_pipeline():
            task = comp()
            kubernetes.mount_pvc(
                task,
                pvc_name='pvc-name',
                mount_path='path1',
            )
            kubernetes.mount_pvc(
                task,
                pvc_name='other-pvc-name',
                mount_path='path2',
            )
        assert json_format.MessageToDict(my_pipeline.platform_spec) == {
            'platforms': {
                'kubernetes': {
                    'deploymentSpec': {
                        'executors': {
                            'exec-comp': {
                                'pvcMount': [
                                    {
                                        'constant': 'pvc-name',
                                        'mountPath': 'path1'
                                    },
                                    {
                                        'constant': 'other-pvc-name',
                                        'mountPath': 'path2'
                                    },
                                ]
                            }
                        }
                    }
                }
            }
        }
    def test_mount_preserves_secret_as_env(self):
        """Mounting a PVC must not clobber a previously set secret-as-env."""
        # checks that mount_pvc respects previously set secrets
        @dsl.pipeline
        def my_pipeline():
            task = comp()
            kubernetes.use_secret_as_env(
                task,
                secret_name='secret-name',
                secret_key_to_env={'password': 'SECRET_VAR'},
            )
            kubernetes.mount_pvc(
                task,
                pvc_name='pvc-name',
                mount_path='path',
            )
        assert json_format.MessageToDict(my_pipeline.platform_spec) == {
            'platforms': {
                'kubernetes': {
                    'deploymentSpec': {
                        'executors': {
                            'exec-comp': {
                                'pvcMount': [{
                                    'constant': 'pvc-name',
                                    'mountPath': 'path'
                                }],
                                'secretAsEnv': [{
                                    'secretName':
                                        'secret-name',
                                    'keyToEnv': [{
                                        'secretKey': 'password',
                                        'envVar': 'SECRET_VAR'
                                    }]
                                }]
                            }
                        }
                    }
                }
            }
        }
    def test_mount_preserves_secret_as_vol(self):
        """Mounting a PVC must not clobber a previously set secret volume."""
        # checks that mount_pvc respects previously set secrets
        @dsl.pipeline
        def my_pipeline():
            task = comp()
            kubernetes.use_secret_as_volume(
                task,
                secret_name='secret-name',
                mount_path='secretpath',
            )
            kubernetes.mount_pvc(
                task,
                pvc_name='pvc-name',
                mount_path='path',
            )
        assert json_format.MessageToDict(my_pipeline.platform_spec) == {
            'platforms': {
                'kubernetes': {
                    'deploymentSpec': {
                        'executors': {
                            'exec-comp': {
                                'pvcMount': [{
                                    'constant': 'pvc-name',
                                    'mountPath': 'path'
                                }],
                                'secretAsVolume': [{
                                    'secretName': 'secret-name',
                                    'mountPath': 'secretpath'
                                }]
                            }
                        }
                    }
                }
            }
        }
    def test_illegal_pvc_name(self):
        """A non-str, non-PipelineChannel pvc_name raises ValueError."""
        @dsl.component
        def identity(string: str) -> str:
            return string
        with pytest.raises(
                ValueError,
                match=r'Argument for \'pvc_name\' must be an instance of str or PipelineChannel\. Got unknown input type: <class \'int\'>\.',
        ):
            @dsl.pipeline
            def my_pipeline(string: str = 'string'):
                op1 = kubernetes.mount_pvc(
                    identity(string=string),
                    pvc_name=1,
                    mount_path='/path',
                )
@dsl.component
def comp():
    """No-op component used as the task under test."""
    pass
| [
"noreply@github.com"
] | kubeflow.noreply@github.com |
b5328046f6cdb5aa66427ffe9e2707dabca2be0e | d9c95cd0efad0788bf17672f6a4ec3b29cfd2e86 | /disturbance/migrations/0216_proposal_fee_invoice_references.py | 6f6dc55ec362b2eba775bf211d8f1b787e1e6f82 | [
"Apache-2.0"
] | permissive | Djandwich/disturbance | cb1d25701b23414cd91e3ac5b0207618cd03a7e5 | b1ba1404b9ca7c941891ea42c00b9ff9bcc41237 | refs/heads/master | 2023-05-05T19:52:36.124923 | 2021-06-03T06:37:53 | 2021-06-03T06:37:53 | 259,816,629 | 1 | 1 | NOASSERTION | 2021-06-03T09:46:46 | 2020-04-29T03:39:33 | Python | UTF-8 | Python | false | false | 622 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-02-05 06:04
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Proposal.fee_invoice_references, a nullable Postgres array of
    short strings (auto-generated; do not hand-edit applied migrations)."""
    dependencies = [
        ('disturbance', '0215_merge_20210202_1343'),
    ]
    operations = [
        # ArrayField of CharField(50); both the array and its elements may be
        # null/blank, so existing rows need no backfill.
        migrations.AddField(
            model_name='proposal',
            name='fee_invoice_references',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, default='', max_length=50, null=True), null=True, size=None),
        ),
    ]
| [
"katsufumi.shibata@dbca.wa.gov.au"
] | katsufumi.shibata@dbca.wa.gov.au |
5ed73b5cde48e571ff3a4f5ef8a0ff1133755556 | 675e4c77ea4f1053f1acf5b76b9711b53157d841 | /questions/serializers.py | 5cdbc43af6b1978b33f1b6684fb86a672d0fd672 | [] | no_license | moh-hosseini98/django-rest-quora-like | 6d67717be5afd708eacdd74bba706be90a73a1e9 | 7bf020515145a54dcc822d50584e12c0398e7ee5 | refs/heads/master | 2023-04-09T04:56:03.783036 | 2021-04-29T04:18:12 | 2021-04-29T04:18:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,629 | py | from rest_framework import serializers
from .models import Question,Answer,qlike,alike,Reply
class AnswerSerializer(serializers.ModelSerializer):
    """Answer with like/reply counts and whether the requesting user liked it."""
    author = serializers.StringRelatedField(read_only=True)
    answer_likes = serializers.SerializerMethodField()
    number_of_replies = serializers.SerializerMethodField()
    like_by_req_user = serializers.SerializerMethodField()
    class Meta:
        model = Answer
        exclude = ('updated_at','question',)
    def get_answer_likes(self,instance):
        # Total number of likes on this answer.
        return instance.likers.count()
    def get_number_of_replies(self,instance):
        return instance.replies.count()
    def get_like_by_req_user(self,instance):
        # True when the requesting user liked this answer; the view must put
        # the request into the serializer context.
        request = self.context['request']
        return instance.likers.filter(liker_id=request.user.id).exists()
class QuestionSerializer(serializers.ModelSerializer):
    """Question plus aggregate counts and per-request-user flags."""
    author = serializers.StringRelatedField(read_only=True)
    slug = serializers.SlugField(read_only=True)
    number_of_likes = serializers.SerializerMethodField()
    number_of_answers = serializers.SerializerMethodField()
    like_by_req_user = serializers.SerializerMethodField()
    user_has_answered = serializers.SerializerMethodField()
    class Meta:
        model = Question
        exclude = ('updated_at',)
        lookup_field = 'slug'
    def get_number_of_answers(self,instance):
        return instance.answers.count()
    def get_number_of_likes(self,instance):
        '''return qlike.objects.filter(question=instance).count()'''
        return instance.likes.count()
    def get_like_by_req_user(self,instance):
        # True when the requesting user liked this question; requires the
        # request in the serializer context.
        request = self.context['request']
        return instance.likes.filter(liker_id=request.user.id).exists()
    def get_user_has_answered(self,instance):
        # Whether the requesting user already answered this question.
        request = self.context['request']
        return instance.answers.filter(
            author=request.user
        ).exists()
        # return Answer.objects.filter(
        #     question=instance,author=request.user
        # ).exists()
class QuestionLikeSerializer(serializers.ModelSerializer):
    """A like on a question; the question itself comes from the URL/view."""
    liker = serializers.StringRelatedField(read_only=True)
    class Meta:
        model = qlike
        exclude = ('question',)
class AnswerLikeSerializer(serializers.ModelSerializer):
    """A like on an answer; the answer itself comes from the URL/view."""
    liker = serializers.StringRelatedField(read_only=True)
    class Meta:
        model = alike
        exclude = ('answer',)
class ReplySerializer(serializers.ModelSerializer):
    """A reply to an answer; the answer itself comes from the URL/view."""
    author = serializers.StringRelatedField(read_only=True)
    class Meta:
        model = Reply
        exclude = ('answer',)
| [
"mamadhss@yahoo.com"
] | mamadhss@yahoo.com |
95bacd72df21ee4e7d6eba5d398151122f814f1e | fd18ce27b66746f932a65488aad04494202e2e0d | /d03_socket_http/pro_2.py | d3a7e2e1ff30f43c75d905ad8fb338e68a45f33f | [] | no_license | daofeng123/ClassCodes | 1acbd843836e550c9cebf67ef21dfca9f6b9fc87 | fbcd1f24d79b8bb56ad0669b07ad118064609612 | refs/heads/master | 2020-06-24T12:34:28.148197 | 2019-08-15T03:56:40 | 2019-08-15T03:56:40 | 198,963,469 | 3 | 0 | null | 2019-07-26T06:53:45 | 2019-07-26T06:53:44 | null | UTF-8 | Python | false | false | 193 | py | # coding=utf-8
import os
pipe_file = 'io.pipe'

# Create a named pipe and read it until the writer closes (empty read).
os.mkfifo(pipe_file)
fd = os.open(pipe_file, os.O_RDONLY)
try:
    while True:
        s = os.read(fd, 10)
        if not s:
            break
        # NOTE(review): decoding a 10-byte chunk assumes multi-byte UTF-8
        # sequences never straddle a chunk boundary — confirm with the writer.
        print(s.decode())
finally:
    # Fix: the descriptor was previously leaked; always close it.
    os.close(fd)
| [
"38395870@qq.com"
] | 38395870@qq.com |
0203efcddf9cbe42574c145dec465a59c98bc2b0 | 54df8336b50e8f2d7dbe353f0bc51a2b3489095f | /Django/Django_DB/Django_mysql/mysqlpro/mysqlpro/asgi.py | e8daa3c14429a4891a272b7a5bf0977492bd3708 | [] | no_license | SurendraKumarAratikatla/MyLenovolapCodes1 | 42d5bb7a14bfdf8d773ee60719380ee28ff4947a | 12c56200fcfd3e5229bfeec209fd03b5fc35b823 | refs/heads/master | 2023-06-17T15:44:18.312398 | 2021-07-19T10:28:11 | 2021-07-19T10:28:11 | 387,358,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for mysqlpro project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application object.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysqlpro.settings')
# The ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| [
"suendra.aratikatla1608@gmail.com"
] | suendra.aratikatla1608@gmail.com |
7aa31c841c9d3967ee889869937a5741cd46cf68 | ed2389b9c0f8b45f4d1ac2e52815846ed37bc127 | /train_deep_logo_cnn.py | c95a538c88e80aa52713c4c770b95ccb8dbe4aea | [
"MIT"
] | permissive | tracxpoint/AIC_DeepLogo | 161281c443a6d1bc96556fa41d1818ff4609c5fa | 9b936208fcb785cc0affb6e2e9087d1bb83744d4 | refs/heads/master | 2021-09-06T22:03:45.942170 | 2018-01-04T08:27:55 | 2018-01-04T08:27:55 | 116,278,954 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,980 | py | # The MIT License (MIT)
# Copyright (c) 2016 satojkovic
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import tensorflow as tf
import numpy as np
from six.moves import cPickle as pickle
from six.moves import range
import sys
import os
import common
import model
# Command-line flags for the training script.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    "train_dir", "flickr_logos_27_dataset",
    "Directory where to write event logs and checkpoint.")
tf.app.flags.DEFINE_integer("max_steps", 20001, "Number of batches to run.")
tf.app.flags.DEFINE_integer("image_width", common.CNN_IN_WIDTH,
                            "A width of an input image.")
tf.app.flags.DEFINE_integer("image_height", common.CNN_IN_HEIGHT,
                            "A height of an input image.")
# Fix: the learning rate is fractional (0.0001) but was declared with
# DEFINE_integer, which mis-types the flag and breaks overriding it from
# the command line with a float value.
tf.app.flags.DEFINE_float("learning_rate", 0.0001, "Learning rate")
tf.app.flags.DEFINE_integer("batch_size", 64, "A batch size")
tf.app.flags.DEFINE_integer("num_channels", common.CNN_IN_CH,
                            "A number of channels of an input image.")

# Pickle produced by the dataset-preparation step; read by read_data().
PICKLE_FILENAME = 'deep_logo.pickle'
def accuracy(predictions, labels):
    """Percentage of rows whose predicted argmax matches the one-hot label."""
    predicted_classes = np.argmax(predictions, 1)
    true_classes = np.argmax(labels, 1)
    return 100 * np.sum(predicted_classes == true_classes) / predictions.shape[0]
def reformat(dataset, labels):
    """Reshape flat image rows to NHWC float32 and one-hot encode labels."""
    nhwc_shape = (-1, FLAGS.image_height, FLAGS.image_width, FLAGS.num_channels)
    images = dataset.reshape(nhwc_shape).astype(np.float32)
    # Broadcasting class indices against arange(NUM_CLASSES) yields one-hot rows.
    one_hot = (np.arange(model.NUM_CLASSES) == labels[:, None]).astype(np.float32)
    return images, one_hot
def read_data():
    """Load the pickled train/valid/test splits and report their shapes.

    Returns ([train, valid, test] datasets, [train, valid, test] labels).
    """
    with open(PICKLE_FILENAME, 'rb') as f:
        save = pickle.load(f)
        train_dataset, train_labels = save['train_dataset'], save['train_labels']
        valid_dataset, valid_labels = save['valid_dataset'], save['valid_labels']
        test_dataset, test_labels = save['test_dataset'], save['test_labels']
        del save  # free the loaded dict promptly
        print('Training set', train_dataset.shape, train_labels.shape)
        print('Valid set', valid_dataset.shape, valid_labels.shape)
        print('Test set', test_dataset.shape, test_labels.shape)
    return [train_dataset, valid_dataset,
            test_dataset], [train_labels, valid_labels, test_labels]
def main():
    """Build the CNN graph and train it, optionally warm-starting from a
    weights .npz passed as argv[1].  Ctrl-C snapshots weights and returns."""
    if len(sys.argv) > 1:
        f = np.load(sys.argv[1])
        # f.files has unordered keys ['arr_8', 'arr_9', 'arr_6'...]
        # Sorting keys by value of numbers
        initial_weights = [
            f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))
        ]
    else:
        initial_weights = None
    # read input data
    dataset, labels = read_data()
    train_dataset, train_labels = reformat(dataset[0], labels[0])
    valid_dataset, valid_labels = reformat(dataset[1], labels[1])
    test_dataset, test_labels = reformat(dataset[2], labels[2])
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Valid set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)
    # Training model
    graph = tf.Graph()
    with graph.as_default():
        # Weights and biases
        model_params = model.params()
        # Initial weights: assign ops applied after variable initialization.
        if initial_weights is not None:
            assert len(model_params) == len(initial_weights)
            assign_ops = [
                w.assign(v) for w, v in zip(model_params, initial_weights)
            ]
        # Input data
        tf_train_dataset = tf.placeholder(
            tf.float32,
            shape=(FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width,
                   FLAGS.num_channels))
        tf_train_labels = tf.placeholder(
            tf.float32, shape=(FLAGS.batch_size, model.NUM_CLASSES))
        tf_valid_dataset = tf.constant(valid_dataset)
        tf_test_dataset = tf.constant(test_dataset)
        # Training computation (dropout keep_prob=0.5 during training only)
        logits = model.cnn(tf_train_dataset, model_params, keep_prob=0.5)
        with tf.name_scope('loss'):
            loss = tf.reduce_sum(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=logits, labels=tf_train_labels))
            tf.summary.scalar('loss', loss)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)
        # Predictions for the training, validation, and test data
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(
            model.cnn(tf_valid_dataset, model_params, keep_prob=1.0))
        test_prediction = tf.nn.softmax(
            model.cnn(tf_test_dataset, model_params, keep_prob=1.0))
        # Merge all summaries
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(FLAGS.train_dir + '/train')
        # Add ops to save and restore all the variables
        saver = tf.train.Saver()
    # Do training
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned values')
        else:
            print('initialized')
        for step in range(FLAGS.max_steps):
            # Cycle through the training set batch by batch.
            offset = (step * FLAGS.batch_size) % (
                train_labels.shape[0] - FLAGS.batch_size)
            batch_data = train_dataset[offset:(offset + FLAGS.batch_size
                                               ), :, :, :]
            batch_labels = train_labels[offset:(offset + FLAGS.batch_size), :]
            feed_dict = {
                tf_train_dataset: batch_data,
                tf_train_labels: batch_labels
            }
            try:
                _, l, predictions = session.run(
                    [optimizer, loss, train_prediction], feed_dict=feed_dict)
                # NOTE(review): this extra run also applies a second optimizer
                # step every 50th iteration — presumably unintended; confirm.
                if step % 50 == 0:
                    summary, _ = session.run(
                        [merged, optimizer], feed_dict=feed_dict)
                    train_writer.add_summary(summary, step)
                    print('Minibatch loss at step %d: %f' % (step, l))
                    print('Minibatch accuracy: %.1f%%' % accuracy(
                        predictions, batch_labels))
                    print('Validation accuracy: %.1f%%' % accuracy(
                        valid_prediction.eval(), valid_labels))
            except KeyboardInterrupt:
                # Snapshot current weights so training can be resumed later.
                last_weights = [p.eval() for p in model_params]
                np.savez("weights.npz", *last_weights)
                return last_weights
        print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(),
                                                 test_labels))
        # Save the variables to disk.
        save_dir = "models"
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        save_path = os.path.join(save_dir, "deep_logo_model")
        saved = saver.save(session, save_path)
        print("Model saved in file: %s" % saved)
# Entry point: train, optionally warm-starting from a weights .npz in argv[1].
if __name__ == '__main__':
    main()
| [
"satojkovic@gmail.com"
] | satojkovic@gmail.com |
3eb78ccf566602282c5c3dfa11e4a7b6bad48b99 | b7fab13642988c0e6535fb75ef6cb3548671d338 | /tools/ydk-py-master/cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_Ethernet_SPAN_datatypes.py | 5cf1f786c1167dbd94086d2115a83fb0bc7c7ecc | [
"Apache-2.0"
] | permissive | juancsosap/yangtraining | 6ad1b8cf89ecdebeef094e4238d1ee95f8eb0824 | 09d8bcc3827575a45cb8d5d27186042bf13ea451 | refs/heads/master | 2022-08-05T01:59:22.007845 | 2019-08-01T15:53:08 | 2019-08-01T15:53:08 | 200,079,665 | 0 | 1 | null | 2021-12-13T20:06:17 | 2019-08-01T15:54:15 | Python | UTF-8 | Python | false | false | 1,120 | py | """ Cisco_IOS_XR_Ethernet_SPAN_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
from ydk.entity_utils import get_relative_entity_path as _get_relative_entity_path
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YPYError, YPYModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class SpanSessionClass(Enum):
    """Span session class (generated from the
    Cisco_IOS_XR_Ethernet_SPAN_datatypes YANG module).

    .. data:: ethernet = 0  -- mirror Ethernet packets
    .. data:: ipv4 = 1      -- mirror IPv4 packets
    .. data:: ipv6 = 2      -- mirror IPv6 packets
    """
    ethernet = Enum.YLeaf(0, "ethernet")
    ipv4 = Enum.YLeaf(1, "ipv4")
    ipv6 = Enum.YLeaf(2, "ipv6")
class SpanSessionClassOld(Enum):
    """Legacy span session class with a single value (generated code).

    .. data:: true = 0  -- mirror Ethernet packets
    """
    true = Enum.YLeaf(0, "true")
| [
"juan.c.sosa.p@gmail.com"
] | juan.c.sosa.p@gmail.com |
8692b572824530ddb8d707d7d9cd3e25d74df493 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/appengine/lib/external/admin/tools/conversion/yaml_schema.py | a5982a4efc0f604246f9948bda038bf50a2a5432 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 5,782 | py | # Copyright 2015 Google Inc. All Rights Reserved.
"""Definition for conversion between legacy YAML and One Platform protos."""
from googlecloudsdk.appengine.lib.external.admin.tools.conversion import converters as c
from googlecloudsdk.appengine.lib.external.admin.tools.conversion import schema as s
# Declarative mapping from legacy app.yaml fields to One Platform proto
# fields.  s.Value/s.Message/s.Map/s.RepeatedField mirror the proto shape;
# a first string argument renames the field, `converter=` transforms values.
SCHEMA = s.Message(
    api_config=s.Message(
        url=s.Value(converter=c.ToJsonString),
        login=s.Value(converter=c.EnumConverter('LOGIN')),
        secure=s.Value('security_level', converter=c.EnumConverter('SECURE')),
        auth_fail_action=s.Value(converter=c.EnumConverter('AUTH_FAIL_ACTION')),
        script=s.Value(converter=c.ToJsonString)),
    # auto_id_policy is folded into the beta_settings map.
    auto_id_policy=s.Value('beta_settings',
                           lambda val: {'auto_id_policy': val}),
    automatic_scaling=s.Message(
        converter=c.ConvertAutomaticScaling,
        cool_down_period_sec=s.Value('cool_down_period',
                                     converter=c.SecondsToDuration),
        cpu_utilization=s.Message(
            target_utilization=s.Value(),
            aggregation_window_length_sec=s.Value('aggregation_window_length',
                                                  converter=c.SecondsToDuration)
        ),
        max_num_instances=s.Value('max_total_instances'),
        min_pending_latency=s.Value(converter=c.LatencyToDuration),
        min_idle_instances=s.Value(converter=
                                   c.StringToInt(handle_automatic=True)),
        max_idle_instances=s.Value(converter=
                                   c.StringToInt(handle_automatic=True)),
        max_pending_latency=s.Value(converter=c.LatencyToDuration),
        max_concurrent_requests=s.Value(converter=c.StringToInt()),
        min_num_instances=s.Value('min_total_instances'),
        target_network_sent_bytes_per_sec=s.Value(
            'target_sent_bytes_per_sec'),
        target_network_sent_packets_per_sec=s.Value(
            'target_sent_packets_per_sec'),
        target_network_received_bytes_per_sec=s.Value(
            'target_received_bytes_per_sec'),
        target_network_received_packets_per_sec=s.Value(
            'target_received_packets_per_sec'),
        target_disk_write_bytes_per_sec=s.Value(
            'target_write_bytes_per_sec'),
        target_disk_write_ops_per_sec=s.Value(
            'target_write_ops_per_sec'),
        target_disk_read_bytes_per_sec=s.Value(
            'target_read_bytes_per_sec'),
        target_disk_read_ops_per_sec=s.Value(
            'target_read_ops_per_sec'),
        target_request_count_per_sec=s.Value(),
        target_concurrent_requests=s.Value()),
    basic_scaling=s.Message(
        idle_timeout=s.Value(converter=c.IdleTimeoutToDuration),
        max_instances=s.Value(converter=c.StringToInt())),
    beta_settings=s.Map(),
    default_expiration=s.Value(converter=c.ExpirationToDuration),
    env=s.Value(),
    env_variables=s.Map(),
    error_handlers=s.RepeatedField(element=s.Message(
        error_code=s.Value(converter=c.EnumConverter('ERROR_CODE')),
        file=s.Value('static_file', converter=c.ToJsonString),
        mime_type=s.Value(converter=c.ToJsonString))),
    # Restructure the handler after it's complete, since this is more
    # complicated than a simple rename.
    handlers=s.RepeatedField(element=s.Message(
        converter=c.ConvertUrlHandler,
        auth_fail_action=s.Value(converter=c.EnumConverter('AUTH_FAIL_ACTION')),
        static_dir=s.Value(converter=c.ToJsonString),
        secure=s.Value('security_level', converter=c.EnumConverter('SECURE')),
        redirect_http_response_code=s.Value(
            converter=c.EnumConverter('REDIRECT_HTTP_RESPONSE_CODE')),
        http_headers=s.Map(),
        url=s.Value('url_regex'),
        expiration=s.Value(converter=c.ExpirationToDuration),
        static_files=s.Value('path', converter=c.ToJsonString),
        script=s.Value('script_path', converter=c.ToJsonString),
        upload=s.Value('upload_path_regex', converter=c.ToJsonString),
        api_endpoint=s.Value(),
        application_readable=s.Value(),
        position=s.Value(),
        login=s.Value(converter=c.EnumConverter('LOGIN')),
        mime_type=s.Value(converter=c.ToJsonString),
        require_matching_file=s.Value())),
    health_check=s.Message(
        check_interval_sec=s.Value('check_interval',
                                   converter=c.SecondsToDuration),
        timeout_sec=s.Value('timeout', converter=c.SecondsToDuration),
        healthy_threshold=s.Value(),
        # Note the polarity flip: enable_health_check -> disable_health_check.
        enable_health_check=s.Value('disable_health_check', converter=c.Not),
        unhealthy_threshold=s.Value(),
        host=s.Value(converter=c.ToJsonString),
        restart_threshold=s.Value()),
    inbound_services=s.RepeatedField(element=s.Value(
        converter=c.EnumConverter('INBOUND_SERVICE'))),
    instance_class=s.Value(converter=c.ToJsonString),
    libraries=s.RepeatedField(element=s.Message(
        version=s.Value(converter=c.ToJsonString),
        name=s.Value(converter=c.ToJsonString))),
    manual_scaling=s.Message(
        instances=s.Value(converter=c.StringToInt())),
    network=s.Message(
        instance_tag=s.Value(converter=c.ToJsonString),
        name=s.Value(converter=c.ToJsonString),
        forwarded_ports=s.RepeatedField(element=s.Value(converter=
                                                        c.ToJsonString))),
    nobuild_files=s.Value('nobuild_files_regex', converter=c.ToJsonString),
    resources=s.Message(
        memory_gb=s.Value(),
        disk_size_gb=s.Value('disk_gb'),
        cpu=s.Value()),
    runtime=s.Value(converter=c.ToJsonString),
    threadsafe=s.Value(),
    version=s.Value('id', converter=c.ToJsonString),
    vm=s.Value(),
    vm_settings=s.Map('beta_settings'))
| [
"joe@longreen.io"
] | joe@longreen.io |
b945506a9f4a29f2511783145f6af33587bc473c | 3e5b2eb741f5ae52752328274a616b475dbb401a | /services/core-api/tests/now_applications/resources/test_now_application_put.py | ea8ee6fa40ba11e0a78d8a568d69b0bf9ac3409d | [
"Apache-2.0"
] | permissive | bcgov/mds | 165868f97d0002e6be38680fe4854319a9476ce3 | 60277f4d71f77857e40587307a2b2adb11575850 | refs/heads/develop | 2023-08-29T22:54:36.038070 | 2023-08-29T05:00:28 | 2023-08-29T05:00:28 | 131,050,605 | 29 | 63 | Apache-2.0 | 2023-09-14T21:40:25 | 2018-04-25T18:54:47 | JavaScript | UTF-8 | Python | false | false | 1,253 | py | import json, decimal, pytest
from flask_restplus import marshal, fields
from app.api.now_applications.response_models import NOW_APPLICATION_MODEL
from tests.now_application_factories import NOWApplicationIdentityFactory, NOWApplicationFactory
class TestNOWApplication:
    """PUT mines/now-applications/<guid>"""

    @pytest.mark.skip(
        reason='Application changes now fire a request to NROS so need to mock the service call.')
    def test_put_application_field(self, test_client, db_session, auth_headers):
        """Round-trip a latitude edit through the PUT endpoint and verify it persists."""
        application = NOWApplicationFactory()
        identity = NOWApplicationIdentityFactory(now_application=application)
        assert identity.now_application

        # Serialize the current application, then mutate a single field.
        payload = marshal(identity.now_application, NOW_APPLICATION_MODEL)
        updated_latitude = '-55.111'
        payload['latitude'] = updated_latitude

        response = test_client.put(
            f'/now-applications/{identity.now_application_guid}',
            json=payload,
            headers=auth_headers['full_auth_header'])
        assert response.status_code == 200, response.response

        # Compare as Decimal so string formatting differences don't matter.
        body = json.loads(response.data.decode())
        assert decimal.Decimal(body['latitude']) == decimal.Decimal(updated_latitude)
| [
"noreply@github.com"
] | bcgov.noreply@github.com |
0412431b0da4e6ff054478296cc9d25714eb67c8 | fa8036fd416aecab3f1ca617acf0989f032f02ce | /abc165/A.py | d0cee18d96e021925e06a71e9d431a57b9a655f3 | [] | no_license | MitsuruFujiwara/Atcoder | e2e2e82014e33e3422ea40eca812c6fdd8bcaaaa | bc73c4cd35a80c106d0e9b14cee34a064d89d343 | refs/heads/master | 2022-12-14T23:50:56.843336 | 2020-09-17T22:25:57 | 2020-09-17T22:25:57 | 252,980,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | K = int(input())
# K was read from the first input line (above). Answer 'OK' iff some value
# in the inclusive range [A, B] is a multiple of K, else 'NG'.
A, B = map(int, input().split())
ans = 'OK' if any(value % K == 0 for value in range(A, B + 1)) else 'NG'
print(ans)
| [
"fujiwara52jp@gmail.com"
] | fujiwara52jp@gmail.com |
4cc0049abb138568478357ab24aa4bfb3ca4fffb | dd4d2589d1f14303cacd3b7ee1dd5f6bacd3bf3c | /company/amazon/linked_list/intersection.py | 962fdaf38fc28e16cedeeda12cb343a8230f6c91 | [] | no_license | salujaharkirat/ds-algo | ec22eaae81bdb78f2818248508325a536aedbb7b | 819b5971826d97ec600b92776c5158518c9cbf22 | refs/heads/master | 2023-05-02T17:20:49.425484 | 2021-05-23T07:54:29 | 2021-05-23T07:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def get_length(self, head):
length = 0
while head:
length += 1
head = head.next
return length
# @param A : head node of linked list
# @param B : head node of linked list
# @return the head node in the linked list
def getIntersectionNode(self, A, B):
len_A = self.get_length(A)
len_B = self.get_length(B)
if not A or not B:
return None
p_A = A
p_B = B
diff = len_A - len_B
if len_A > len_B:
while diff > 0:
p_A = p_A.next
diff -= 1
else:
while diff < 0:
p_B = p_B.next
diff += 1
while p_A and p_B:
if p_A == p_B:
return p_A
p_A = p_A.next
p_B = p_B.next
return None
| [
"saluja.harkirat@gmail.com"
] | saluja.harkirat@gmail.com |
c69ab5d34d637db9d7131d63900bcf5f52226b04 | f17a78351f53086ce2f9a42bb4b67a0528e3f651 | /backend/main/urls.py | 5f6aa88e52359ba7cffc7b901f523ce4f9264e64 | [
"MIT"
] | permissive | tony/futurecoder | 556dad5c28d4317f0928d821e3e22592d03d09b3 | 986e23137ef9ea2ca267c8b51ab6e1dfe10e530e | refs/heads/master | 2022-11-19T06:20:20.703834 | 2020-07-21T20:37:24 | 2020-07-21T20:37:24 | 282,333,435 | 0 | 0 | MIT | 2020-07-24T23:21:20 | 2020-07-24T23:21:19 | null | UTF-8 | Python | false | false | 1,182 | py | """book URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import TemplateView
from main.text import chapters
from main.views import api_view, FrontendAppView, HomePageView
# Single shared view callable, mapped to both '' and 'home/' below.
home_view = HomePageView.as_view()
urlpatterns = [
    # JSON API dispatcher; the <method_name> path segment selects the handler.
    path('api/<method_name>/', api_view),
    path('home/', home_view),
    path('', home_view),
    # Frontend SPA entry point; ensure_csrf_cookie forces the CSRF cookie to be set.
    path('course/', ensure_csrf_cookie(FrontendAppView.as_view())),
    # Table of contents rendered from the chapters list imported from main.text.
    path('toc/', TemplateView.as_view(template_name="toc.html", extra_context=dict(chapters=chapters))),
]
| [
"alex.mojaki@gmail.com"
] | alex.mojaki@gmail.com |
bbac5a99758351de35a1c9ee434cc4b0470a6ae4 | 394b5d87d193071e10d7f875e874edeb1720adbb | /staff/views.py | 1b780d23c1a05dde4c12d9f3c2a137c7137bf4f8 | [] | no_license | Andreyglass1989/Academy | 346f3f6d468f44aeed2f0e73b3ac6c1ef206fba4 | 79527e9752324cf820314114e1dc97962c92f2fc | refs/heads/master | 2021-01-20T15:12:49.542352 | 2017-05-26T07:11:57 | 2017-05-26T07:11:57 | 90,734,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # from django.shortcuts import render, render_to_response
# from .models import Staff
#
# # Create your views here.
#
# def main_menu(request):
# staffs = Staff.objects.all()
# title = "Staff"
# context = {"title_docum":title, "staffs": staffs}
# return render_to_response(request,"base.html",context) | [
"1989andreyglass@gmail.com"
] | 1989andreyglass@gmail.com |
7f82070c300a6bc1d32d4659948899e75073d7f1 | e82245a9e623ef3e2b4b9c02f0fd932c608c4484 | /pramp.com/07-busiest_time_in_the_mall-scratch_work.py | 7d56bc26cc311d181c29572540e87d3ffc59e514 | [] | no_license | Zylophone/Programming-for-Sport | 33e8161028cfddce3b7a1243eb092070107342e3 | 193d6184f939303d8661f68d6fd06bdec95df351 | refs/heads/master | 2020-06-16T23:11:44.719286 | 2017-05-21T17:10:46 | 2017-05-21T17:10:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | # doesn't handle case where people enter and exit at the same second
maintain a var that tracks the number of people in the mall
time count type number_of_people
1 4 enter 4
3 3 exit 1
10 10 enter 11
14 20 enter 31
19 5 exit 26
between [time 14 and time 19] there were 31 people and this was the busiest period
# [(1, 0, xxxx)]<< you won't get a list item that says 0 people entered/exited the mall
mall_traffic= [(1, 4, "enter"), (3, 3, "exit")]
# sort mall_traffic by timestamp
TIME= 0
mall_traffic.sort(key= lambda x : x[TIME])
# NOTE(review): scratch work — this function is NOT executable as written:
# `????` is a placeholder, and `for idx, time, count, typ in enumerate(...)`
# unpacks four names from enumerate's (index, item) pairs, which would raise
# ValueError at runtime; presumably `for idx, (time, count, typ) in ...` was meant.
def findBusiestPeriod(mall_traffic):
    number_of_people= 0
    max_number_of_people_so_far= 0
    max_start= None
    max_end= None
    LAST= len(mall_traffic) - 1 # mustn't be -1
    TIME= 0
    TIME_YEAR_ENDS_AT= ???? # UNIX TIME corresponding to DEC 31, YYYY 11:59:59 PM
    for idx, time, count, typ in enumerate(mall_traffic):
        # Treat "enter" as +count people, anything else as -count.
        sign= 1 if typ == "enter" else -1
        number_of_people+= (sign * count)
        # if typ == "enter": number_of_people+= count
        # elif typ == "exit": number_of_people-= count
        # else: raise "error"
        if number_of_people > max_number_of_people_so_far:
            max_number_of_people_so_far= number_of_people
            max_start= time
            # The busiest period runs until the next event (or year end).
            if idx == LAST:
                max_end= TIME_YEAR_ENDS_AT
            else:
                max_end= mall_traffic[idx+1][TIME]
    return [max_start, max_end]
"jfv33@cornell.edu"
] | jfv33@cornell.edu |
48e110407f51f654dff1452b5eb1425e8cd86c75 | cc310e5586d7f7b6824802d290ba15e72832b76e | /ssil_sso_ms/global_function.py | 97e7fd35cd07a01843dedf068f4176bf2aee5601 | [] | no_license | abhisek11/my_django_boiler | 9bccc1d57c8bab83f54f6083919531a7b6b97ff6 | af36011a86376291af01a1c3a569f999bed4cb0d | refs/heads/master | 2022-12-13T19:43:26.500510 | 2020-03-02T07:03:13 | 2020-03-02T07:03:13 | 244,302,032 | 0 | 0 | null | 2022-12-08T01:51:32 | 2020-03-02T06:57:59 | Python | UTF-8 | Python | false | false | 3,086 | py |
from django.shortcuts import render
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication
from hrms.models import *
from hrms.serializers import *
from pagination import CSLimitOffestpagination,CSPageNumberPagination
from rest_framework.views import APIView
from django.conf import settings
from rest_framework import mixins
from rest_framework import filters
from datetime import datetime,timedelta
import collections
from rest_framework.parsers import FileUploadParser
from django_filters.rest_framework import DjangoFilterBackend
from custom_decorator import *
import os
from django.http import JsonResponse
from datetime import datetime
from decimal import Decimal
import pandas as pd
import xlrd
import numpy as np
from django.db.models import Q
from custom_exception_message import *
from decimal import *
import math
from django.contrib.auth.models import *
from django.db.models import F
from django.db.models import Count
from core.models import *
from pms.models import *
import re
def userdetails(user):
    """Return "<first_name> <last_name>" for a user id (int) or username (str).

    Returns None when *user* is neither an int nor a str, or when no
    matching User row exists. Fixes a bug in the original: the result
    variable was only assigned inside the queryset loop, so an empty
    queryset raised UnboundLocalError at the final return.
    """
    full_name = None
    if isinstance(user, int):
        queryset = User.objects.filter(id=user)
    elif isinstance(user, str):
        queryset = User.objects.filter(username=user)
    else:
        queryset = None
    if queryset is not None:
        for record in queryset:
            # Last match wins, mirroring the original loop semantics.
            full_name = record.first_name + " " + record.last_name
    return full_name
def designation(designation):
    """Return the designation name (cod_name) for a username (str) or
    user id (int); None for other types or when no record matches."""
    if isinstance(designation, str):
        records = TCoreUserDetail.objects.filter(cu_user__username=designation)
    elif isinstance(designation, int):
        records = TCoreUserDetail.objects.filter(cu_user=designation)
    else:
        return None
    # First matching record wins, as in the original early-return loop.
    for record in records:
        return record.designation.cod_name
    return None
def department(department):
    """Return the department name (cd_name) for a username (str) or
    user id (int); None for other types or when no record matches."""
    if isinstance(department, str):
        records = TCoreUserDetail.objects.filter(cu_user__username=department)
    elif isinstance(department, int):
        records = TCoreUserDetail.objects.filter(cu_user=department)
    else:
        return None
    # First matching record wins, as in the original early-return loop.
    for record in records:
        return record.department.cd_name
    return None
def getHostWithPort(request):
    """Return scheme + host (including any port) of *request*, with a
    trailing slash, e.g. 'https://example.com:8000/'."""
    scheme = 'https://' if request.is_secure() else 'http://'
    return scheme + request.get_host() + '/'
def raw_query_extract(query):
    """Return the raw SQL representation of a Django QuerySet
    (its `.query` attribute)."""
    raw_sql = query.query
    return raw_sql
"abhishek.singh@shyamfuture.com"
] | abhishek.singh@shyamfuture.com |
2aa9aa7a5e03efbfd148d48e3641f8958d935c5c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_117/682.py | fdb052b6206ab635aa8befdf177223b0490ce73e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from numpy import empty, array, reshape, zeros
def read_pattern(m,n):
ar = array(map(int, sum([sys.stdin.readline().strip().split(' ') for a in range(m)],[])))
return reshape(ar, (m,n))
def solve(n):
    """Read one test case from stdin and decide whether `pattern` can be
    produced by full-row / full-column mower passes (GCJ "Lawnmower").

    NOTE(review): the case-index parameter `n` is immediately shadowed by
    the grid width read from stdin.
    """
    m, n = map(int, sys.stdin.readline().split(' '))
    pattern = read_pattern(m,n)
    # Start from an uncut lawn of uniform height 100 (above any target).
    lawn = zeros((m,n), dtype=int)
    lawn.fill(100)
    # lines
    # A row can be mown to its target maximum only if that maximum does not
    # undercut what the row already needs to keep (its current minimum).
    for j in range(m):
        if pattern[j,:].max() <= lawn[j,:].min():
            lawn[j,:] = lawn[j,:].clip(0, pattern[j,:].max())
    # Same clipping rule applied column-wise.
    for j in range(n):
        if pattern[:,j].max() <= lawn[:,j].min():
            lawn[:,j] = lawn[:,j].clip(0, pattern[:,j].max())
    # Achievable iff the clipped lawn reproduces the target exactly.
    if (pattern == lawn).all():
        return True
    return False
if __name__ == "__main__":
N = int(sys.stdin.readline())
for n in range(N):
if solve(n):
print "Case #{n}: YES".format(n=n+1)
else:
print "Case #{n}: NO".format(n=n+1)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
9a6cf0b3eef8453bda85efcb047570913328df11 | fc3f784c8d00f419b11cbde660fe68a91fb080ca | /algoritm/20하반기 코딩테스트/.카카오기출/쿠키구입.py | 7aec711f931dadca16ca4ad6e2314022f0adfae9 | [] | no_license | choo0618/TIL | 09f09c89c8141ba75bf92657ac39978913703637 | 70437a58015aecee8f3d86e6bfd0aa8dc11b5447 | refs/heads/master | 2021-06-25T07:01:34.246642 | 2020-12-21T04:57:13 | 2020-12-21T04:57:13 | 163,782,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | def solution(cookie):
Len=len(cookie)
answer=0
Sum=sum(cookie)
def Chk(A,B):
if B%2:return 0
for m in range(a,b+1):
A+=cookie[m]
B-=cookie[m]
if A==B:return A
elif A>B:return 0
for a in range(Len-1):
tmp=Sum
for b in range(Len-1,a,-1):
answer=max(answer,Chk(0,tmp))
tmp-=cookie[b]
if tmp<2*answer:break
Sum-=cookie[a]
if Sum<answer*2:return answer
return answer
solution([1, 1, 2, 3]) | [
"choo0618@naver.com"
] | choo0618@naver.com |
3661e9be4ba4631580df807c9d3591b2478fc8ea | b34c2e2ccb3dcda09bab17e3082627c8401cc67b | /bank/api/bank_api.py | a60dab611be6a0126475132c99c93a73ee80bd38 | [] | no_license | udwivedi394/django_api_app | 7cc80c68064e9349bd0ca21b1f794be21ec78dfc | bcc918a3dc6263017e54a3ea34086ed51aa7e7cb | refs/heads/master | 2020-05-07T21:45:02.960050 | 2019-04-14T17:09:37 | 2019-04-14T17:09:37 | 180,916,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | import json
from django.db import transaction
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from rest_framework.exceptions import APIException
from rest_framework.views import APIView
from bank.api.bank_api_processor import BranchDetails, BranchFinderInCity
from bank.api.serializers import BranchIFSCInputSerializer, BranchFinderInputSerializer
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class BranchDetailer(APIView):
    """GET endpoint returning branch details for a validated IFSC code."""

    def get(self, request):
        """Serve the request; any failure is re-raised as a DRF APIException."""
        try:
            payload = json.dumps(self._process_request(request))
        except Exception as exc:
            raise APIException(str(exc))
        return HttpResponse(payload, status=200)

    def _process_request(self, request):
        """Validate the query string and delegate to the BranchDetails service."""
        serializer = BranchIFSCInputSerializer(data=request.GET)
        serializer.is_valid(raise_exception=True)
        return BranchDetails().execute(ifsc=serializer.validated_data['ifsc'])
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class BranchFinder(APIView):
    """GET endpoint listing branches of a named bank within a city."""

    def get(self, request):
        """Serve the request; any failure is re-raised as a DRF APIException."""
        try:
            payload = json.dumps(self._process_request(request))
        except Exception as exc:
            raise APIException(str(exc))
        return HttpResponse(payload, status=200)

    def _process_request(self, request):
        """Validate the query string and delegate to the BranchFinderInCity service."""
        serializer = BranchFinderInputSerializer(data=request.GET)
        serializer.is_valid(raise_exception=True)
        validated = serializer.validated_data
        return BranchFinderInCity().execute(name=validated['name'], city=validated['city'])
| [
"utkarshdwivedi394@gmail.com"
] | utkarshdwivedi394@gmail.com |
96247780730626ed382d3dfaee4df66524f0fc30 | 59080f5116b9e8f625b5cc849eb14b7ff9d19f3d | /124 rpc/msg_pb2.py | 23fe08aa72bfcbf531911d26beccb3cd695daaf0 | [] | no_license | yyq1609/Python_road | eda2bcd946b480a05ec31cdcb65e35b3f3e739d1 | e9ba2f47c8dd2d00a6e5ddff03c546152efd8f49 | refs/heads/master | 2020-09-11T11:51:35.903284 | 2019-11-11T13:02:21 | 2019-11-11T13:02:21 | 222,054,462 | 1 | 0 | null | 2019-11-16T05:58:13 | 2019-11-16T05:58:12 | null | UTF-8 | Python | false | true | 3,570 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: msg.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='msg.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\tmsg.proto\"\x1a\n\nMsgRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1a\n\x0bMsgResponse\x12\x0b\n\x03msg\x18\x01 \x01(\t23\n\nMsgService\x12%\n\x06GetMsg\x12\x0b.MsgRequest\x1a\x0c.MsgResponse\"\x00\x62\x06proto3')
)
_MSGREQUEST = _descriptor.Descriptor(
name='MsgRequest',
full_name='MsgRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='MsgRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=13,
serialized_end=39,
)
_MSGRESPONSE = _descriptor.Descriptor(
name='MsgResponse',
full_name='MsgResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='msg', full_name='MsgResponse.msg', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=41,
serialized_end=67,
)
DESCRIPTOR.message_types_by_name['MsgRequest'] = _MSGREQUEST
DESCRIPTOR.message_types_by_name['MsgResponse'] = _MSGRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MsgRequest = _reflection.GeneratedProtocolMessageType('MsgRequest', (_message.Message,), {
'DESCRIPTOR' : _MSGREQUEST,
'__module__' : 'msg_pb2'
# @@protoc_insertion_point(class_scope:MsgRequest)
})
_sym_db.RegisterMessage(MsgRequest)
MsgResponse = _reflection.GeneratedProtocolMessageType('MsgResponse', (_message.Message,), {
'DESCRIPTOR' : _MSGRESPONSE,
'__module__' : 'msg_pb2'
# @@protoc_insertion_point(class_scope:MsgResponse)
})
_sym_db.RegisterMessage(MsgResponse)
_MSGSERVICE = _descriptor.ServiceDescriptor(
name='MsgService',
full_name='MsgService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=69,
serialized_end=120,
methods=[
_descriptor.MethodDescriptor(
name='GetMsg',
full_name='MsgService.GetMsg',
index=0,
containing_service=None,
input_type=_MSGREQUEST,
output_type=_MSGRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_MSGSERVICE)
DESCRIPTOR.services_by_name['MsgService'] = _MSGSERVICE
# @@protoc_insertion_point(module_scope)
| [
"958976577@qq.com"
] | 958976577@qq.com |
def squareroot(n, iterations=20):
    """Approximate the square root of *n* by Newton's method.

    Args:
        n: non-negative number whose root is wanted.
        iterations: number of Newton refinement steps (default 20,
            matching the original fixed loop count).

    Returns:
        A float approximation of sqrt(n); exactly 0.0 for n == 0
        (the original raised ZeroDivisionError for that input, since
        the initial guess n/2 was zero and then divided into n).

    Raises:
        ValueError: if n is negative (the original silently produced a
            meaningless value instead).
    """
    if n < 0:
        raise ValueError("squareroot() requires a non-negative argument")
    if n == 0:
        return 0.0
    root = n / 2  # initial guess: half of n
    for _ in range(iterations):
        root = (root + n / root) / 2  # Newton update
    return root
| [
"dbbudd@gmail.com"
] | dbbudd@gmail.com |
addaa8d9c141661d40806d61bb19fb4cc977f2ec | 01fa2aca31eb73a559d192fd29e44350f26a13a9 | /HAX/18.CocoJoe/script.module.lambdascrapers/lib/lambdascrapers/sources_overeasy/en_de/iwantmyshow.py | b4f83e473feaf904ab832be9fa246515efd676a8 | [
"Beerware"
] | permissive | RandomIntermition/k4y108837s | b4beedeff375645bd4fa9ad348631a9a9f3640b6 | e9115aad49795dfe30a96c278cedaf089abcc11d | refs/heads/master | 2022-05-01T18:45:57.298903 | 2022-03-30T03:41:08 | 2022-03-30T03:41:08 | 109,356,425 | 1 | 0 | null | 2019-11-08T02:20:47 | 2017-11-03T05:36:48 | Python | UTF-8 | Python | false | false | 7,914 | py | # -*- coding: utf-8 -*-
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
import urlparse
import xbmcgui
from resources.lib.modules import cleantitle, client, debrid, log_utils
class source:
    """LambdaScrapers provider for myvideolinks.net.

    Python 2 codebase: uses `urllib.urlencode`/`urllib.quote_plus`,
    `urlparse` as a module, and encodes results to byte strings.
    """
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['myvideolinks.net', 'iwantmyshow.tk']
        self.base_link = 'http://myvideolinks.net'
        self.search_link = 'def/?s=%s'
    def movie(self, imdb, title, localtitle, aliases, year):
        # Encode the movie identity as a query string; None on any failure.
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except Exception:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Encode the show identity as a query string; None on any failure.
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except Exception:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Extend the tvshow() query string with per-episode fields.
        try:
            if url is None:
                return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except Exception:
            return
    def sources(self, url, hostDict, hostprDict):
        """Search the site for the title and return a list of source dicts
        ({'source', 'quality', 'language', 'url', 'info', 'direct',
        'debridonly'}); errors yield whatever was collected so far."""
        try:
            sources = []
            if url is None:
                return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            # hdlr: the token a release name must contain (SxxEyy or year).
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'],
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'],
                data['year'])
            # Strip punctuation the site's search dislikes.
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % urllib.quote_plus(query)
            r = client.request(url)
            r = client.parseDOM(r, 'h2')
            # z: (link href, link title) pairs from the result headings.
            z = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            r = [
                (i[0],
                 i[1],
                 re.sub('(\.|\(|\[|\s)(\d{4}|3D)(\.|\)|\]|\s|)(.+|)', '', i[1]),
                 re.findall('[\.|\(|\[|\s](\d{4}|)([\.|\)|\]|\s|].+)', i[1])) for i in z]
            r = [(i[0], i[1], i[2], i[3][0][0], i[3][0][1]) for i in r if i[3]]
            r = [(i[0], i[1], i[2], i[3], re.split('\.|\(|\)|\[|\]|\s|\-', i[4])) for i in r]
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[2]) and data['year'] == i[3]]
            r = [i for i in r if not any(x in i[4]
                                         for x in ['HDCAM', 'CAM', 'DVDR', 'DVDRip', 'DVDSCR', 'HDTS', 'TS', '3D'])]
            r = [i for i in r if '1080p' in i[4]][:1] + [i for i in r if '720p' in i[4]][:1]
            # NOTE(review): both branches build the same list, and the filtered
            # `r` above is never used again — `posts` comes straight from `z`.
            if 'tvshowtitle' in data:
                posts = [(i[1], i[0]) for i in z]
            else:
                posts = [(i[1], i[0]) for i in z]
            hostDict = hostprDict + hostDict
            items = []
            # Fetch each result page and harvest every quoted http link on it.
            for post in posts:
                try:
                    t = post[0]
                    u = client.request(post[1])
                    u = re.findall('\'(http.+?)\'', u) + re.findall('\"(http.+?)\"', u)
                    u = [i for i in u if '/embed/' not in i]
                    u = [i for i in u if 'youtube' not in i]
                    items += [(t, i) for i in u]
                except Exception:
                    pass
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    # Title part of the release name must match the requested title.
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr:
                        raise Exception()
                    # fmt: lowercase release-name tokens after the year/episode tag.
                    fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                    fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                    fmt = [i.lower() for i in fmt]
                    if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt):
                        raise Exception()
                    if any(i in ['extras'] for i in fmt):
                        raise Exception()
                    if '1080p' in fmt:
                        quality = '1080p'
                    elif '720p' in fmt:
                        quality = 'HD'
                    else:
                        quality = 'SD'
                    if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt):
                        quality = 'SCR'
                    elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt):
                        quality = 'CAM'
                    info = []
                    if '3d' in fmt:
                        info.append('3D')
                    try:
                        # NOTE(review): `item` tuples have length 2, so item[2]
                        # raises IndexError here and the size info never appears;
                        # the inner except swallows it.
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass
                    if any(i in ['hevc', 'h265', 'x265'] for i in fmt):
                        info.append('HEVC')
                    info = ' | '.join(info)
                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    # Host is the last two dotted labels of the link's netloc.
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': url, 'info': info, 'direct': False, 'debridonly': debrid.status()})
                except Exception:
                    pass
            # Prefer non-CAM sources when any exist.
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check
            return sources
        except Exception:
            return sources
    def resolve(self, url):
        # Links are already direct-to-host; nothing to resolve.
        return url
| [
"github+github@github.github"
] | github+github@github.github |
6e3fc1ac707dd1993a33342c09e738411880714c | 7ab16fa64eedde37cefabdbb8b7e176b28590c36 | /controller/connector/docs/simple/reverse_tcp_agent.py | c4d562d1dd7a744c3adb1120a436effb3943dd28 | [] | no_license | how2how/PlayGround | 240b5ab512dc1992f551f4af9289362b5018dba3 | cc10ee74ee3ee86b8b769cbf6237745bf1614adb | refs/heads/master | 2020-03-07T00:49:02.530872 | 2018-05-08T15:09:13 | 2018-05-08T15:09:13 | 127,166,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | from server.core.orchestration import SimpleOrchestrator
# Build the covert-channel orchestrator; the passphrase (and the 20-byte
# in/out chunk lengths) must match the server side for traffic to decode.
orch_obj = SimpleOrchestrator(
    "Our passphrase can be anything! &^&%{}",
    out_length = 20,
    in_length = 20,
    )
import socket
# Reverse-TCP transport: connect out to the handler at 127.0.0.1:1234.
# NOTE: this connects at import time, before the agent handler exists.
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("127.0.0.1",1234))
# Transport hooks handed to the handler below.
def send( data ) :
    s.sendall( data )
def recv() : # Return for every 20 bytes
    # Chunk size matches the orchestrator's in_length of 20 above.
    return s.recv(20) # This will automatically block as socket.recv() is a blocking method
from server.core.handlers import BaseHandler
class MyAgent_Handler( BaseHandler ) :
    """ This class tries hard to be self-explanatory """
    # Callbacks below are invoked by BaseHandler as covert traffic arrives.
    def __init__(self, recv, send, orch, **kw) :
        # Wire the transport callables and orchestrator into the base class.
        super( MyAgent_Handler, self ).__init__( recv, send, orch, **kw )
        print ( "[!] Agent with Orchestrator ID: '{}' started!".format( orch.getIdentity() ) )
        print()
    def onMessage( self, stream, message ) :
        # Fires once a complete message is reassembled; echo it reversed.
        print ( "[+] Message arrived!" )
        print ( "{} -> {}".format(stream, message) )
        print ("[>] Sending the received message in reverse order!")
        self.preferred_send( message[::-1] ) # Will respond with the reverse of what was received!
    def onChunk( self, stream, message ) :
        # Fires for every chunk; `message` is truthy only on the final chunk.
        print ( "[+] Chunk arrived for stream '{}' !".format(stream) )
        if message :
            print ("[*] Message assembled. onMessage() will be called next!")
            print()
    def onNotRecognised(self) :
        # Incoming bytes failed to decode — likely a passphrase mismatch.
        print ("[-] Got some Gibberish")
        print ("Initialized the Orchestrator with wrong passphrase?")
        print()
# Instantiate the agent; BaseHandler services traffic from here on.
handler_obj = MyAgent_Handler(recv, send, orch_obj)
from time import sleep
# Keep the main thread alive while the handler runs.
while True : sleep(10)
| [
"test@test.com"
] | test@test.com |
93978618a6ba0603136f2dbec1dfdfb4e5fe055a | 1e297340a3c85a29bbad1b27b076d8ad50086e7a | /algorithm/BinarySearch/BOJ_10815(binary_search).py | 47e0f364a9bb5bf3c8855715e7b29fec31491ba4 | [] | no_license | onewns/TIL | a5ee524604feb77d0d982ead2ea0265fa78c9349 | fa53ede5194979ccc54eeae882399799afe08bcf | refs/heads/master | 2023-08-29T02:50:19.982012 | 2021-10-11T12:24:59 | 2021-10-11T12:24:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | import sys
sys.stdin = open('../input.txt', 'r')
def binary_search(num, start, end):
    """Return 1 if `num` occurs in the sorted module-level list `cards`
    within the half-open index range [start, end), else 0.

    Rewritten iteratively: the original recursion was correct but paid a
    Python stack frame per halving; the loop below returns identical
    results without that overhead.
    """
    lo, hi = start, end - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if cards[mid] == num:
            return 1
        if cards[mid] < num:
            lo = mid + 1
        else:
            hi = mid - 1
    return 0
n = int(input())  # number of cards held
cards = sorted(list(map(int, input().split())))  # sorted: required by binary_search
m = int(input())  # number of query values
check_list = list(map(int, input().split()))
ans = []
for cn in check_list:
    # 1 if the queried value is among the cards, else 0.
    ans.append(binary_search(cn, 0, n))
print(*ans)  # space-separated 1/0 per query
| [
"wonjun9090@naver.com"
] | wonjun9090@naver.com |
a55a94207d6eebdfea63144d689f0a8ed1c3a00b | ce8a7ed7afb9a11a22df905f55749c1e06a98b63 | /amazon/python/17. Letter Combinations of a Phone Number.py | e72e9ed254f36f029c3b55ccb3e87fde05050a16 | [] | no_license | suruisunstat/leetcode_practice | 9da68247263d9b9bec98ab7e0f78bfe58e9dc830 | 1c0ff616ee3753ac9d4571301313f7a6b8ba6f37 | refs/heads/master | 2022-11-09T05:05:55.946361 | 2022-11-05T08:24:41 | 2022-11-05T08:24:41 | 147,136,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | class Solution(object):
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
dict_num = {'2':['a','b','c'],'3':['d','e','f'],'4':['g','h','i'],'5':['j','k','l'],'6':['m','n','o'],'7':['p','q','r','s'], '8':['t','u','v'], '9':['w','x','y','z']}
def backtrack(combination, next_digits):
if len(next_digits) == 0:
output.append(combination)
else:
for letter in dict_num[next_digits[0]]:
backtrack(combination + letter, next_digits[1:])
output = []
if digits:
backtrack("",digits)
return output
# Time: O(3 ^ N * 4 ^ M)
# Space: O(3 ^ N * 4 ^ M)
| [
"noreply@github.com"
] | suruisunstat.noreply@github.com |
09e30b7fd5c4de83950b8216c5b2fdaeae13dbbf | 17c280ade4159d4d8d5a48d16ba3989470eb3f46 | /16/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisWprime4500.py | 0c7712eb078cfa84197fb5056d2af33b0c4f59b9 | [] | no_license | chengchen1993/run2_ntuple | 798ff18489ff5185dadf3d1456a4462e1dbff429 | c16c2b203c05a3eb77c769f63a0bcdf8b583708d | refs/heads/master | 2021-06-25T18:27:08.534795 | 2021-03-15T06:08:01 | 2021-03-15T06:08:01 | 212,079,804 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,256 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the Wprime M=4500 2016 MC sample.
config = Configuration()
config.section_("General")
config.General.requestName = 'Wprime_4500_weight_v2'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName='Analysis'
config.JobType.sendExternalFolder=True# = 'Analysis'
# Auxiliary files shipped with each job: prefiring map + Summer16 JEC text files
# (AK4/AK8, CHS and Puppi).
config.JobType.inputFiles=['L1PrefiringMaps_new.root','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles=['L1PrefiringMaps_new.root','PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WprimeToWZToWlepZhad_narrow_M-4500_13TeV-madgraph/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'Automatic'
config.Data.unitsPerJob =180#10
config.Data.totalUnits = -1  # -1 = process the whole dataset
config.Data.publication = False
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'Wprime_4500_weight_v2'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"c.chen@cern.ch"
] | c.chen@cern.ch |
3b3a117221d6b53dde8de16074a991941662ffdb | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/11146113.py | 693fb033baff53bcb422a63ef8bda36e903bc01f | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/11146113.py generated: Fri, 27 Mar 2015 15:47:59
#
# Event Type: 11146113
#
# ASCII decay Descriptor: [B0 -> (J/psi(1S) -> mu+ mu-) (phi(1020) -> K+ K-) (K_S0 -> pi+ pi-)]cc
#
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/KKmumuInAcc.py" )
from Configurables import Generation
Generation().EventType = 11146113
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_JpsiphiKs,KKmumupipi=KKmumuInAcc.dec"
Generation().SignalRepeatedHadronization.CutTool = "ListOfDaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
0d61e717f8f7e75de0fcb1bd47dc911cd7bd82c8 | 137ba8a70dfcf94dfe7aeef1599341ecc06ca48f | /project_ex/10_lotto.py | a2e74e17dec7fa2d55a90d94167acf5e665eabba | [] | no_license | smtamh/oop_python_ex | e1d3a16ade54717d6cdf1759b6eba7b27cfc974e | bd58ee3bf13dad3de989d5fd92e503d5ff949dd9 | refs/heads/master | 2020-09-22T08:35:19.847656 | 2019-11-13T02:47:06 | 2019-11-13T02:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | """
Created on 2014. 8. 10.
@author: uyu423@gmail.com
http://luckyyowu.tistory.com/209
"""
import random
# 0. 객체지향을 통해 크게 3가지 형태의 클래스가 나온다. Data, B.O, U.I
# 1. 데이터를 클래스로 만들 수 있음(Data)
# 2. 프로그램이 해야하는 기능을 모아서 클래스로 만들 수 있음(Business Object(B.O). B.O는 입출력을 처리하지 않음(중요)
# 3. 실제 입출력을 담당하는 UI 클래스도 있음(UI)
# 4. 모든 프로그램은 CRUD(Create, Read, Update, Delete)가 기본
class LottoBall: # Data
def __init__(self, num):
self.num = num
class LottoMachine: # B.O
def __init__(self):
self.ballList = []
for i in range(1, 46):
self.ballList.append(LottoBall(i))
def selectBalls(self):
random.shuffle(self.ballList)
return self.ballList[0:6]
class LottoUI: # U.I
def __init__(self):
self.machine = LottoMachine()
def playLotto(self):
input("로또를 뽑을까요?")
selectedBalls = self.machine.selectBalls()
for ball in selectedBalls:
print("%d" % (ball.num))
# main
ui = LottoUI()
ui.playLotto()
| [
"kadragon@sasa.hs.kr"
] | kadragon@sasa.hs.kr |
e15fbbc01d14573d8ad538799b6a7f1880f85372 | d03a31d080267010c7bbd5ac9cbaa94cffa23292 | /tests/test_utils.py | 19edf3ba3488d535380466b4472b639ffe03e69d | [] | no_license | chmouel/python-nonobot | 0d654f4e608f77bb85d0adb16b9d3639a2586f0b | 4e07ec1a4ba755a6f7070f5778fe734a3180ad70 | refs/heads/master | 2021-01-01T18:02:30.832406 | 2014-12-05T10:48:14 | 2014-12-05T10:48:14 | 17,373,952 | 0 | 4 | null | 2014-03-15T09:04:21 | 2014-03-03T17:34:52 | Python | UTF-8 | Python | false | false | 1,231 | py | # -*- coding: utf-8 -*-
# Author: Chmouel Boudjnah <chmouel@chmouel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import urllib
import nonobot.utils as nutils
class UtilsTest(unittest.TestCase):
def test_clean_nick(self):
self.assertEqual(nutils.clean_nick("foo_____"),
'foo')
def test_clean_nick_nothing_on_empty(self):
self.assertIsNone(nutils.clean_nick(""))
def test_quoted(self):
self.assertEqual(nutils.clean_nick("foo***"),
urllib.quote("foo***"))
def test_clean_nick_with_space(self):
name = "foo bar"
self.assertEqual(nutils.clean_nick(name),
urllib.quote(name))
| [
"chmouel@chmouel.com"
] | chmouel@chmouel.com |
835478f976795573d8355bbee93293234d4cb55f | c87ae09a0229b4b4620c511b0c51eb685ec22b99 | /Python全栈学习/第四模块 网络编程进阶&数据库开发/practise/基于多进程的socket通信/队列的使用.py | b186e6d31bb0a9672c52aa8b644e8f974705eaaf | [] | no_license | caideyang/python2018 | 050f4c29c37b5bec963e77e0724cd05a9350deed | b7a3a728ef36b43448dc5ff594fdba500b67ad53 | refs/heads/master | 2020-03-25T06:02:54.699941 | 2018-11-01T23:04:29 | 2018-11-01T23:04:29 | 143,480,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | #!/usr/bin/python3
#@Author:CaiDeyang
#@Time: 2018/9/26 10:41
from multiprocessing import Queue
import time
if __name__ == "__main__":
q = Queue(3) # 创建队列,最大深度3
q.put("hello") # 往队列存放消息
q.put([1,2,3,4])
q.put({"name": "caideyang"})
# time.sleep(1)
print(q.empty()) # 判断队列是否为空
print(q.full()) # 判断队列是否满了
print(q.get()) # 从队列取数据
print(q.get()) | [
"deyangcai@163.com"
] | deyangcai@163.com |
515436b4d5fe3ddd0030470fde74f0965147a76f | 96cfaaa771c2d83fc0729d8c65c4d4707235531a | /Configuration/Spring08Production/python/Spring08_PhotonJetpt30-50_GEN_cfg.py | 8cf29fde0fbca5f06b831fcb9e3f0f9fe8054a8d | [] | no_license | khotilov/cmssw | a22a160023c7ce0e4d59d15ef1f1532d7227a586 | 7636f72278ee0796d0203ac113b492b39da33528 | refs/heads/master | 2021-01-15T18:51:30.061124 | 2013-04-20T17:18:07 | 2013-04-20T17:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Gen")
process.load("FWCore.MessageService.MessageLogger_cfi")
# control point for all seeds
process.load("Configuration.StandardSequences.SimulationRandomNumberGeneratorSeeds_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("Configuration.Spring08Production.Spring08_PhotonJetpt30_50_cfi")
process.load("Configuration.EventContent.EventContent_cff")
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/Spring08Production/data/Spring08_PhotonJetpt30-50_GEN.cfg,v $'),
annotation = cms.untracked.string('FastSim PhotonJet Pthat 30-50 for Spring08')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.GEN = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('GEN')
),
fileName = cms.untracked.string('PhotonJetpt30-50.root')
)
process.e = cms.EndPath(process.GEN)
process.schedule = cms.Schedule(process.e)
| [
"sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch"
] | sha1-197b93d87bf2e1eb4349df76c6ec25fd8f1f348e@cern.ch |
2d4343cd3084b61e1e48672feeb7de774d2d4833 | 6f3b3f29b0ed43f056526a7d96df27c623cf8a29 | /czsc/enum.py | ccd2fd8436e1ff3e6e6431b4b1183683bb279deb | [
"MIT"
] | permissive | dst1213/czsc | a163c362d162110557e64e8ea8b41350d4d90a00 | 939803a9b196c19db3d8498f63276a4fdb8a442b | refs/heads/master | 2023-04-22T04:17:22.703347 | 2021-04-30T13:53:40 | 2021-04-30T13:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,016 | py | # coding: utf-8
from enum import Enum
class Mark(Enum):
D = "底分型"
G = "顶分型"
class Direction(Enum):
Up = "向上"
Down = "向下"
class Freq(Enum):
F1 = "1分钟"
F5 = "5分钟"
F15 = "15分钟"
F30 = "30分钟"
F60 = "60分钟"
D = "日线"
W = "周线"
M = "月线"
class Signals(Enum):
Other = "Other~其他"
Y = "Y~是"
N = "N~否"
INB = "INB~向下笔买点区间"
INS = "INS~向上笔卖点区间"
FXB = "FXB~向下笔结束分型左侧高点升破"
FXS = "FXS~向上笔结束分型左侧低点跌破"
BU0 = "BU0~向上笔顶分完成"
BU1 = "BU1~向上笔走势延伸"
BD0 = "BD0~向下笔底分完成"
BD1 = "BD1~向下笔走势延伸"
# TK = Triple K
TK1 = "TK1~三K底分"
TK2 = "TK2~三K上涨"
TK3 = "TK3~三K顶分"
TK4 = "TK4~三K下跌"
# ==================================================================================================================
# 信号值编码规则:
# 多空:L - 多头信号;S - 空头信号;
# 编号:A0 - A类基础型;A1 - A类变种1 ... 以此类推;基础型有着特殊含义,用于因子组合,各种变种形态编号主要用于形态对比研究。
# 组合规则:笔数_多空_编号;如 LA0 表示多头信号A0
# ==================================================================================================================
LA0 = "LA0~aAb式底背驰"
LB0 = "LB0~aAbcd式底背驰"
LC0 = "LC0~aAbBc式底背驰"
LD0 = "LD0~abcAd式底背驰"
LE0 = "LE0~ABC式底背驰"
LF0 = "LF0~类趋势底背驰"
LG0 = "LG0~上颈线突破"
LH0 = "LH0~向上中枢完成"
LI0 = "LI0~三买"
LJ0 = "LJ0~向上三角扩张中枢"
LK0 = "LK0~向上三角收敛中枢"
LL0 = "LL0~向上平台型中枢"
# ------------------------------------------------------------------------------------------------------------------
LA1 = "LA1~aAb式底背驰特例一"
LA2 = "LA2~aAb式底背驰特例二"
LA3 = "LA3~aAb式底背驰特例三"
LB1 = "LB1~aAbcd式底背驰特例一"
LB2 = "LB2~aAbcd式底背驰特例二"
LB3 = "LB3~aAbcd式底背驰特例三"
LC1 = "LC1~aAbBc式底背驰特例一"
LC2 = "LC2~aAbBc式底背驰特例二"
LC3 = "LC3~aAbBc式底背驰特例三"
LD1 = "LD1~abcAd式底背驰特例一"
LD2 = "LD2~abcAd式底背驰特例二"
LD3 = "LD3~abcAd式底背驰特例三"
LE1 = "LE1~ABC式底背驰特例一"
LE2 = "LE2~ABC式底背驰特例二"
LE3 = "LE3~ABC式底背驰特例三"
LF1 = "LF1~类趋势底背驰特例一"
LF2 = "LF2~类趋势底背驰特例二"
LF3 = "LF3~类趋势底背驰特例三"
LG1 = "LG1~上颈线突破特例一"
LG2 = "LG2~上颈线突破特例二"
LG3 = "LG3~上颈线突破特例三"
LH1 = "LH1~向上中枢完成特例一"
LH2 = "LH2~向上中枢完成特例二"
LH3 = "LH3~向上中枢完成特例三"
LI1 = "LI1~三买特例一"
LI2 = "LI2~三买特例二"
LI3 = "LI3~三买特例三"
LJ1 = "LJ1~向上三角扩张中枢特例一"
LJ2 = "LJ2~向上三角扩张中枢特例二"
LJ3 = "LJ3~向上三角扩张中枢特例三"
LK1 = "LK1~向上三角收敛中枢特例一"
LK2 = "LK2~向上三角收敛中枢特例二"
LK3 = "LK3~向上三角收敛中枢特例三"
LL1 = "LL1~向上平台型中枢特例一"
LL2 = "LL2~向上平台型中枢特例二"
LL3 = "LL3~向上平台型中枢特例三"
# ------------------------------------------------------------------------------------------------------------------
SA0 = "SA0~aAb式顶背驰"
SB0 = "SB0~aAbcd式顶背驰"
SC0 = "SC0~aAbBc式顶背驰"
SD0 = "SD0~abcAd式顶背驰"
SE0 = "SE0~ABC式顶背驰"
SF0 = "SF0~类趋势顶背驰"
SG0 = "SG0~下颈线突破"
SH0 = "SH0~向下中枢完成"
SI0 = "SI0~三卖"
SJ0 = "SJ0~向下三角扩张中枢"
SK0 = "SK0~向下三角收敛中枢"
SL0 = "SL0~向下平台型中枢"
# ------------------------------------------------------------------------------------------------------------------
SA1 = "SA1~aAb式顶背驰特例一"
SA2 = "SA2~aAb式顶背驰特例二"
SA3 = "SA3~aAb式顶背驰特例三"
SB1 = "SB1~aAbcd式顶背驰特例一"
SB2 = "SB2~aAbcd式顶背驰特例二"
SB3 = "SB3~aAbcd式顶背驰特例三"
SC1 = "SC1~aAbBc式顶背驰特例一"
SC2 = "SC2~aAbBc式顶背驰特例二"
SC3 = "SC3~aAbBc式顶背驰特例三"
SD1 = "SD1~abcAd式顶背驰特例一"
SD2 = "SD2~abcAd式顶背驰特例二"
SD3 = "SD3~abcAd式顶背驰特例三"
SE1 = "SE1~ABC式顶背驰特例一"
SE2 = "SE2~ABC式顶背驰特例二"
SE3 = "SE3~ABC式顶背驰特例三"
SF1 = "SF1~类趋势顶背驰特例一"
SF2 = "SF2~类趋势顶背驰特例二"
SF3 = "SF3~类趋势顶背驰特例三"
SG1 = "SG1~下颈线突破特例一"
SG2 = "SG2~下颈线突破特例二"
SG3 = "SG3~下颈线突破特例三"
SH1 = "SH1~向下中枢完成特例一"
SH2 = "SH2~向下中枢完成特例二"
SH3 = "SH3~向下中枢完成特例三"
SI1 = "SI1~三卖特例一"
SI2 = "SI2~三卖特例二"
SI3 = "SI3~三卖特例三"
SJ1 = "SJ1~向下三角扩张中枢特例一"
SJ2 = "SJ2~向下三角扩张中枢特例二"
SJ3 = "SJ3~向下三角扩张中枢特例三"
SK1 = "SK1~向下三角收敛中枢特例一"
SK2 = "SK2~向下三角收敛中枢特例二"
SK3 = "SK3~向下三角收敛中枢特例三"
SL1 = "SL1~向下平台型中枢特例一"
SL2 = "SL2~向下平台型中枢特例二"
SL3 = "SL3~向下平台型中枢特例三"
# --------------------------------------------------------------------------------------------
# 信号值编码规则:
# 笔数:X3 - 三笔信号;X5 - 五笔信号;X7 - 七笔信号;X9 - 九笔信号;
# 多空:L - 多头信号;S - 空头信号;
# 编号:A0 - A类基础型;A1 - A类变种1 ... 以此类推
# 组合规则:笔数_多空_编号;如 X5LA0 表示五笔多头信号A0
# ============================================================================================
# 三笔形态信号
# 具体描述:
# --------------------------------------------------------------------------------------------
X3LA0 = "X3LA0~向下不重合"
X3LB0 = "X3LB0~向下奔走型中枢"
X3LC0 = "X3LC0~向下三角收敛中枢"
X3LD0 = "X3LD0~向下三角扩张中枢"
X3LE0 = "X3LE0~向下盘背中枢"
X3LF0 = "X3LF0~向下无背中枢"
X3SA0 = "X3SA0~向上不重合"
X3SB0 = "X3SB0~向上奔走型中枢"
X3SC0 = "X3SC0~向上三角收敛中枢"
X3SD0 = "X3SD0~向上三角扩张中枢"
X3SE0 = "X3SE0~向上盘背中枢"
X3SF0 = "X3SF0~向上无背中枢"
class Factors(Enum):
Other = "Other~其他"
Y = "Y~是"
N = "N~否"
# ==================================================================================================================
# 因子值编码规则:
# 类型:
# L1 - 一买/类一买;L2 - 二买/类二买;L3 - 三买/类三买;
# S1 - 一卖/类一卖;S2 - 二卖/类二卖;S3 - 三卖/类三卖;
# 编号:A0 - A类基础型;A1 - A类变种1 ... 以此类推
# 组合规则为 类型_编号
# ==================================================================================================================
L1A0 = "L1A0~一买"
L1A1 = "L1A1~一买特例一"
L1A2 = "L1A2~一买特例二"
L1A3 = "L1A3~一买特例三"
L1A4 = "L1A4~一买特例四"
L1A5 = "L1A5~一买特例五"
L2A0 = "L2A0~二买"
L2A1 = "L2A1~二买特例一"
L2A2 = "L2A2~二买特例二"
L2A3 = "L2A3~二买特例三"
L2A4 = "L2A4~二买特例四"
L2A5 = "L2A5~二买特例五"
L3A0 = "L3A0~三买"
L3A1 = "L3A1~三买特例一"
L3A2 = "L3A2~三买特例二"
L3A3 = "L3A3~三买特例三"
L3A4 = "L3A4~三买特例四"
L3A5 = "L3A5~三买特例五"
# ------------------------------------------------------------------------------------------------------------------
S1A0 = "S1A0~一卖"
S1A1 = "S1A1~一卖特例一"
S1A2 = "S1A2~一卖特例二"
S1A3 = "S1A3~一卖特例三"
S1A4 = "S1A4~一卖特例四"
S1A5 = "S1A5~一卖特例五"
S2A0 = "S2A0~二卖"
S2A1 = "S2A1~二卖特例一"
S2A2 = "S2A2~二卖特例二"
S2A3 = "S2A3~二卖特例三"
S2A4 = "S2A4~二卖特例四"
S2A5 = "S2A5~二卖特例五"
S3A0 = "S3A0~三卖"
S3A1 = "S3A1~三卖特例一"
S3A2 = "S3A2~三卖特例二"
S3A3 = "S3A3~三卖特例三"
S3A4 = "S3A4~三卖特例四"
S3A5 = "S3A5~三卖特例五"
# ==================================================================================================================
| [
"zeng_bin8888@163.com"
] | zeng_bin8888@163.com |
cc8b434ce82b6e1625a617bbbd89b70bd16b8524 | f225b35d49562e7a1114968bdf9128dbc4cd91ab | /myspider/items.py | fa8e60c784f6743287563124add7390aebc383f6 | [] | no_license | 15032373556/scrapy_exercise | 1948ce42102f99e414ae214b27163eb1d9e3b338 | 7a6e8b7a395044bda3acb649ab8f5a74bc854d82 | refs/heads/master | 2022-11-25T13:29:28.726984 | 2020-07-25T03:09:41 | 2020-07-25T03:09:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ItcastItem(scrapy.Item):
# 抓取 1.讲师姓名 2.讲师职称 3.讲师个人信息
# 测试提交代码
name = scrapy.Field()
title = scrapy.Field()
info = scrapy.Field()
| [
"1798549164@qq.com"
] | 1798549164@qq.com |
705796328fb21af7633d2c714b02f3ea143c60d9 | bd6e9f4f6261795fd876107b21b04cafec00b5d7 | /web/code/src/my_proj/settings/base.py | e4e7135dce4402369d49d6a25f66aef51faf12d5 | [] | no_license | hema71024/StudentPortal | 774f4a7167ce624c60e5b6c8a3c8ccac18860616 | 2aa53f57120acfd93178e5a5eee49c69a1527fdd | refs/heads/master | 2021-04-24T07:21:05.921571 | 2018-04-03T08:56:59 | 2018-04-03T08:56:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,286 | py | """
Django settings for my_proj project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from django.core.urlresolvers import reverse_lazy
from os.path import dirname, join, exists
# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))
STATICFILES_DIRS = [join(BASE_DIR, 'static')]
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = "/media/"
# Use Django templates using the new Django 1.8 TEMPLATES settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
join(BASE_DIR, 'templates'),
# insert more TEMPLATE_DIRS here
join(BASE_DIR, 'forumapp/templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Added Bt Me
# 'social.apps.django_app.context_processors.backends',
# 'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()
# Ideally move env file should be outside the git repo
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
environ.Env.read_env(str(env_file))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'authtools',
'crispy_forms',
'easy_thumbnails',
'profiles',
'accounts',
# My Added
'forumapp',
# 'social.apps.django_app.default',
# 'social_django',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'my_proj.urls'
WSGI_APPLICATION = 'my_proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in
# os.environ
'default': env.db(),
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Calcutta'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
ALLOWED_HOSTS = []
# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# For Bootstrap 3, change error alert to 'danger'
from django.contrib import messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Authentication Settings
AUTH_USER_MODEL = 'authtools.User'
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")
THUMBNAIL_EXTENSION = 'png' # Or any extn for your thumbnails
################################################################################
# My Added #####################################################################
# FOr Social Login##############################################################
AUTHENTICATION_BACKENDS = (
'social_core.backends.open_id.OpenIdAuth', # for Google authentication
'social_core.backends.google.GoogleOpenId', # for Google authentication
'social_core.backends.google.GoogleOAuth2', # for Google authentication
'social_core.backends.github.GithubOAuth2', # for Github authentication
'social_core.backends.facebook.FacebookOAuth2', # for Facebook authentication
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '365081772682-um9ctc90c5g9a7n63l3drhhmeqlvgvk8.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'tGur-f8jM7wHO-pL3uNsYqLv'
################################################################################
# For Mail Sending | [
"ashutoshhathidara98@gmail.com"
] | ashutoshhathidara98@gmail.com |
80d6314141d4f24833a5ea2410e5ce6f0c2c9472 | 074afd26d00bb742b03c12891b057ab263e640bf | /LeetCode 30 days/week1.2.py | ffcfa748b1935152b9419bb6cf112f940f619277 | [] | no_license | IsmailTitas1815/Data-Structure | 7a898800b1e53c778b1f2f11b0df259e52c20140 | fece8dd97d3e162e39fc31d5f3498a6dac49b0f0 | refs/heads/master | 2023-02-05T10:39:49.349484 | 2020-12-21T13:37:22 | 2020-12-21T13:37:22 | 296,343,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | # import re
# s = '123 456-7890'
# new_s = [int(i) for i in re.findall('\d', s)]
# unformattedPhone = "1239084590348509 456-7890"
# numbersList = [int(s) for s in unformattedPhone if s.isdigit()]
# print(numbersList)
class Solution:
def isHappy(self,num):
setofvalue = set()
while num!=1:
num = sum(int(i)**2 for i in str(num))
if num in setofvalue:
return False
setofvalue.add(num)
return True
s=0
old = 0
num = int(input())
obj = Solution()
boo = obj.isHappy(num)
print(boo)
#
# def happy_numbers(n):
# past = set()
# while n != 1:
# n = sum(int(i)**2 for i in str(n))
# if n in past:
# return False
# past.add(n)
# return True
# print([x for x in range(500) if happy_numbers(x)][:10]) | [
"titas.sarker1234@gmail.com"
] | titas.sarker1234@gmail.com |
def2f5d76a06abfa75bee8540d5a5982b97fa204 | 9fb1c85a6d39c08e2a3cc235335bc482ad909b71 | /prowler/providers/aws/services/route53/route53_domains_transferlock_enabled/route53_domains_transferlock_enabled.py | d49160981b34ad878af72233ab6ebf3869bfae89 | [
"Apache-2.0"
] | permissive | muharihar/prowler | 06dbdeaa0696dd65d72c33ff3c9f957b97f83d7a | 25c9bc07b219cc02004cc0b84adcfdcf18d5ad2b | refs/heads/master | 2023-02-18T01:26:54.161003 | 2023-02-10T11:38:13 | 2023-02-10T11:38:13 | 238,623,868 | 0 | 0 | null | 2020-02-06T06:36:36 | 2020-02-06T06:36:35 | null | UTF-8 | Python | false | false | 1,012 | py | from prowler.lib.check.models import Check, Check_Report_AWS
from prowler.providers.aws.services.route53.route53domains_client import (
route53domains_client,
)
class route53_domains_transferlock_enabled(Check):
def execute(self) -> Check_Report_AWS:
findings = []
for domain in route53domains_client.domains.values():
report = Check_Report_AWS(self.metadata())
report.resource_id = domain.name
report.region = domain.region
if domain.status_list and "clientTransferProhibited" in domain.status_list:
report.status = "PASS"
report.status_extended = (
f"Transfer Lock is enabled for the {domain.name} domain"
)
else:
report.status = "FAIL"
report.status_extended = (
f"Transfer Lock is disabled for the {domain.name} domain"
)
findings.append(report)
return findings
| [
"noreply@github.com"
] | muharihar.noreply@github.com |
15d8600fcc62bae425faaf90085a8f09360ab77e | df038c9a84ca9b11bbef86d84d2e6feb6fd733bf | /setup.py | d7698c8b3fa3d0b9da3a92af8fb21d3751e3cf58 | [
"BSD-2-Clause"
] | permissive | wybaby/PSpider | d31ff8cbde1a3f23d05c1684c455beea2b48c915 | 5087fc20589878fa123daa113213fbf17282a35b | refs/heads/master | 2021-01-22T01:55:16.258596 | 2017-06-23T03:35:04 | 2017-06-23T07:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | # _*_ coding: utf-8 _*_
"""
install script: python3 setup.py install
"""
from setuptools import setup, find_packages
setup(
name="spider",
version="2.4.5",
author="xianhu",
keywords=["spider", "crawler", "multi-threads", "asyncio", "distributed"],
packages=find_packages(exclude=("otherfiles", "test.*")),
package_data={
"": ["*.conf"], # include all *.conf files
},
install_requires=[
"aiohttp>=2.0.0", # aiohttp, http for asyncio
"pybloom_live>=2.0.0", # pybloom-live, fork from pybloom
"redis>=2.10.0", # redis, python client for redis
"requests>=2.10.0", # requests, http for humans
]
)
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
99cbb1c2c9693fe423a01b59ef5289715abab28f | 396ee8958eb753d96a62b1199103c2c1194c08e0 | /creme/ensemble/bagging.py | a8509f03c6ffe22b2ed05d0f2a2d8f770954a48a | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ZuoMatthew/creme | fe1499a9071a994587172f908a530522be6b915b | 27d40fa7a5014c94d7f95dee259368c0adc7115c | refs/heads/master | 2020-04-22T20:46:58.100005 | 2019-02-12T17:13:15 | 2019-02-12T17:13:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,843 | py | import collections
import copy
from sklearn import utils
from .. import base
__all__ = ['BaggingClassifier']
class BaggingClassifier(base.BinaryClassifier):
"""Bagging for classification.
For each incoming observation, each model's `fit_one` method is called `k` times where `k`
is sampled from a Poisson distribution of parameter 1. `k` thus has a 36% chance of being equal
to 0, a 36% chance of being equal to 1, an 18% chance of being equal to 2, a 6% chance of being
equal to 3, a 1% chance of being equal to 4, etc. You can do `scipy.stats.poisson(1).pmf(k)`
for more detailed values.
Parameters:
base_estimator (creme.base.Classifier): The estimator to bag.
Example:
In the following example three logistic regressions are bagged together. The performance is
slightly better than when using a single logistic regression.
::
>>> import creme.compose
>>> import creme.ensemble
>>> import creme.linear_model
>>> import creme.model_selection
>>> import creme.optim
>>> import creme.preprocessing
>>> import creme.stream
>>> from sklearn import datasets
>>> from sklearn import metrics
>>> X_y = creme.stream.iter_sklearn_dataset(
... load_dataset=datasets.load_breast_cancer,
... shuffle=True,
... random_state=42
... )
>>> optimiser = creme.optim.VanillaSGD()
>>> model = creme.compose.Pipeline([
... ('scale', creme.preprocessing.StandardScaler()),
... ('learn', creme.linear_model.LogisticRegression(optimiser))
... ])
>>> model = creme.ensemble.BaggingClassifier(model, n_estimators=3)
>>> metric = metrics.roc_auc_score
>>> creme.model_selection.online_score(X_y, model, metric)
0.991497...
References:
- `Online Bagging and Boosting <https://ti.arc.nasa.gov/m/profile/oza/files/ozru01a.pdf>`_
"""
def __init__(self, base_estimator=None, n_estimators=10, random_state=42):
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.estimators = [copy.deepcopy(base_estimator) for _ in range(n_estimators)]
self.rng = utils.check_random_state(random_state)
def fit_one(self, x, y):
y_pred = self.predict_proba_one(x)
for estimator in self.estimators:
for _ in range(self.rng.poisson(1)):
estimator.fit_one(x, y)
return y_pred
def predict_one(self, x):
votes = collections.Counter((estimator.predict_one(x) for estimator in self.estimators))
return max(votes, key=votes.get)
def predict_proba_one(self, x):
return sum(estimator.predict_proba_one(x) for estimator in self.estimators) / len(self.estimators)
| [
"maxhalford25@gmail.com"
] | maxhalford25@gmail.com |
ee819a6e8372d9aa07f36cdf730a81eaea0c1055 | 18b977dccd70e9e5a1b553b28ab0413fb3f54f4b | /SoftUni/Python Developmen/Python-Fundamentals/04_Lists/the_office.py | 12c13b6f4c2e6e1f5e8584f7e661696c2d418881 | [] | no_license | stevalang/Coding-Lessons | 7203e3a18b20e33e8d596e3dfb58d26c50b74530 | 2d0060c2268ad966efdcae4e6e994ac15e57243a | refs/heads/master | 2023-06-05T08:28:33.290530 | 2021-06-16T19:37:29 | 2021-06-16T19:37:29 | 284,852,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | employees = input().split()
factor = int(input())
employee_happiness = list(map(lambda x: int(x) * factor, employees))
avg_happiness = sum(employee_happiness) / len(employee_happiness)
# above_avg_happy = [employee for employee in employee_happiness if employee >= avg_happiness]
above_avg_happy = list(filter(lambda employee: employee >= avg_happiness, employee_happiness))
if int(len(above_avg_happy)) >= len(employee_happiness) / 2:
print(f'Score: {len(above_avg_happy)}/{len(employee_happiness)}. Employees are happy!')
else:
print(f'Score: {len(above_avg_happy)}/{len(employee_happiness)}. Employees are not happy!')
| [
"rbeecommerce@gmail.com"
] | rbeecommerce@gmail.com |
e0315093c2111b0b43b0c96efd9f3b6ae0dd7d10 | 1a639d185f9c883b7bebf33c577c58b22ac93c7e | /other/sound.py | d69bb732b93754a0a9bbad2d5b75c7350984b2d5 | [] | no_license | gofr1/python-learning | bd09da5b5850b1533a88b858690ed4380b55d33e | 19343c985f368770dc01ce415506506d62a23285 | refs/heads/master | 2023-09-02T15:42:27.442735 | 2021-11-12T10:17:13 | 2021-11-12T10:17:13 | 237,828,887 | 0 | 0 | null | 2021-11-12T10:17:14 | 2020-02-02T20:03:42 | Python | UTF-8 | Python | false | false | 568 | py | #!/usr/bin/env python3
# gTTS (Google Text-to-Speech), a Python library and CLI tool to interface with Google Translate text-to-speech API
# sudo pip3 install gtts
from io import BytesIO
from pygame import mixer
from gtts import gTTS
def speak(text):
with BytesIO() as f:
gTTS(text=text, lang="en").write_to_fp(f)
f.seek(0)
mixer.init()
mixer.music.load(f)
mixer.music.play()
while mixer.music.get_busy():
continue
if __name__ == '__main__':
text = input("What should I say? >>")
speak(text) | [
"gofr.one@gmail.com"
] | gofr.one@gmail.com |
59602afbce466d3a9113e0c7e330db6597cd02fa | cb242b1fdf3889d4df347f3102daf6584a0c40a4 | /threeandthrees/words.py | d562ed188fda92eba7607308a68f44f8b4960f5d | [
"MIT"
] | permissive | bwarren2/threeandthrees | d927711f0927a8e3493cd201ffdd8d930e5586f2 | 2a09a398ab332c27e2e7722e612fa18318b50e60 | refs/heads/master | 2023-05-25T23:28:11.706181 | 2021-04-30T15:12:45 | 2021-04-30T15:12:45 | 68,949,353 | 0 | 0 | MIT | 2021-04-30T15:12:46 | 2016-09-22T18:18:05 | Python | UTF-8 | Python | false | false | 2,546 | py | from collections import defaultdict, OrderedDict
import random
from colorama import init, Fore
import os
import re
init(autoreset=True)
safe_pattern = re.compile('^[a-z]{9}$')
def extract_words():
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + '/american-english.txt', 'r') as f:
raw_data = f.read().split('\n')
data = list(filter(is_clean, raw_data))
return data
def is_clean(word):
return re.search(safe_pattern, word) is not None
def extract_cores(wordlist):
coremap = defaultdict(list)
for word in wordlist:
coremap[word[3:6]].append(word)
return coremap
all_words = extract_words()
coremap = extract_cores(all_words)
class Wordmonger(object):
def __init__(self, all_words, coremap):
self.words = all_words
self.coremap = coremap
self.challenge = OrderedDict()
def answer_count(self, candidate):
value = self.coremap.get(candidate, None)
if value is None:
return 0
else:
return len(value)
def answers(self, candidate):
return self.coremap.get(candidate, None)
def generate(self):
key = random.choice(list(self.coremap.keys()))
return key
# return self.coremap[key]
def check(self, arg):
return arg in self.coremap[arg[3:6]]
def show_challenge(self):
for idx, (key, value) in enumerate(self.challenge.iteritems(), 1):
if value is not None:
print(
"{idx}:\t {color}{word}".format(
**{
'idx': idx, 'word': value, 'color': Fore.GREEN
}
)
)
else:
print(
"{idx}:\t ___{core}___".format(
**{'idx': idx, 'core': key}
)
)
def formulate_challenge(self, n=10):
self.challenge = OrderedDict()
while n > 0:
new_core = random.choice(list(self.coremap.keys()))
if new_core not in list(self.challenge.keys()):
self.challenge[new_core] = None
n -= 1
def claim(self, answer):
key = answer[3:6]
if (
answer in self.coremap[key]
and key in list(self.challenge.keys())
):
self.challenge[key] = answer
return True
else:
return False
monger = Wordmonger(all_words, coremap)
| [
"bwarren2@gmail.com"
] | bwarren2@gmail.com |
7f21a3c4b4eab2603971a2c036ccf0062bc692a0 | 92d5c15b92356de9f66d2d4738f3c6f00ef2796f | /alembic/versions/11a00705ac61_added_a_bunch_of_gra.py | 125da279586118ae1e213bc3e51a31aadf58a062 | [] | no_license | colinmorris/moz-graphs | 2f88472b7ad23ee0c63977c2151ac102af475769 | f412c0564fb210327436da0468f78932bd21dca0 | refs/heads/master | 2016-09-06T04:36:39.322822 | 2013-07-27T22:00:14 | 2013-07-27T22:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | """added a bunch of graph vars for assignee
Revision ID: 11a00705ac61
Revises: 48044ce97c4f
Create Date: 2013-04-08 10:21:12.247290
"""
# revision identifiers, used by Alembic.
revision = '11a00705ac61'
down_revision = '48044ce97c4f'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('bugmonths', sa.Column('assignee_constraint_prior_month', sa.Float(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('bugmonths', 'assignee_constraint_prior_month')
### end Alembic commands ###
| [
"colin.morris2@gmail.com"
] | colin.morris2@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.