blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15106812cf7653e88c461073845014f9006b8bb3
|
8b4bb6cc0478b0bb535cc1bcf619b67bddf6c155
|
/sorting/Frequency_Queries .py
|
a2fa8ac8735c99dcab94e98f8a7927fb8a7dc2e4
|
[] |
no_license
|
PiyushChandra17/HackerRank_DSA
|
2da943fcbc09918ba09757b6b0849c42f49bbd22
|
609b8272bf56006833aa8d5385ef331605bcc0e1
|
refs/heads/master
| 2022-12-02T17:56:26.648609
| 2020-08-08T18:37:11
| 2020-08-08T18:37:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
# Complete the freqQuery function below.
def freqQuery(queries):
    """Answer HackerRank "Frequency Queries".

    Each query is a pair (op, value):
      op 1 — insert value,
      op 2 — remove one occurrence of value (no-op if absent),
      op 3 — append 1 to the result if some value occurs exactly `value`
             times, else append 0.
    Returns the list of 0/1 answers for the type-3 queries.
    """
    value_freq = Counter()   # value -> how many times it is present
    freq_count = Counter()   # frequency -> how many values have that frequency
    answers = []
    for op, arg in queries:
        if op == 1:
            freq_count[value_freq[arg]] -= 1
            value_freq[arg] += 1
            freq_count[value_freq[arg]] += 1
        elif op == 2:
            # Only decrement when the value is actually present.
            if value_freq[arg] > 0:
                freq_count[value_freq[arg]] -= 1
                value_freq[arg] -= 1
                freq_count[value_freq[arg]] += 1
        else:
            answers.append(1 if freq_count[arg] > 0 else 0)
    return answers
if __name__ == '__main__':
    # HackerRank harness: results go to the file named by the OUTPUT_PATH
    # environment variable rather than to stdout.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # First stdin line: the number of queries; then one "<op> <value>" per line.
    q = int(input().strip())
    queries = []
    for _ in range(q):
        queries.append(list(map(int, input().rstrip().split())))
    ans = freqQuery(queries)
    # One 0/1 result per type-3 query, newline separated.
    fptr.write('\n'.join(map(str, ans)))
    fptr.write('\n')
    fptr.close()
|
[
"noreply@github.com"
] |
PiyushChandra17.noreply@github.com
|
bd5bfdad4fb1f2096c2f1618a38e3041863b7c38
|
958fc2764dedf880b0027bcc00d4f042e0fb61b0
|
/natas5.py
|
2e2861fe33ce2f0ffd5ed00472e3b163cebfe8c4
|
[] |
no_license
|
marciopocebon/overthewire_natas_solutions
|
ee950249341abd639042efea8fd817c0951c68b9
|
67c726c74f9e7c0840a4b1c3b633a4bbc185f4a3
|
refs/heads/master
| 2021-01-04T01:06:18.654269
| 2018-10-15T22:33:38
| 2018-10-15T22:33:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
#!/usr/bin/env python
import requests
import re
# OverTheWire "natas" wargame, level 5 solver.
# NOTE: Python 2 syntax — the bare `print` statement at the bottom will not
# run under Python 3.
username = 'natas5'
password = 'iX6IOfmpN7AYOQGPwtn3fXpbaJVJcHfq'
# headers = { "Referer" : "http://natas5.natas.labs.overthewire.org/" }
# Level 5 checks a "loggedin" cookie; forging it to "1" bypasses the check.
cookies = { "loggedin" : "1" }
url = 'http://%s.natas.labs.overthewire.org/' % username
# response = requests.get(url, auth = (username, password), headers = headers )
session = requests.Session()
response = session.get(url, auth = (username, password), cookies = cookies )
content = response.text
# print content
# Scrape the next level's password out of the response body.
print re.findall(' natas6 is (.*)</div>', content)[0]
|
[
"johnhammond010@gmail.com"
] |
johnhammond010@gmail.com
|
2cbf15f90fb9026a383dca7a34fb3f4ca4d06a7d
|
fa89ef4a8eb06dc2015d7116637f230b6891eb8d
|
/refinery/units/formats/pe/dotnet/__init__.py
|
187abebc117fd0d022f710618937f9f2e1c730b3
|
[
"BSD-3-Clause"
] |
permissive
|
binref/refinery
|
f61878d9fddf616fee8edf226df22f6a35238940
|
4c7c3717ae45543b9d7bae60a4af4c00993cf719
|
refs/heads/master
| 2023-08-17T17:02:34.357138
| 2023-08-14T08:43:05
| 2023-08-14T08:43:05
| 228,019,736
| 439
| 48
|
NOASSERTION
| 2023-09-11T10:26:02
| 2019-12-14T12:32:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from typing import Any
from enum import Enum
from hashlib import md5, sha1, sha256, sha512
from zlib import crc32
from refinery.units import Arg, Unit
from refinery.units.encoding.hex import hex
from refinery.units.encoding.esc import esc
from refinery.units.encoding.url import url
from refinery.units.encoding.b64 import b64
from refinery.lib.json import BytesAsArrayEncoder
from refinery.lib.dotnet.types import Blob
class UNIT(Enum):
    """Encoder units selectable for representing binary data in JSON output.

    Values are the refinery encoding unit classes imported above (note that
    `hex` here is refinery's unit, which shadows the builtin in this module).
    """
    HEX = hex
    ESC = esc
    URL = url
    B64 = b64
class HASH(Enum):
    """Digest algorithms selectable for hashing binary data.

    Values are the hashlib constructors imported above, plus zlib.crc32.
    """
    MD5 = md5
    CRC32 = crc32
    SHA1 = sha1
    SHA256 = sha256
    SHA512 = sha512
class DotNetEncoder(BytesAsArrayEncoder):
    """JSON encoder that additionally handles .NET `Blob` values.

    Blobs are converted to `bytes` so the parent encoder can serialize them;
    anything the parent still cannot handle falls back to `str(obj)`.
    """
    def default(self, obj):
        if isinstance(obj, Blob):
            obj = bytes(obj)
        try:
            return super().default(obj)
        except TypeError:
            # Last resort: represent unknown objects by their string form.
            return str(obj)
class JSONEncoderUnit(Unit, abstract=True):
    """
    An abstract unit that provides the interface for displaying parsed data
    as JSON. By default, binary data is converted into integer arrays.
    """
    def __init__(
        self,
        encode: Arg.Option('-e', group='BIN', choices=UNIT, help=(
            'Select an encoder unit used to represent binary data in the JSON output. Available are: {choices}.')) = None,
        digest: Arg.Option('-d', group='BIN', choices=HASH, help=(
            'Select a hashing algorithm to digest binary data; instead of the data, only the hash will be displayed. The '
            'available algorithms are: {choices}.')) = None,
        **keywords
    ):
        # -e and -d share the 'BIN' option group and are mutually exclusive
        # (enforced explicitly below).
        encode = Arg.AsOption(encode, UNIT)
        digest = Arg.AsOption(digest, HASH)
        super().__init__(**keywords)
        if encode is not None and digest is not None:
            raise ValueError('Only one binary data conversion can be specified.')
        elif encode is not None:
            # Render binary blobs through the chosen encoder unit's reverse
            # (encoding) direction, decoded to text for JSON embedding.
            unit = encode.value()
            class CustomEncoder(DotNetEncoder): # noqa
                def encode_bytes(self, obj): return unit.reverse(obj).decode('utf8')
        elif digest is not None:
            # NOTE(review): the encode branch instantiates via `.value()`, but
            # here `digest` is invoked directly — verify Arg.AsOption returns a
            # callable (not a bare enum member) on this path.
            class CustomEncoder(DotNetEncoder):
                def encode_bytes(self, obj): return digest(obj).hexdigest()
        else:
            CustomEncoder = DotNetEncoder
        # Encoder class consumed by to_json below.
        self.encoder = CustomEncoder
    def to_json(self, obj: Any) -> bytes:
        """Serialize *obj* to indented JSON, encoded with the unit's codec."""
        return json.dumps(obj, cls=self.encoder, indent=4).encode(self.codec)
|
[
"rattle@nullteilerfrei.de"
] |
rattle@nullteilerfrei.de
|
c90c46d63978b7b8df5aa9b6761c81c9b33f0160
|
ce9d475cebeaec9cf10c467c577cb05c3b431fad
|
/code/chapter_22_example_07.py
|
14c8271ee19d7a4d7aae1a0b9ae7a72b488b3952
|
[] |
no_license
|
Sundarmax/two-scoops-of-django-2.0-code-examples
|
9c8f98d145aaa5498bb558fc5125379cd39003e5
|
a15b2d4c240e879c03d2facf8592a644e27eb348
|
refs/heads/master
| 2022-04-19T10:14:53.795688
| 2020-03-04T15:16:25
| 2020-03-04T15:16:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
"""
Using This Code Example
=========================
The code examples provided are provided by Daniel Greenfeld and Audrey Roy of
Two Scoops Press to help you reference Two Scoops of Django: Best Practices
for Django 1.11 for Django 2.0 projects. Code Samples follow PEP-0008, with exceptions made for the
purposes of improving book formatting. Example code is provided "as is", and
is not intended to be, and should not be considered or labeled as "tutorial
code".
Permissions
============
In general, you may use the code we've provided with this book in your
programs and documentation. You do not need to contact us for permission
unless you're reproducing a significant portion of the code or using it in
commercial distributions. Examples:
* Writing a program that uses several chunks of code from this course does
not require permission.
* Selling or distributing a digital package from material taken from this
book does require permission.
* Answering a question by citing this book and quoting example code does not
require permission.
* Incorporating a significant amount of example code from this book into your
product's documentation does require permission.
Attributions usually include the title, author, publisher and an ISBN. For
example, "Two Scoops of Django: Best Practices for Django 1.11, by Daniel
Roy Greenfeld and Audrey Roy Greenfeld. Copyright 2017 Two Scoops Press
(978-0-692-91572-1)."
If you feel your use of code examples falls outside fair use of the permission
given here, please contact us at info@twoscoopspress.org.
"""
@mock.patch.object(requests, 'get')
def test_request_failure(self, get):
    """Test if the target site is inaccessible."""
    # BUG FIX: the def line was missing its trailing colon (SyntaxError), and
    # the exception class lives in requests.exceptions (plural), not
    # requests.exception.
    get.side_effect = requests.exceptions.ConnectionError()
    with self.assertRaises(CantListFlavors):
        list_flavors_sorted()
@mock.patch.object(requests, 'get')
def test_ssl_failure(self, get):
    """Test if we can handle SSL problems elegantly."""
    # BUG FIX: added the missing colon on the def line; corrected
    # requests.exception -> requests.exceptions; and renamed the method from
    # test_request_failure, which duplicated the previous test's name so the
    # runner would only ever discover one of the two.
    get.side_effect = requests.exceptions.SSLError()
    with self.assertRaises(CantListFlavors):
        list_flavors_sorted()
|
[
"pydanny@gmail.com"
] |
pydanny@gmail.com
|
d6ab0ddce5ea53451beeea5d56de10025e55a93e
|
f805cf2eef884fa0670332208b1b70ed8549751d
|
/parametres/urls.py
|
3cc11de2d7d0e22a6ae2ac1a737dbbceacd4524a
|
[] |
no_license
|
parheto10/agro_tracability
|
8314ad73f2504dd90af4600938065ca18d93ab07
|
42520a3a2753707ed1100b5cdfb680bf7e00c80f
|
refs/heads/master
| 2023-08-05T16:17:02.481499
| 2021-10-08T16:54:10
| 2021-10-08T16:54:10
| 414,891,821
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,456
|
py
|
from django.urls import path
# from cooperatives.views import parcelle_list
from .views import (
connexion,
loggout,
index,
detail_coop,
map_parcelles,
coop_parcelles,
catre_parcelles,
catre_parcelles_coop
# projet,
# # pepiniere,
# # detail_pepiniere,
# # formation,
# detail_proj,
# localisation,
# # chart,
# prod_coop,
# parcelle_coop,
# localisation_coop,
# section_coop,
# # PlantingApiView,
# sous_section_coop,
# Stats_coop, Production_plan,
# Stats_semences, stat_prod_coop, plants_coop, semences_coop, formations, detail_formation, site_pepinieres,
# coop_pepiniere, pepiniere, pepiniere_coop, export_prods_to_pdf, export_parcelles_to_pdf, export_prod_xls,
# export_parcelle_xls, export_plant_xls, export_formation_xls, ParcellesMapView, ProducteurApiView, ParcelleApiView,
# ParcelleJson, PepiniereJson, PepiniereApiView, FormationApiView, folium_map,
# planting_coop, planting_list, produteur_list, parcelles_list, details_planting_list,
# DetailPlantingJson, DetailPlantingApiView, detail_planting, folium_palntings_map,
# Plantings, detailPlantings, plantings_coop, plantingcoop, update_projet, delete_projet # , ParcelleCooperativeApi
# detail_formation,
)
# URL routes for this app. A large amount of commented-out legacy routing
# (dozens of dead `path(...)` lines) was removed — only the routes whose views
# are actually imported remain; recover old ones from version control if needed.
urlpatterns = [
    # Authentication and landing pages.
    path('', connexion, name='connexion'),
    path('logout', loggout, name='logout'),
    path('index/', index, name='accueil'),
    # Cooperative detail page.
    path('detail_coop/<int:id>', detail_coop, name='detail_coop'),
    # Parcel-map JSON endpoints and the pages that render them.
    path('api/v1/map_parcelles/', map_parcelles, name='map_parcelles'),
    path('map_parcelles/', catre_parcelles, name='catre_parcelles'),
    path('api/v1/coop_parcelles/<int:id>', coop_parcelles, name='coop_parcelles'),
    path('coop_parcelles/', catre_parcelles_coop, name='carte_parcelles_coop'),
]
|
[
"parheto10@gmail.com"
] |
parheto10@gmail.com
|
6578c2d9e7c4ebe710d7ec7ba3661cb86e8b6c35
|
411a600c355e34f8a3d158a1de6b22f0f509aa18
|
/hw2/hw2-copy-figures.py
|
0440c833791a9c594c3ed8532298d41ec10fc7cf
|
[] |
no_license
|
eggachecat/pynn
|
e216e6cd5f0c9671ef5009e1422bdaa801f7b0f4
|
74a75ee56483be22b520b418b5a52ae176f4a8e1
|
refs/heads/master
| 2021-01-11T00:37:58.010712
| 2016-12-31T17:25:38
| 2016-12-31T17:25:38
| 70,506,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
import shutil, errno
import os
def copyanything(src, dst):
    """Copy *src* to *dst*, whether src is a directory tree or a single file.

    Tries a recursive tree copy first; if src turns out to be a plain file
    (copytree raises ENOTDIR), falls back to a flat file copy. Any other
    OSError is re-raised.
    """
    try:
        shutil.copytree(src, dst)
    except OSError as exc:  # python >2.5
        if exc.errno != errno.ENOTDIR:
            raise
        shutil.copy(src, dst)
# Experiment ids whose figure directories should be copied out.
results = [988970911,
           941530379,
           349685028,
           783952587,
           156884050,
           449777493,
           983956549,
           106841919,
           994865007,
           87401771,
           782990781,
           666671943,
           944565074,
           195339946,
           312443606,
           721505406,
           41157021,
           790121321,
           805213998,
           963255433]
# NOTE(review): os.path.join receives a single pre-concatenated argument, so
# it performs no joining here, and "\exp_figures\\" relies on "\e" not being a
# recognized escape (a DeprecationWarning on modern Python). Paths are
# Windows-specific throughout.
root_src = os.path.join(os.path.dirname(__file__ ) + "\exp_figures\\")
root_dst = "d:\\good_exp_figures"
if not os.path.exists(root_dst):
    os.makedirs(root_dst)
# Copy each selected experiment's figure directory under root_dst.
for dirname in results:
    src = root_src + "\\" + str(dirname)
    dst = root_dst + "\\" + str(dirname)
    copyanything(src, dst)
|
[
"sunao_0626@hotmail.com"
] |
sunao_0626@hotmail.com
|
76cc67eda36092c76628d9d8c651dd0f974afeda
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/cmd/data/CreateDataInStructureBackgroundCmd.pyi
|
5722a33e1967b7c8c164d39a1314696abfc50abb
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,914
|
pyi
|
import ghidra.framework.cmd
import ghidra.framework.model
import ghidra.util.task
import java.lang
# Auto-generated Ghidra type stub (.pyi). Names such as `overload`, `List`,
# `unicode` and `long` are provided by the Ghidra stub environment (Jython /
# Java bridge types); they are not standard Python 3 builtins.
class CreateDataInStructureBackgroundCmd(ghidra.framework.cmd.BackgroundCommand):
    """
    Background command to create data across a selection inside of a structure.
    """

    @overload
    def __init__(self, addr: ghidra.program.model.address.Address, startPath: List[int], length: int, dt: ghidra.program.model.data.DataType):
        """
        Constructs a command for applying dataTypes within an existing structure
        across a range of components.
        Simple pointer conversion will NOT be performed.
        @param addr The address of the existing structure.
        @param startPath the componentPath where to begin applying the datatype.
        @param length the number of bytes to apply the data type to.
        @param dt the datatype to be applied to the range of components.
        """
        ...

    @overload
    def __init__(self, addr: ghidra.program.model.address.Address, startPath: List[int], length: int, dt: ghidra.program.model.data.DataType, stackPointers: bool):
        """
        This is the same as {@link #CreateDataInStructureBackgroundCmd(Address, int[], int, DataType )} except that
        it allows the caller to control whether or not a pointer data type is created when a
        non-pointer data type is applied at a location that previously contained a pointer data
        type.
        @param addr The address of the existing structure.
        @param startPath the componentPath where to begin applying the datatype.
        @param length the number of bytes to apply the data type to.
        @param dt the datatype to be applied to the range of components.
        @param stackPointers True will convert the given data type to a pointer if it is not one
        and the previous type was a pointer; false will not make this conversion
        """
        ...

    @overload
    def applyTo(self, obj: ghidra.framework.model.DomainObject) -> bool: ...

    @overload
    def applyTo(self, obj: ghidra.framework.model.DomainObject, monitor: ghidra.util.task.TaskMonitor) -> bool:
        """
        @see ghidra.framework.cmd.BackgroundCommand#applyTo(ghidra.framework.model.DomainObject, ghidra.util.task.TaskMonitor)
        """
        ...

    def canCancel(self) -> bool:
        """
        Check if the command can be canceled.
        @return true if this command can be canceled
        """
        ...

    def dispose(self) -> None:
        """
        Called when this command is going to be removed/canceled without
        running it. This gives the command the opportunity to free any
        temporary resources it has hold of.
        """
        ...

    # The following methods mirror java.lang.Object / Command interface members.
    def equals(self, __a0: object) -> bool: ...

    def getClass(self) -> java.lang.Class: ...

    def getName(self) -> unicode: ...

    def getStatusMsg(self) -> unicode: ...

    def hasProgress(self) -> bool:
        """
        Check if the command provides progress information.
        @return true if the command shows progress information
        """
        ...

    def hashCode(self) -> int: ...

    def isModal(self) -> bool:
        """
        Check if the command requires the monitor to be modal. No other
        command should be allowed, and the GUI will be locked.
        @return true if no other operation should be going on while this
        command is in progress.
        """
        ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def taskCompleted(self) -> None:
        """
        Called when the task monitor is completely done with indicating progress.
        """
        ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
554ddb63741d0c1664dbeb9b2eae63e3ea3ff840
|
d60ee49abaee6c74c5b777f8f112a7f75f71f029
|
/genome/variants2/filter/VCF/somatic/genes/indels/common_rsid.py
|
e24057ae9cd0d708b82f81fa0fea1cf630326aea
|
[] |
no_license
|
ak352/melanomics
|
41530f623b4bfdbd5c7b952debcb47622d1a8e88
|
fc5e6fdb1499616fb25a8dc05259add8a65aeca0
|
refs/heads/master
| 2020-12-24T16:14:42.271416
| 2015-08-06T12:48:52
| 2015-08-06T12:48:52
| 18,439,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,026
|
py
|
import sys
def ParseFields(line):
    """Map each tab-separated column name of a header line to its index.

    The final character of *line* (the trailing newline) is dropped before
    splitting; duplicate names keep the right-most index.
    """
    names = line[:-1].split("\t")
    return {name: index for index, name in enumerate(names)}
def read_tsv(infile):
    """Yield one dict per data row of a tab-separated file with a header row.

    Keys are the header column names; each record also carries the
    reassembled row under the 'all' key. (Header parsing is inlined here —
    same logic as ParseFields.)
    """
    with open(infile) as handle:
        header = next(handle)[:-1].split("\t")
        columns = {name: index for index, name in enumerate(header)}
        for raw in handle:
            cells = raw[:-1].split("\t")
            row = {name: cells[index] for name, index in columns.items()}
            row["all"] = "\t".join(cells)
            yield row
def read_vcf(infile, count=False):
    """Yield one dict per data line of a VCF file, keyed by header column.

    Lines starting with '##' are skipped; the '#CHROM' line supplies the
    column names (the leading '#' stays in the key, e.g. record['#CHROM']).
    Each record also carries the reassembled line under the 'all' key.
    When *count* is true, progress is written to stderr every 1,000,000 lines.
    """
    with open(infile) as f:
        line = next(f)
        while line.startswith("##"):
            line = next(f)
        assert line.startswith("#CHROM")
        # Header parsing inlined (same logic as ParseFields): name -> index.
        header = line[:-1].split("\t")
        var = {name: index for index, name in enumerate(header)}
        num_lines = 0
        for line in f:
            record = {}
            # BUG FIX: this was line[1:-1], which silently dropped the first
            # character of every data row — only the header line carries a
            # leading '#'. Now matches read_tsv's line[:-1].
            cells = line[:-1].split("\t")
            for x in var:
                record[x] = cells[var[x]]
            record["all"] = "\t".join(cells)
            num_lines += 1
            if count:
                # BUG FIX: the modulus test was inverted (`% 1000000` is
                # truthy for every line that is NOT a multiple of a million),
                # so progress was printed for nearly every line.
                if num_lines % 1000000 == 0:
                    sys.stderr.write("%d lines processed...\n" % num_lines)
            yield record
def read_dbsnp_vcf():
    """Placeholder: intended to collect COMMON dbSNP variant IDs.

    The implementation was never finished — the sketch below is commented out
    and the function currently does nothing and returns None.
    """
    # common_variants = set()
    # for record in read_vcf(vcf):
    #     for info in record["INFO"].split(";"):
    #         info = info.split("=")
    #         if info[0] == "COMMON":
    #             if info[1] == "1":
    #                 iden = record["ID"]
    #                 assert
    #                 common_variants.add()
    #                 values.add(info[1])
    # print values
    return
def report(line, log):
    """Write *line* to both standard error and the *log* file object."""
    for sink in (sys.stderr, log):
        sink.write(line)
def get_non_flagged():
    """Return the set of rsIDs listed in the hard-coded dbSNP NonFlagged file.

    Reads one rsID per line, stripping the trailing newline from each.
    """
    # NOTE: `vcf` is assigned but unused in the original; kept for parity.
    vcf="/work/projects/isbsequencing/data/dbsnp/hg19/dbSNP138/00-All.vcf"
    infile = "/work/projects/melanomics/tools/annovar/2015Mar22/annovar/humandb/hg19_snp138NonFlagged.wheader.txt.rsids"
    sys.stderr.write("dbSNP Non-flagged file: %s\n" % infile)
    return {entry[:-1] for entry in open(infile)}
|
[
"ak@uni.fake"
] |
ak@uni.fake
|
4e44339e8aa9e97762dd77cc73821e37bf44948a
|
8613ec7f381a6683ae24b54fb2fb2ac24556ad0b
|
/boot/hard/knight.py
|
906fb70920ec67cbdcdac0eaf27502fa1a44eb0f
|
[] |
no_license
|
Forest-Y/AtCoder
|
787aa3c7dc4d999a71661465349428ba60eb2f16
|
f97209da3743026920fb4a89fc0e4d42b3d5e277
|
refs/heads/master
| 2023-08-25T13:31:46.062197
| 2021-10-29T12:54:24
| 2021-10-29T12:54:24
| 301,642,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
# AtCoder: count the paths of a piece that moves (+1,+2) or (+2,+1) from the
# origin to (x, y), modulo 1e9+7.
x, y = map(int, input().split())
mod = 10 ** 9 + 7
# Each move increases x + y by exactly 3, so (x + y) must be divisible by 3.
if (x + y) % 3 != 0:
    ans = 0
else:
    # With n moves of (+2,+1) and m moves of (+1,+2):
    #   2n + m = x,  n + 2m = y  =>  n = (2x - y)/3,  m = (2y - x)/3.
    # (x + y) % 3 == 0 guarantees both divisions are exact, so use integer
    # division. BUG FIX: the original used float `/ 3` followed by int(),
    # which loses precision (and can give wrong answers) for large inputs.
    n, m = (2 * x - y) // 3, (2 * y - x) // 3
    ans = 0
    if n >= 0 and m >= 0:
        # Answer is C(n + m, n) mod p, built iteratively with Fermat
        # inverses via pow(..., mod - 2, mod).
        ans = 1
        for i in range(min(m, n)):
            ans = ans * (n + m - i) % mod
            ans *= pow(i + 1, mod - 2, mod)
print(ans % mod)
|
[
"yuuya15009@gmail.com"
] |
yuuya15009@gmail.com
|
dbf7273504313e2c22795c47ad2dc7bcb84860ae
|
2f86dda1ede21eb5fd0ad9bd32efb7de4c268efd
|
/citizen/spiders/spider.py
|
71e50c1ffc85a415075fb41f56e5e3ced9e7f37f
|
[] |
no_license
|
SimeonYS/citizen
|
5a08c0108f2d1509ee34c4c40234a4bd406ca026
|
69dd47e459e251e18d6ecd18a8b6b86df64ceb59
|
refs/heads/main
| 2023-04-01T08:50:36.399086
| 2021-03-30T13:18:29
| 2021-03-30T13:18:29
| 353,007,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
import re
import scrapy
from scrapy.loader import ItemLoader
from ..items import CitizenItem
from itemloaders.processors import TakeFirst
pattern = r'(\xa0)?'
class CitizenSpider(scrapy.Spider):
    """Scrape all posts from the Citizens Bank news index page."""
    name = 'citizen'
    start_urls = ['https://www.citizens-bank.com/news/']
    def parse(self, response):
        # Each article is linked from an <h2> heading on the index page.
        post_links = response.xpath('//h2/a/@href').getall()
        yield from response.follow_all(post_links, self.parse_post)
    def parse_post(self, response):
        """Extract date, title and body text from a single article page."""
        date = response.xpath('//span[@class="fl-post-info-date"]/text()').get()
        title = response.xpath('//h1/span/text()').get()
        # NOTE(review): this selector hard-codes a theme node id
        # ("fl-node-599c6b46b54ad"); it silently yields no content if the
        # site's page builder regenerates that id — confirm it is stable.
        content = response.xpath('//div[@class="fl-module fl-module-fl-post-content fl-node-599c6b46b54ad"]//text()').getall()
        content = [p.strip() for p in content if p.strip()]
        # Join the fragments and strip optional non-breaking spaces (see the
        # module-level `pattern`).
        content = re.sub(pattern, "",' '.join(content))
        item = ItemLoader(item=CitizenItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('link', response.url)
        item.add_value('content', content)
        item.add_value('date', date)
        yield item.load_item()
|
[
"simeon.simeonov@ADPVT.com"
] |
simeon.simeonov@ADPVT.com
|
0dbf512149e37a7f23588db495fee8f788b3ee6c
|
edf510cc5bbbe24469d8ff262c022b33b4d80a75
|
/tacotron2/data/text/symbols.py
|
3608a7a394aeac2cffae9ed87a7c4183ba9886b1
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
rheehot/Tacotron2
|
e8b8a4be614708800b10b9fa7829264407510fa8
|
ddbe55b426397d40cadd14f5040c55ba7c25615d
|
refs/heads/master
| 2022-12-26T14:13:39.966498
| 2020-10-06T18:34:57
| 2020-10-06T18:34:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 736
|
py
|
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. '''
from tacotron2.data.text import cmudict
# Inventory of text-input symbols (adapted from keithito/tacotron).
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
# NOTE(review): list order matters — downstream code presumably maps each
# symbol to its index for embeddings; confirm before reordering.
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
|
[
"sh951011@gmail.com"
] |
sh951011@gmail.com
|
8d2bea60b5b2b31185d035449eb07b04603efb6d
|
f76ba1e72ed81d85450f2584ddbb9b033396a3db
|
/alembic/versions/20210306_212825_.py
|
303ec6cd6ce7b1e84e2bc2a92f015e4e69b8404b
|
[
"MIT"
] |
permissive
|
webclinic017/magnet-migrade
|
0e4823c32a6734628b0d3fc119f9c20ea1f9a167
|
b5669b34a6a3b845df8df96dfedaf967df6b88e2
|
refs/heads/main
| 2023-05-07T02:45:20.594756
| 2021-06-08T02:14:24
| 2021-06-08T02:14:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,800
|
py
|
"""empty message
Revision ID: 20210306_212825
Revises: 20210306_151508
Create Date: 2021-03-06 21:28:25.744854
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "20210306_212825"
down_revision = "20210306_151508"
branch_labels = None
depends_on = None
def upgrade():
    """Create `trade_account` and `trade_virtual_account`, then drop the old
    misspelled `trade_acount` table.

    NOTE: rows in the old `trade_acount` table are discarded, not migrated.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "trade_account",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("user_id", sa.Integer(), nullable=True),
        sa.Column("version", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("description", sa.String(length=1024), nullable=False),
        sa.Column("provider", sa.String(length=255), nullable=False),
        sa.Column("market", sa.String(length=255), nullable=False),
        sa.Column("margin", sa.DECIMAL(), nullable=False),
        sa.ForeignKeyConstraint(
            ["user_id"], ["users.id"], onupdate="RESTRICT", ondelete="CASCADE"
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "trade_virtual_account",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("user_id", sa.Integer(), nullable=True),
        sa.Column("trade_account_id", sa.Integer(), nullable=True),
        sa.Column("version", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("description", sa.String(length=1024), nullable=False),
        sa.Column("product", sa.String(length=255), nullable=False),
        sa.Column("periods", sa.Integer(), nullable=False),
        sa.Column("allocated_margin", sa.DECIMAL(), nullable=False),
        sa.Column("allocation_rate", sa.DECIMAL(), nullable=False),
        sa.Column("ask_limit_rate", sa.DECIMAL(), nullable=True),
        sa.Column("ask_loss_rate", sa.DECIMAL(), nullable=True),
        sa.Column("bid_limit_rate", sa.DECIMAL(), nullable=True),
        sa.Column("bid_loss_rate", sa.DECIMAL(), nullable=True),
        sa.Column("position", sa.JSON(), nullable=True),
        sa.ForeignKeyConstraint(
            ["trade_account_id"],
            ["trade_account.id"],
            onupdate="RESTRICT",
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(
            ["user_id"], ["users.id"], onupdate="RESTRICT", ondelete="CASCADE"
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("trade_account_id", "name"),
    )
    # The misspelled table replaced by `trade_account` above.
    op.drop_table("trade_acount")
    # ### end Alembic commands ###
def downgrade():
    """Reverse the upgrade: recreate the old `trade_acount` table (the
    original misspelling is intentional) and drop the two new tables.

    NOTE: data written to the new tables since upgrading is discarded.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "trade_acount",
        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column("version", sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column("name", sa.VARCHAR(length=255), autoincrement=False, nullable=False),
        sa.Column(
            "description", sa.VARCHAR(length=1024), autoincrement=False, nullable=False
        ),
        sa.Column(
            "provider", sa.VARCHAR(length=255), autoincrement=False, nullable=False
        ),
        sa.Column(
            "market", sa.VARCHAR(length=255), autoincrement=False, nullable=False
        ),
        sa.Column(
            "accounts",
            postgresql.JSON(astext_type=sa.Text()),
            autoincrement=False,
            nullable=False,
        ),
        sa.Column("margin", sa.NUMERIC(), autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint("id", name="trade_acount_pkey"),
    )
    op.drop_table("trade_virtual_account")
    op.drop_table("trade_account")
    # ### end Alembic commands ###
|
[
"y-sasahara@ys-method.com"
] |
y-sasahara@ys-method.com
|
31bb29c24f4df0a7d8e86690e72edab6a5cdcf44
|
a2b6bc9bdd2bdbe5871edb613065dd2397175cb3
|
/easy/Linked List Cycle.py
|
ef76a4e5ca516e9b8d0abae3a86665e3f9b96c72
|
[] |
no_license
|
Asunqingwen/LeetCode
|
ed8d2043a31f86e9e256123439388d7d223269be
|
b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee
|
refs/heads/master
| 2022-09-26T01:46:59.790316
| 2022-09-01T08:20:37
| 2022-09-01T08:20:37
| 95,668,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/9/25 0025 15:17
# @Author : 没有蜡笔的小新
# @E-mail : sqw123az@sina.com
# @FileName: Linked List Cycle.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
"""
Given a linked list, determine if it has a cycle in it.
To represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the linked list where tail connects to. If pos is -1, then there is no cycle in the linked list.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: true
Explanation: There is a cycle in the linked list, where tail connects to the second node.
Example 2:
Input: head = [1,2], pos = 0
Output: true
Explanation: There is a cycle in the linked list, where tail connects to the first node.
Example 3:
Input: head = [1], pos = -1
Output: false
Explanation: There is no cycle in the linked list.
Follow up:
Can you solve it using O(1) (i.e. constant) memory?
"""
class ListNode:
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # successor node; None until linked
def listToListNode(data: list) -> ListNode:
    """Build a singly linked list from *data*; return its head (None if empty)."""
    sentinel = ListNode(0)  # dummy head simplifies the append loop
    tail = sentinel
    for value in data:
        tail.next = ListNode(value)
        tail = tail.next
    return sentinel.next
def printListNode(l: ListNode) -> None:
    """Print the list values joined by ' -> ' (empty line for an empty list)."""
    values = []
    node = l
    while node:
        values.append(str(node.val))
        node = node.next
    print(' -> '.join(values))
def hasCycle(head):
    """Floyd's tortoise-and-hare: True iff the list starting at *head* has a cycle.

    Uses O(1) memory: a slow pointer (1 step) and a fast pointer (2 steps)
    meet if and only if a cycle exists.
    """
    if not head:
        return False
    slow = fast = head
    while fast.next and fast.next.next:
        slow = slow.next
        fast = fast.next.next
        if slow == fast:
            return True
    return False
if __name__ == '__main__':
    # Renamed from `input`, which shadowed the builtin of the same name.
    # NOTE(review): no cycle is ever wired up here, so despite the module
    # docstring's "pos = 1" example this always prints False.
    values = [3, 2, 0, -4]
    head = listToListNode(values)
    result = hasCycle(head)
    print(result)
|
[
"sqw123az@sina.com"
] |
sqw123az@sina.com
|
8b90d101850fc61cbc1c2b0fdcc37cce6600ae8c
|
058f1b9c83aa55c4803851f7880de00767491c00
|
/test/test_search.py
|
28b1174531fa143844710aced51a3f97565242a1
|
[
"MIT"
] |
permissive
|
samsammurphy/sat-search
|
f9a017ded12486e95826f62cc67fc2533010cbd5
|
d81e4774a41990b73b55db4b1e05b21062dd957c
|
refs/heads/master
| 2020-05-22T15:17:31.071946
| 2019-02-14T21:37:34
| 2019-02-14T21:37:34
| 186,404,233
| 0
| 0
|
MIT
| 2019-05-13T11:12:42
| 2019-05-13T11:12:42
| null |
UTF-8
|
Python
| false
| false
| 3,123
|
py
|
import os
import glob
import json
import unittest
import satsearch.config as config
from satstac import Item
from satsearch.search import SatSearchError, Search
class Test(unittest.TestCase):
    """Integration tests for satsearch.Search against the live STAC API.

    Fixtures: every '*-item*.json' file next to this test is loaded into
    cls.results once per class.  Several tests also hit the remote API, so
    results depend on the service's catalog contents.
    """
    path = os.path.dirname(__file__)
    # Populated once by setUpClass; shared (class-level) across all tests.
    results = []
    @classmethod
    def setUpClass(cls):
        # Load every saved Item fixture sitting beside this test module.
        fnames = glob.glob(os.path.join(cls.path, '*-item*.json'))
        for fname in fnames:
            with open(fname) as f:
                cls.results.append(json.load(f))
    def get_searches(self):
        """ Initialize and return search object """
        # One Search per fixture, keyed on that fixture's datetime.
        return [Search(datetime=r['properties']['datetime']) for r in self.results]
    def test_search_init(self):
        """ Initialize a search object """
        search = self.get_searches()[0]
        dts = [r['properties']['datetime'] for r in self.results]
        # The datetime kwarg is stored under the API's 'time' key.
        assert(len(search.kwargs) == 1)
        assert('time' in search.kwargs)
        for kw in search.kwargs:
            self.assertTrue(search.kwargs[kw] in dts)
    def test_search_for_items_by_date(self):
        """ Search for specific item """
        search = self.get_searches()[0]
        sids = [r['id'] for r in self.results]
        items = search.items()
        # The fixture datetime is specific enough to match exactly one item.
        assert(len(items) == 1)
        for s in items:
            self.assertTrue(s.id in sids)
    def test_empty_search(self):
        """ Perform search for 0 results """
        # A date with no catalog coverage should report zero hits.
        search = Search(datetime='2001-01-01')
        self.assertEqual(search.found(), 0)
    def test_geo_search(self):
        """ Perform simple query """
        # AOI fixture is re-serialized because Search expects a GeoJSON string.
        with open(os.path.join(self.path, 'aoi1.geojson')) as f:
            aoi = json.dumps(json.load(f))
        search = Search(datetime='2018-09-25', intersects=aoi)
        assert(search.found() == 2)
        items = search.items()
        assert(len(items) == 2)
        assert(isinstance(items[0], Item))
    def test_search_sort(self):
        """ Perform search with sort """
        with open(os.path.join(self.path, 'aoi1.geojson')) as f:
            aoi = json.dumps(json.load(f))
        # '<datetime' requests ascending sort on the datetime field.
        search = Search.search(datetime='2018-01-01/2018-01-15', intersects=aoi, sort=['<datetime'])
        items = search.items()
        assert(len(items) == 33)
    def test_get_items_by_id(self):
        """ Get Items by ID """
        ids = ['LC80340332018034LGN00', 'LC80340322018034LGN00']
        items = Search.items_by_id(ids, collection='landsat-8-l1')
        assert(len(items) == 2)
    def test_get_ids_search(self):
        """ Get Items by ID through normal search """
        ids = ['LC80340332018034LGN00', 'LC80340322018034LGN00']
        search = Search.search(ids=ids, collection='landsat-8-l1')
        items = search.items()
        assert(search.found() == 2)
        assert(len(items) == 2)
    def test_get_ids_without_collection(self):
        """ Searching by ID without a collection must raise SatSearchError. """
        with self.assertRaises(SatSearchError):
            # The raise is expected from Search.search; the items() call
            # only executes if it (unexpectedly) does not raise.
            search = Search.search(ids=['LC80340332018034LGN00'])
            items = search.items()
    def test_query_bad_url(self):
        """ Querying a nonexistent collection endpoint must raise SatSearchError. """
        with self.assertRaises(SatSearchError):
            Search.query(url=os.path.join(config.API_URL, 'collections/nosuchcollection'))
|
[
"matt.a.hanson@gmail.com"
] |
matt.a.hanson@gmail.com
|
dd6003a357da5d293ef2ebc35647330ad910df9f
|
17e9441138f8ad09eab3d017c0fa13fa27951589
|
/blog19-Iris/test07-show.py
|
6f86adb80886cf72e2411f2a744018f4f6a03e69
|
[] |
no_license
|
My-lsh/Python-for-Data-Mining
|
159a09e76b35efd46ca3e32ad6dd2174847d5ec4
|
f2dd0b8f3c4f5f51a10613dff99041bca4fd64c5
|
refs/heads/master
| 2023-03-26T08:48:32.088713
| 2021-03-25T14:57:07
| 2021-03-25T14:57:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
import pandas
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix

# Fetch the classic UCI Iris data set; the CSV has no header row, so the
# column names are supplied explicitly.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)

# Pairwise scatter plots of every numeric column, KDE curves on the diagonal.
scatter_matrix(dataset, alpha=0.2, figsize=(6, 6), diagonal='kde')
plt.show()
|
[
"noreply@github.com"
] |
My-lsh.noreply@github.com
|
bceaaba35117b38f3ff8200721312099d8f48e8f
|
53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61
|
/.history/EMR/SBSteat_20190605153204.py
|
aa2a19ba3ef7566408312123afa1ed7895fdf977
|
[] |
no_license
|
cyc19950621/python
|
4add54894dc81187211aa8d45e5115903b69a182
|
d184b83e73334a37d413306d3694e14a19580cb0
|
refs/heads/master
| 2020-04-11T20:39:34.641303
| 2019-07-02T12:54:49
| 2019-07-02T12:54:49
| 162,078,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
def SBS(A, B):
    """Jaccard-style similarity between the character sets of two strings.

    Returns 1 when either character set contains the other, otherwise
    |A ∩ B| / |A ∪ B| over the sets of characters.  Empty/falsy input
    scores 0 (nothing to compare).
    """
    # Bug fix: the original tested `A == 0`, which is never true for a
    # string, so an empty string fell through to the subset branch
    # (empty set <= anything) and wrongly scored a perfect 1.
    if not A or not B:
        return 0
    sa, sb = set(A), set(B)
    if sa <= sb or sb <= sa:
        return 1
    return len(sa & sb) / len(sa | sb)
def StrToList(A):
    """Return the characters of *A* as a list.

    Idiom fix: `list(A)` replaces the original manual append loop; the
    result is identical for any iterable input.
    """
    return list(A)
import re

# NOTE(review): hard-coded Windows paths; the first literal relies on
# '\D' and '\Z' not being recognized string escapes -- confirm the files exist.
# Context managers close both handles (the original leaked them).
with open('D:\DeepLearning ER\Z1006014.txt', 'r', errors='ignore') as f, \
     open(r'C:\Users\Administrator\Desktop\ICD-10.txt', 'r', errors='ignore') as g:
    lines = f.readlines()
    dics = g.readlines()

# Strip newlines, spaces and punctuation from each record; drop empty lines.
line_re = []
for line in lines:
    line = re.sub('\n', '', line)
    line = re.sub(' ', '', line)
    # Bug fix: the original pattern r'\?|?' is an invalid regex (the trailing
    # '?' quantifies nothing) and raises re.error at runtime.  Strip both the
    # ASCII and the full-width question mark, which is clearly the intent.
    line = re.sub(r'\?|?', '', line)
    line = re.sub(r'\,|\.|;', '', line)
    if line:  # equivalent to the original while-remove('') loop, but O(n)
        line_re.append(line)

# Match each cleaned record against the ICD-10 dictionary: exact
# character-set matches, or near matches with 0.8 < similarity < 1.
out = []
for line in line_re:
    for dic in dics:
        dic = re.sub('\n', '', dic)
        if set(line) == set(dic):
            out.append(dic)
        else:
            sim = SBS(line, dic)  # hoisted: the original computed this twice
            if 0.8 < sim < 1:
                out.append(dic)

import EMRdef
out = EMRdef.delre(out)
# NOTE(review): the original file ended with a bare, truncated statement
# `emr` (a NameError at runtime) -- presumably a lost output call such as
# EMRdef.text_save(...).  Removed; TODO restore the intended final step.
|
[
"1044801968@qq.com"
] |
1044801968@qq.com
|
566ff4df4c434e2553096332053b6321ca1e06a3
|
0e8e9bef32f40f5d0fd8c302293ae66732770b66
|
/2015/pythonlearn/fromLiaoxuefeng/0078aiohttp.py
|
f04a0ede82976e6b39b25ff3a8cc8145103a4a43
|
[] |
no_license
|
tuouo/selfstudy
|
4a33ec06d252388816ad38aa44838e2b728178d4
|
139a0d63477298addfff5b9ea8d39fab96734f25
|
refs/heads/master
| 2021-01-24T18:37:31.023908
| 2018-03-01T02:55:16
| 2018-03-01T02:55:16
| 84,453,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
from aiohttp import web
def index(request):
    """Handler for GET '/': return a static HTML body.

    Fix: the parameter was misspelled 'rerquest'; aiohttp calls handlers
    positionally, so renaming it is safe for callers.
    """
    return web.Response(body=b'<h1>Index</h1>')
def hi(request):
    """Handler for GET '/hello/{name}': sleep briefly, then greet *name*."""
    # Legacy generator-based coroutine (pre-async/await style).
    yield from asyncio.sleep(0.5)
    # {name} is captured by the route pattern registered in init().
    text = '<h1>hello, %s!</h1>' % request.match_info['name']
    return web.Response(body = text.encode('utf-8'))
# NOTE(review): @asyncio.coroutine is deprecated and removed in Python 3.11;
# this code targets the legacy pre-async/await aiohttp API -- confirm runtime.
@asyncio.coroutine
def init(loop):
    """Create the aiohttp application, register routes, and start the server.

    Returns the server object created on 127.0.0.1:8000.
    """
    app = web.Application(loop = loop)
    app.router.add_route('GET', '/', index)
    app.router.add_route('GET', '/hello/{name}', hi)
    srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 8000)
    print('Server started at http:// 127.0.0.1:8000 ...')
    return srv
# Stand the server up on the default event loop, then serve until killed.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop)) # init() is coroutine for aiohttp
loop.run_forever()
|
[
"ltytuotuo@gmail.com"
] |
ltytuotuo@gmail.com
|
0df5d9ca5055a7189593c2e924ba6b44c8cfa9cc
|
172d4078d886b05cea2abdcf7aa458308c48458b
|
/apache/staging.wsgi
|
16c1eba3c84abcd084102c377378178d0c68aecb
|
[] |
no_license
|
elhoyos/forest
|
9066569e42266bcd529c1d6d510119a23a819a65
|
55fca9770a4e24e7c35b8a47d3b27225a760625e
|
refs/heads/master
| 2020-05-16T13:50:33.403078
| 2019-03-26T19:58:26
| 2019-03-26T19:58:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
wsgi
|
# WSGI entry point for the staging deployment: the WSGI server imports this
# file and serves the module-level `application` callable defined at the end.
import os, sys, site
# enable the virtualenv
site.addsitedir('/var/www/forest/forest/ve/lib/python2.6/site-packages')
# paths we might need to pick up the project's settings
sys.path.append('/var/www/')
sys.path.append('/var/www/forest/')
sys.path.append('/var/www/forest/forest/')
# Must be set before any Django machinery reads settings.
os.environ['DJANGO_SETTINGS_MODULE'] = 'forest.settings_staging'
import django.core.handlers.wsgi
import django
# Populate the Django app registry (presumably Django >= 1.7 -- confirm).
django.setup()
# The callable the WSGI server looks for.
application = django.core.handlers.wsgi.WSGIHandler()
|
[
"anders@columbia.edu"
] |
anders@columbia.edu
|
d097f3000c69a5e673b71706ec7a9dd3bdfa960b
|
3b3eac834008a2f4df4506d8dc2ba4364a7b67e2
|
/nail_model_test.py
|
c0f962cae3a7e7d12e3c498b6bca757bb120dbf5
|
[] |
no_license
|
pokepetter/ggj2020_orion_oregano
|
37811f1a8b65b95bada0c1e5f6cd35d57e160e8f
|
439e4e64018e51e52a7cfb3c6c0b1617aba6056f
|
refs/heads/master
| 2020-12-26T11:01:28.343068
| 2020-02-03T18:34:34
| 2020-02-03T18:34:34
| 237,488,524
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
from ursina import *

app = Ursina()

# A nail-like mesh: a 10-sided circle swept along four points up the y-axis.
# The thickness jumps from .2 to 1 between the two closely spaced points
# (y=1.41 and y=1.5) -- presumably to form the nail head; confirm visually.
nail_model = Entity(
    model=Prismatoid(
        base_shape=Circle(10),
        path=(Vec3(0,0,0), Vec3(0,.9+.5,0), Vec3(0,.91+.5,0), Vec3(0,1+.5,0)),
        thicknesses=(.2,.2,1,1),
    ))

# Free-look camera for inspecting the model, then enter the main loop.
EditorCamera()
app.run()
|
[
"pokepetter@gmail.com"
] |
pokepetter@gmail.com
|
29bb0154e69c61470509628247e814b0b21d2bdd
|
7af3888ea81123df246a73aa12c3b88c3b1e8440
|
/darwinist/sdef.py
|
dd58f41cda23cd9c54436cbec32e6dbae69547c3
|
[] |
no_license
|
gregorynicholas/darwinist
|
b29a08b2fe966e662cd94cc25d659406b3dce263
|
2a5e3d027a569b61ad54096463e2d97c95f9c029
|
refs/heads/master
| 2020-05-29T09:53:16.669213
| 2015-03-21T16:37:36
| 2015-03-21T16:37:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,309
|
py
|
#!/usr/bin/env python
"""
Module to create application dictionaries for appscript using command line sdef tool
"""
import os
from subprocess import check_output
from lxml import etree as ET
class SDEFError(Exception):
    """Raised when an sdef extraction fails; stringifies to its first argument."""

    def __str__(self):
        message = self.args[0]
        return message
class SDEF(object):
    """Run the macOS `sdef` tool on an application bundle and expose its
    scripting definition as an appscript-style terminology dump.

    The generated text is available via the virtual `terms` attribute.
    """

    def __init__(self, path):
        self.path = path
        if not os.path.isdir(path):
            raise SDEFError('Not a directory: %s' % path)
        # `sdef <path>` prints the app's scripting-definition XML on stdout.
        self.tree = ET.fromstring(check_output(['sdef', path]))

    def __getattr__(self, attr):
        # Only the virtual 'terms' attribute is synthesized here.
        if attr == 'terms':
            return self.__generate_terms()
        # Bug fix: the original fell through and implicitly returned None for
        # every unknown attribute, breaking hasattr()/getattr() semantics.
        raise AttributeError(attr)

    def __generate_terms(self):
        """Walk the sdef XML and render the terminology module text."""
        output = 'version = 1.1\npath = %s' % self.path
        classes = []
        enums = []
        properties = []
        elements = []
        commands = []
        for suite in self.tree.xpath('suite'):
            for node in suite.xpath('class'):
                name = node.get('name').replace(' ', '_')
                code = node.get('code')
                classes.append((name, code))
                # Only classes that inherit contribute a plural/element entry.
                if node.get('inherits') is None:
                    continue
                element = node.get('plural').replace(' ', '_')
                elements.append((element, code))
            for node in suite.xpath('enumeration/enumerator'):
                name = node.get('name').replace(' ', '_')
                code = node.get('code')
                enums.append((name, code))
            for node in suite.xpath('class/property'):
                name = node.get('name').replace(' ', '_')
                code = node.get('code')
                properties.append((name, code))
            for node in suite.xpath('command'):
                name = node.get('name').replace(' ', '_')
                code = node.get('code')
                cparams = []
                for p in node.xpath('parameter'):
                    pname = p.get('name').replace(' ', '_')
                    pcode = p.get('code')
                    cparams.append((pname, pcode))
                commands.append((name, code, cparams))
        output += '\nclasses = %s' % classes
        output += '\nenums = %s' % enums
        output += '\nproperties = %s' % properties
        output += '\nelements = %s' % elements
        output += '\ncommands = %s' % commands
        return output
|
[
"hile@iki.fi"
] |
hile@iki.fi
|
9646828aff8f61b37679c1833438c421c508b961
|
46234633cc7b66684af52f5b131834955115c80e
|
/train/gen/adv/models/particles/v4_Adam_trunc4_limit100/lib.py
|
12d4000398f71ed515e5ef88ffa57b4285c6e13c
|
[
"MIT"
] |
permissive
|
sammysiegel/SubtLeNet
|
80d2ee5d3beb1699702ddb78162d10eee95eb051
|
94d1507a8a7c60548b59400109b6c4086ad83141
|
refs/heads/master
| 2022-09-05T05:25:53.701377
| 2020-06-01T15:39:36
| 2020-06-01T15:39:36
| 268,620,433
| 0
| 0
|
MIT
| 2020-06-01T20:04:08
| 2020-06-01T20:04:08
| null |
UTF-8
|
Python
| false
| false
| 6,556
|
py
|
#!/usr/bin/env python2.7
from _common import *
from ..generators.gen import make_coll, generate, get_dims
from ..generators import gen as generator
'''
some global definitions
'''
NEPOCH = 50    # training epochs for fit_generator
VERSION = 4
# Output locations; BASEDIR must be set in the environment, MODELDIR may be.
MODELDIR = environ.get('MODELDIR', 'models/') + '/particles/'
BASEDIR = environ['BASEDIR']
OPTIMIZER = 'Adam'   # looked up on keras_objects by name at compile time
_APOSTLE = None      # run tag; filled in by instantiate()
# Generator options shared by all setup_* helpers below.
train_opts = {
    'learn_mass' : True,
    'learn_pt' : True,
}
# must be called!
def instantiate(trunc=4, limit=50):
    """Configure the generator, create the model directory, snapshot this
    trainer's source, and build the data collections.

    Must be called before any setup_* helper (it sets the global _APOSTLE
    run tag).  Returns (data, dims): [top, qcd] collections plus the input
    dimensions reported by get_dims.
    """
    global _APOSTLE
    generator.truncate = trunc
    config.limit = limit
    # Tag encodes version + generator settings so different runs don't collide.
    _APOSTLE = 'v%s_trunc%i_limit%i'%(str(VERSION), generator.truncate, config.limit)
    system('mkdir -p %s/%s/'%(MODELDIR,_APOSTLE))
    # Archive the exact training code next to the models for reproducibility.
    system('cp -v %s %s/%s/trainer.py'%(sys.argv[0], MODELDIR, _APOSTLE))
    system('cp -v %s %s/%s/lib.py'%(__file__.replace('.pyc','.py'), MODELDIR, _APOSTLE))

    # instantiate data loaders
    top = make_coll(BASEDIR + '/PARTITION/Top_*_CATEGORY.npy')
    qcd = make_coll(BASEDIR + '/PARTITION/QCD_*_CATEGORY.npy')
    data = [top, qcd]
    dims = get_dims(top)

    # Persist the generator configuration so later inference can reproduce it.
    with open('%s/%s/setup.py'%(MODELDIR, _APOSTLE),'w') as fsetup:
        fsetup.write('''
from subtlenet import config
from subtlenet.generators import gen as generator
config.limit = %i
generator.truncate = %i
'''%(config.limit, generator.truncate))

    return data, dims
'''
first build the classifier!
'''
# set up data
def setup_data(data):
    """Return train/validation/test generators using the standard options."""
    opts = dict(train_opts)
    return {
        'train': generate(data, partition='train', batch=500, **opts),
        'validation': generate(data, partition='validate', batch=2000, **opts),
        'test': generate(data, partition='test', batch=10, **opts),
    }
def setup_adv_data(data):
    """Return generators for adversarial training (adds mass decorrelation)."""
    opts = {'decorr_mass': True}
    opts.update(train_opts)  # shared options win on any key collision
    return {
        'train': generate(data, partition='train', batch=1000, **opts),
        'validation': generate(data, partition='validate', batch=2000, **opts),
        'test': generate(data, partition='test', batch=10, **opts),
    }
# this is purely a discriminatory classifier
def build_classifier(dims):
    """Build and compile the particle-level classifier.

    dims: dimensions from get_dims; dims[1] and dims[2] size the per-event
    particle input (assumed n_particles x n_features -- TODO confirm
    against get_dims).  Returns a compiled Model mapping
    [particles, mass, pt] -> softmax over config.n_truth classes.
    """
    input_particles = Input(shape=(dims[1], dims[2]), name='input_particles')
    input_mass = Input(shape=(1,), name='input_mass')
    input_pt = Input(shape=(1,), name='input_pt')
    inputs = [input_particles, input_mass, input_pt]

    # now build the particle network: conv stack -> LSTM -> dense embedding,
    # with batch-norm between every stage.
    h = BatchNormalization(momentum=0.6)(input_particles)
    h = Conv1D(32, 2, activation='relu', kernel_initializer='lecun_uniform', padding='same')(h)
    h = BatchNormalization(momentum=0.6)(h)
    h = Conv1D(16, 4, activation='relu', kernel_initializer='lecun_uniform', padding='same')(h)
    h = BatchNormalization(momentum=0.6)(h)
    h = CuDNNLSTM(100)(h)   # GPU-only LSTM variant
    h = BatchNormalization(momentum=0.6)(h)
    h = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(h)
    particles_final = BatchNormalization(momentum=0.6)(h)

    # merge everything: learned particle embedding plus raw mass and pt.
    to_merge = [particles_final, input_mass, input_pt]
    h = concatenate(to_merge)
    for i in xrange(1,5):
        h = Dense(50, activation='tanh')(h)
#        if i%2:
#            h = Dropout(0.1)(h)
        h = BatchNormalization(momentum=0.6)(h)

    y_hat = Dense(config.n_truth, activation='softmax', name='y_hat')(h)
    classifier = Model(inputs=inputs, outputs=[y_hat])
    #classifier.compile(optimizer=Adam(lr=0.0002),
    # Optimizer class is resolved by name (module-level OPTIMIZER constant).
    classifier.compile(optimizer=getattr(keras_objects, OPTIMIZER)(lr=0.0005),
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])

    print '########### CLASSIFIER ############'
    classifier.summary()
    print '###################################'

    return classifier
def build_adversary(clf, loss, scale, w_clf, w_adv):
    """Wrap a compiled classifier with adversarial decorrelation head(s).

    clf:   compiled classifier whose first output is y_hat
    loss:  loss applied to each adversary output
    scale: scale passed through to the Adversary layer
    w_clf / w_adv: loss weights for the classification vs adversary terms
    """
    y_hat = clf.outputs[0]
    inputs= clf.inputs
    # Adversary head(s) fed from y_hat -- presumably predicting the binned
    # decorrelation variable (n_decorr_bins); confirm against Adversary impl.
    kin_hats = Adversary(config.n_decorr_bins, n_outputs=1, scale=scale)(y_hat)
    adversary = Model(inputs=inputs,
                      outputs=[y_hat]+kin_hats)
    adversary.compile(optimizer=getattr(keras_objects, OPTIMIZER)(lr=0.00025),
                      loss=['categorical_crossentropy']+[loss for _ in kin_hats],
                      loss_weights=[w_clf]+[w_adv for _ in kin_hats])
    print '########### ADVERSARY ############'
    adversary.summary()
    print '###################################'
    return adversary
# train any model
def train(model, name, train_gen, validation_gen, save_clf_params=None):
    """Fit *model* with best-checkpoint callbacks and a final snapshot save.

    save_clf_params: if given, kwargs for PartialModelCheckpoint so the
    embedded classifier (its 'partial_model' entry) is checkpointed and
    snapshotted instead of the full (e.g. adversarial) model.
    """
    if save_clf_params is not None:
        callbacks = [PartialModelCheckpoint(filepath='%s/%s/%s_clf_best.h5'%(MODELDIR,_APOSTLE,name),
                                            save_best_only=True, verbose=True,
                                            **save_clf_params)]
        save_clf = save_clf_params['partial_model']
    else:
        save_clf = model
        callbacks = []
    callbacks += [ModelCheckpoint('%s/%s/%s_best.h5'%(MODELDIR,_APOSTLE,name),
                                  save_best_only=True, verbose=True)]

    def save_classifier(name_=name, model_=save_clf):
        # Defaults bind the current name/model at definition time.
        model_.save('%s/%s/%s.h5'%(MODELDIR,_APOSTLE,name_))

    def save_and_exit(signal=None, frame=None):
        # Signal-handler signature; note the 'signal' param shadows the module.
        save_classifier()
        exit(1)
    # Save a snapshot if the user interrupts training with Ctrl-C.
    signal.signal(signal.SIGINT, save_and_exit)

    model.fit_generator(train_gen,
                        steps_per_epoch=3000,
                        epochs=NEPOCH,
                        validation_data=validation_gen,
                        validation_steps=2000,
                        callbacks = callbacks,
                       )
    save_classifier()
def infer(modelh5, name):
    """Load a saved model and write its response over the test partition.

    modelh5: path to the saved .h5 model; name: branch name passed to
    coll.infer for the output.
    """
    model = load_model(modelh5,
                       custom_objects={'DenseBroadcast':DenseBroadcast})
    model.summary()

    coll = generator.make_coll(BASEDIR + '/PARTITION/*_CATEGORY.npy')

    # Normalizations must match what the generators applied during training.
    msd_norm_factor = 1. / config.max_mass
    pt_norm_factor = 1. / (config.max_pt - config.min_pt)
    msd_index = config.gen_singletons['msd']
    pt_index = config.gen_singletons['pt']

    def predict_t(data):
        # Normalize mass/pt, truncate particle features to the training
        # configuration, and score; column config.n_truth-1 is taken as the
        # signal probability (presumably the top class -- confirm).
        msd = data['singletons'][:,msd_index] * msd_norm_factor
        pt = (data['singletons'][:,pt_index] - config.min_pt) * pt_norm_factor
        if msd.shape[0] > 0:
            particles = data['particles'][:,:config.limit,:generator.truncate]
            r_t = model.predict([particles,msd,pt])[:,config.n_truth-1]
        else:
            # Empty batch: return an empty array rather than calling predict.
            r_t = np.empty((0,))
        return r_t

    print 'loaded from',modelh5,
    print 'saving to',name
    coll.infer(['singletons','particles'], f=predict_t, name=name, partition='test')
|
[
"sidn@mit.edu"
] |
sidn@mit.edu
|
3f51f53d28ce16889a1cd818a02b4b9acc096912
|
7bf1dc58ba0884ed957efdb5459ae44851b2b36e
|
/practice_450/strings/15_paranthesis_checker.py
|
907deb421b02d77c80a2ca9f344a5fbccafb12d0
|
[] |
no_license
|
ksaubhri12/ds_algo
|
672260f07f41bcfc33f8ac23a64085a1f27ab4a5
|
46505b89371cae3321f48609dd755c7e5cfed302
|
refs/heads/master
| 2023-05-12T08:37:06.789111
| 2023-05-03T03:06:49
| 2023-05-03T03:06:49
| 211,793,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
def parenthesis_checker(string_value: str):
    """Return 'balanced' if every bracket in *string_value* is properly
    matched and nested, else 'not balanced'.

    Any character that is not an opening bracket is treated as a closer
    (matching the original behavior); a lone space therefore makes a
    string unbalanced unless brackets are already open.

    Improvements over the original: single bracket map (it was duplicated
    in check_pair), early returns instead of a flag variable + break, and
    no dependency on the sibling check_pair helper.
    """
    pairs = {'[': ']', '(': ')', '{': '}'}
    stack = []
    for ch in string_value:
        if ch in pairs:
            stack.append(ch)
        elif not stack or pairs[stack.pop()] != ch:
            # Closer with nothing open, or mismatched pair.
            return 'not balanced'
    # Balanced only if nothing is left open.
    return 'balanced' if not stack else 'not balanced'
def check_pair(opening_element, closing_element):
    """True iff *closing_element* is the matching closer for *opening_element*."""
    match_for = {'[': ']', '(': ')', '{': '}'}
    return match_for[opening_element] == closing_element
if __name__ == '__main__':
    # Demo: a balanced string, a mismatched one, and an unclosed one.
    for sample in ('[()]{}{[()()]()}', ' [(])', '('):
        print(parenthesis_checker(sample))
|
[
"kalpesh@getvokal.com"
] |
kalpesh@getvokal.com
|
98ea4fafb6fc58089c1c4fd892af8ba9a7e65f51
|
bdaed512916fcf96e5dc915538fe8598aeb2d3cf
|
/mcex/distributions/special.py
|
a97815d40b32299438cac098d8f416c9cb2e2a25
|
[] |
no_license
|
jsalvatier/mcex
|
9657cc2e8083f4e4dd013baaaceba08f9a48754e
|
040f49bfd6eb467ef4d50d15de25033b1ba52c55
|
refs/heads/master
| 2021-06-18T19:02:07.055877
| 2017-01-22T01:10:01
| 2017-01-22T01:10:01
| 1,455,409
| 9
| 3
| null | 2012-06-21T18:07:36
| 2011-03-08T17:02:42
|
Python
|
UTF-8
|
Python
| false
| false
| 3,820
|
py
|
'''
Created on Mar 17, 2011
@author: jsalvatier
'''
from theano import scalar,tensor
import numpy
from scipy import special, misc
class GammaLn(scalar.UnaryScalarOp):
    """
    Compute gammaln(x)
    """
    @staticmethod
    def st_impl(x):
        # Python-side reference implementation (scipy's log-gamma).
        return special.gammaln(x)
    def impl(self, x):
        return GammaLn.st_impl(x)
    def grad(self, inp, grads):
        # d/dx lgamma(x) = psi(x) (digamma), provided by the Psi op below.
        x, = inp
        gz, = grads
        return [gz * scalar_psi(x)]
    def c_code(self, node, name, inp, out, sub):
        # C path: libm's lgamma handles both supported float widths.
        x, = inp
        z, = out
        if node.inputs[0].type in [scalar.float32, scalar.float64]:
            return """%(z)s =
                lgamma(%(x)s);""" % locals()
        raise NotImplementedError('only floatingpoint is implemented')
    def __eq__(self, other):
        # All instances of this op are interchangeable (stateless).
        return type(self) == type(other)
    def __hash__(self):
        return hash(type(self))
# Elementwise log-gamma: the scalar op broadcast over tensors.
scalar_gammaln = GammaLn(scalar.upgrade_to_float, name='scalar_gammaln')
gammaln = tensor.Elemwise(scalar_gammaln, name='gammaln')
class Psi(scalar.UnaryScalarOp):
    """
    Compute derivative of gammaln(x)
    """
    @staticmethod
    def st_impl(x):
        # Python-side reference implementation (scipy's digamma).
        return special.psi(x)
    def impl(self, x):
        return Psi.st_impl(x)
    #def grad() no gradient now
    def c_support_code(self):
        # C digamma; include guard so the helper is emitted only once even if
        # several Psi nodes appear in the same compiled module.
        return (
            """
            #ifndef _PSIFUNCDEFINED
            #define _PSIFUNCDEFINED
            double _psi(double x){

            /*taken from
            Bernardo, J. M. (1976). Algorithm AS 103: Psi (Digamma) Function. Applied Statistics. 25 (3), 315-317.
            http://www.uv.es/~bernardo/1976AppStatist.pdf */

            double y, R, psi_ = 0;
            double S  = 1.0e-5;
            double C = 8.5;
            double S3 = 8.333333333e-2;
            double S4 = 8.333333333e-3;
            double S5 = 3.968253968e-3;
            double D1 = -0.5772156649 ;

            y = x;

            if (y <= 0.0)
                return psi_;

            if (y <= S )
                return D1 - 1.0/y;

            while (y < C){
                psi_ = psi_ - 1.0 / y;
                y = y + 1;}

            R = 1.0 / y;
            psi_ = psi_ + log(y) - .5 * R ;
            R= R*R;
            psi_ = psi_ - R * (S3 - R * (S4 - R * S5));

            return psi_;}
            #endif
            """ )
    def c_code(self, node, name, inp, out, sub):
        x, = inp
        z, = out
        if node.inputs[0].type in [scalar.float32, scalar.float64]:
            return """%(z)s =
                _psi(%(x)s);""" % locals()
        raise NotImplementedError('only floatingpoint is implemented')
    def __eq__(self, other):
        # All instances of this op are interchangeable (stateless).
        return type(self) == type(other)
    def __hash__(self):
        return hash(type(self))
# Elementwise digamma: the scalar op broadcast over tensors.
scalar_psi = Psi(scalar.upgrade_to_float, name='scalar_psi')
psi = tensor.Elemwise(scalar_psi, name='psi')
class FactLn(scalar.UnaryScalarOp):
    """
    Compute factln(x)
    """
    @staticmethod
    def st_impl(x):
        # NOTE(review): scipy.misc.factorial was removed in modern SciPy;
        # special.gammaln(x + 1) is the usual replacement -- confirm before
        # running on a current environment.
        return numpy.log(misc.factorial(x))
    def impl(self, x):
        return FactLn.st_impl(x)
    #def grad() no gradient now
    def c_support_code(self):
        # NOTE(review): this calls lgammln(), which is not a standard C
        # function (libm provides lgamma); it would fail to link unless a
        # definition is supplied elsewhere -- confirm.
        # The first 100 values are memoized in a static table.
        return (
        """
        double factln(int n){
            static double cachedfl[100];
            if (n < 0) return -1.0; // need to return -inf here at some point
            if (n <= 1) return 0.0;
            if (n < 100) return cachedfl[n] ? cachedfl[n] : (cachedfl[n]=lgammln(n + 1.0));
            else return lgammln(n+1.0);}
        """ )
    def c_code(self, node, name, inp, out, sub):
        x, = inp
        z, = out
        if node.inputs[0].type in [scalar.float32, scalar.float64]:
            return """%(z)s =
            factln(%(x)s);""" % locals()
        raise NotImplementedError('only floatingpoint is implemented')
    def __eq__(self, other):
        # All instances of this op are interchangeable (stateless).
        return type(self) == type(other)
    def __hash__(self):
        return hash(type(self))
# Bug fix: scalar_factln was constructed from Psi (a copy/paste of the block
# above), so the exported `factln` actually computed the digamma function.
# Build it from FactLn as clearly intended.
scalar_factln = FactLn(scalar.upgrade_to_float, name='scalar_factln')
factln = tensor.Elemwise(scalar_factln, name='factln')
|
[
"jsalvatier@gmail.com"
] |
jsalvatier@gmail.com
|
52a83fa3c2634369a6ddf6a2f9101569c9c107c6
|
32a6db4d595ef4d308ac0e2ef37c57f65a777bfc
|
/ZYCami_00_彭小钗/PO/Wx_Element.py
|
5f9021ea65cb3db03881e667b4966af054d6d0fb
|
[] |
no_license
|
wangdan377/Python_UI
|
1c8f0b3d46272d72f849f242c39e035c6b20720b
|
6c3e23b301ffe14cbd27a5211e48c8f79169dcf9
|
refs/heads/master
| 2023-02-17T02:37:34.353523
| 2021-01-19T11:58:22
| 2021-01-19T11:58:22
| 311,855,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,981
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from selenium.webdriver.common.by import By
from PO.Base_page import Base_Page
class Wx_Page(Base_Page):
    """Page object for the WeChat payment flow (Appium).

    Locators are (By, value) tuples; the XPath ones are brittle absolute
    paths recorded against a specific WeChat UI build.  Locator values
    (including Chinese accessibility ids) must not be translated.
    """
    # WeChat Pay option in the app's payment-method list
    File_wx = (By.ID, 'com.zhiyun.cama:id/btn_wx')
    # Close button on the WeChat payment-method sheet
    File_wx_closed = (By.XPATH,'/hierarchy/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.view.ViewGroup/android.widget.FrameLayout')
    # "Pay now" button
    File_pay_wx = (By.ID, '立即支付')
    # Button that opens the payment-method (Balance) selector
    File_Change_button = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[4]/android.view.ViewGroup[2]/com.tencent.mm.ui.MMImageView')
    # Pay with WeChat Balance
    File_Change_pay = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[2]/android.widget.ScrollView/android.view.ViewGroup/android.view.ViewGroup[1]/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[2]/android.widget.TextView')
    # Pay with a China Construction Bank card
    File_Construction_pay = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[2]/android.widget.ScrollView/android.view.ViewGroup/android.view.ViewGroup[2]/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup')
    # Pay with a Bank of Jiangsu card
    File_jsu_pay = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[2]/android.widget.ScrollView/android.view.ViewGroup/android.view.ViewGroup[3]/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup')
    # "Enter payment password" input field
    File_pay_password = (By.XPATH,'//android.widget.FrameLayout[@content-desc=\"当前所在页面,支付\"]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout/android.widget.FrameLayout[1]/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup/android.view.ViewGroup[5]/android.widget.RelativeLayout/android.widget.RelativeLayout')
    # Close button on the payment-password dialog
    File_closed_password = (By.XPATH,'//android.view.ViewGroup[@content-desc=\"关闭\"]/android.view.ViewGroup/com.tencent.mm.ui.MMImageView')
    # X at the top-left of the payment page
    File_pay_x = (By.ID, '返回')
    # "Continue payment" button
    File_connectinue_pay = (By.ID, 'com.tencent.mm:id/dom')
    # "Give up" (abandon payment) button
    File_give_up_pay = (By.ID, 'com.tencent.mm:id/doz')
    # "Return to merchant" button after a successful payment
    File_return_app = (By.ID, '返回商家')
    # "Retry" button after a wrong-password error
    File_doz = (By.ID, 'com.tencent.mm:id/doz')

    # Click the X at the top-left of the payment page
    def click_File_pay_x(self):
        self.find_element(*self.File_pay_x).click()
    # Click "continue payment"
    def click_File_connectinue_pay(self):
        self.find_element(*self.File_connectinue_pay).click()
    # Click "give up" (abandon the payment)
    def click_File_give_up_pay(self):
        self.find_element(*self.File_give_up_pay).click()
    # Select WeChat Pay as the payment method
    def click_File_wx(self):
        self.find_element(*self.File_wx).click()
    # Close the WeChat payment-method sheet
    def click_File_wx_closed(self):
        self.find_element(*self.File_wx_closed).click()
    # Click "pay now"
    def click_File_pay_wx(self):
        self.find_element(*self.File_pay_wx).click()
    # Open the payment-method (Balance) selector
    def click_File_Change_button(self):
        self.find_element(*self.File_Change_button).click()
    # Choose Balance as the payment method
    def click_File_Change_pay(self):
        self.find_element(*self.File_Change_pay).click()
    # Choose the China Construction Bank card
    def click_File_Construction_pay(self):
        self.find_element(*self.File_Construction_pay).click()
    # Choose the Bank of Jiangsu card
    def click_File_jsu_pay(self):
        self.find_element(*self.File_jsu_pay).click()
    # Focus the payment-password input field
    def click_File_pay_password(self):
        self.find_element(*self.File_pay_password).click()
    # Close the payment-password dialog
    def click_File_closed_password(self):
        self.find_element(*self.File_closed_password).click()
    # Return to the merchant app after a successful payment
    def click_File_return_app(self):
        self.find_element(*self.File_return_app).click()
    # Retry after entering a wrong password
    def click_File_doz(self):
        self.find_element(*self.File_doz).click()
|
[
"1065913054@qq.com"
] |
1065913054@qq.com
|
edcb570bd7fd632da38d368049889cb40b88c7f0
|
2d0da5d8f45e1906bb2a2eee0901e7fddd5dc7ad
|
/scripts/run_scripts/pha/run_pha_full1.py
|
d4a5fdb586a3f61ebb2138440d1c83f19148e4f7
|
[
"MIT"
] |
permissive
|
akazachk/pha
|
09afd2fa6764ef9133a8ae91bb189e2896e076c6
|
4120f70554cb0a149d5ab52e04409302e78059fa
|
refs/heads/master
| 2021-09-25T01:02:42.488470
| 2021-09-15T17:51:34
| 2021-09-15T17:51:34
| 194,751,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,410
|
py
|
####
## The inputted file is either [filename].instances or [file].batch.
## (The extension is not important.)
## The first line of this file will be the directory ``stub'',
## and output will be sent to ${PROJ_DIR}/results/instances/stub if batch mode is off,
## and to ${PROJ_DIR}/results/instances/batches/stub/batchname if it is on.
## Each line contains either a relative input path to an instance (with or without the extension) or a batch name.
## The path is relative to ${PROJ_DIR}/data/instances, e.g., the line will be miplib2/bm23.
## A batch name is distinguished by having the batch end with a '/', e.g., '2/' or 'batch2/'.
## Set up proper path variables
import os
PROJ_DIR = os.path.abspath(os.environ['PHA_DIR'])  # requires PHA_DIR in the environment
EXECUTABLE = PROJ_DIR + "/Release/PHA"

## Solver options (each list is one axis of the parameter sweep below)
phaActOptions = [1]
numAlg2Rounds = [0,1,2,3,4,-1,-2,-3,-4]
#numRaysToBeCut = [0]
cutLimit = [1000] # negative means divide the limit across splits
useSplitShare = [0,1000]
numCutsIterBilinear = [0]
useUnitVectorsHeur = [0,1]
useCutVertHeur = [0,1]
useTightPointsHeur = [0,1000]
usePresolve = [0,1]

## Set up output and input folders
results_path = PROJ_DIR + '/results'
paramfile = PROJ_DIR + '/data/params/pha_params.txt'
#instances_path = os.getcwd()
instances_path = PROJ_DIR + "/data/instances"
instances_file = instances_path + '/' + "test.instances"
outinfo_stub = 'pha-full' + str(phaActOptions[0])
outinfo_dir = results_path

## Get arguments
from sys import argv
use_batches = False # set to true/false depending on if mps files are all in one folder or divided up into subfolders
if (len(argv) > 1):
    use_batches = True if argv[1] in ['true', 'True', '1', 't'] else False
# NOTE(review): this guard can never fire -- use_batches is only True when
# len(argv) > 1, so len(argv) < 2 is impossible here; presumably it was meant
# to require a third argument (the instances file).  Confirm intent.
if (use_batches and len(argv) < 2):
    raise ValueError('When using batches, specifying the folder is required')
if (len(argv) > 2):
    instances_file = os.path.abspath(argv[2])

## Where are the instances?
# First non-empty line of the file is the output directory stub; the rest
# are relative instance paths or batch names (distinguished by trailing '/').
with open(instances_file) as f_in:
    list_to_use = list(filter(None, (line.rstrip() for line in f_in)))

## The first line will be the name of the directory we should use
dir_stub = list_to_use[0]
list_to_use = list_to_use[1:]
#instances_file_name = instances_file.split('/')[-1]
#instances_file_name_split_by_dot = instances_file_name.split('.')
#dir_stub = '.'.join(instances_file_name_split_by_dot[0:len(instances_file_name_split_by_dot)-1])
if use_batches:
    dir_stub = "batches/" + dir_stub

## Finalize outinfo
outinfo_dir = outinfo_dir + '/' + dir_stub
os.system("mkdir -p " + outinfo_dir) # make the dir if it does not exist
## Choose order so that deepest for loop are the results you want to see first, fixing all others
batch_name = ''
for usepresolve in usePresolve:
# for numrays in numRaysToBeCut:
for cutlimit in cutLimit:
for numiterblp in numCutsIterBilinear:
for splitshareopt in useSplitShare:
for usecutvert in useCutVertHeur:
for useunitvec in useUnitVectorsHeur:
for usetight in useTightPointsHeur:
for numalg2 in numAlg2Rounds:
for actoption in phaActOptions:
## Skip if all zeroes for cut generation
if (splitshareopt == 0) and (numiterblp == 0) and (useunitvec == 0) and (usecutvert == 0) and (usetight == 0):
continue
for inst in list_to_use:
## Check if batch name
if (inst[-1] == '/'):
batch_name = inst
continue
## Check if need to add "mps"
inst_name = inst
if (inst[-4:] != '.mps') and (inst[-3:] != '.lp') and (inst[-7:] != '.mps.gz') and (inst[-6:] != '.lp.gz'):
inst_name = inst_name + '.mps'
## Run on instances_path/inst.mps
infile = instances_path + '/' + inst_name
curr_out_dir = outinfo_dir + '/' + batch_name
outinfo = curr_out_dir + outinfo_stub
## In case the out directory does not exist
os.system("mkdir -p " + curr_out_dir)
## Arguments
extraparams = \
' --opt_file=' + PROJ_DIR + '/data/ip_opt.csv' + \
" --hplane_scoring_fn=" + str(actoption) + \
" --num_alg2_rounds=" + str(numalg2) + \
" --cut_limit=" + str(cutlimit) + \
" --use_split_share=" + str(splitshareopt) + \
" --num_cuts_iter_bilinear=" + str(numiterblp) + \
" --use_unit_vectors_heur=" + str(useunitvec) + \
" --use_cut_vert_heur=" + str(usecutvert) + \
" --use_tight_points_heur=" + str(usetight) + \
" --cut_presolve=" + str(usepresolve) + \
" --rounds=" + str(1)
print(EXECUTABLE + " -i " + infile + " -o " + curr_out_dir + " --log_file=" + outinfo + " -p " + paramfile + extraparams)
os.system(EXECUTABLE + " -i " + infile + " -o " + curr_out_dir + " --log_file=" + outinfo + " -p " + paramfile + extraparams + " > /dev/null 2>&1")
|
[
"None"
] |
None
|
8a46b42a2d6965726648fa8823414ef23617c636
|
8c382ed6073bfc2dc3fda97d8344628ac669d548
|
/api/views.py
|
9cd911c86b7585e256c99cc284ffdbbc84072291
|
[] |
no_license
|
dmaras1808/ghiro
|
7a428d69944a2e4173b6603240a2c195c21ed7f4
|
439d395a1311ac6f802d0ee1402d37e99aeb5f95
|
refs/heads/master
| 2021-01-24T01:59:59.225272
| 2015-08-03T21:40:28
| 2015-08-03T21:40:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,569
|
py
|
# Ghiro - Copyright (C) 2013-2015 Ghiro Developers.
# This file is part of Ghiro.
# See the file 'docs/LICENSE.txt' for license terms.
import json
from django.views.decorators.http import require_POST
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from ghiro.common import log_activity
from ghiro.authorization import api_authenticate
from analyses.models import Case, Analysis
from lib.db import save_file
from lib.utils import create_thumb
@require_POST
@csrf_exempt
def new_case(request):
    """Creates a new case."""
    user = api_authenticate(request.POST.get("api_key"))

    # Guard clause: a case without a name is rejected outright.
    case_name = request.POST.get("name")
    if not case_name:
        return HttpResponse("Request not valid", status=400)

    case = Case(name=case_name,
                description=request.POST.get("description"),
                owner=user)
    case.save()

    # Auditing.
    log_activity("C",
                 "Created new case via API %s" % case.name,
                 request,
                 user)

    return HttpResponse(json.dumps({"id": case.id}), content_type="application/json")
@require_POST
@csrf_exempt
def new_image(request):
    """Upload a new image."""
    user = api_authenticate(request.POST.get("api_key"))

    case_id = request.POST.get("case_id")
    if case_id:
        case = get_object_or_404(Case, pk=case_id)
        # Security check.
        if not case.can_write(user):
            return HttpResponse("You are not authorized to add image to this", status=400)
        if case.state == "C":
            return HttpResponse("You cannot add an image to a closed case", status=400)
    else:
        case = None

    # Hoist the repeated upload-object lookups into locals.
    image = request.FILES["image"]
    tmp_path = image.temporary_file_path()
    task = Analysis.add_task(tmp_path,
                             file_name=image.name, case=case, user=user,
                             content_type=image.content_type,
                             image_id=save_file(file_path=tmp_path,
                                                content_type=image.content_type),
                             thumb_id=create_thumb(tmp_path))

    # Auditing.
    log_activity("I",
                 "Created new analysis via API %s" % task.file_name,
                 request,
                 user=user)

    return HttpResponse(json.dumps({"id": task.id}), content_type="application/json")
|
[
"alessandro@tanasi.it"
] |
alessandro@tanasi.it
|
5ef49b95a868b1d76d979ef9518a54c565787183
|
481517a085014aefba963d29ff52b56bef6a393e
|
/abstractdemo.py
|
77dfa21d3b7352b51566e8f50caccc1c0957b0c9
|
[] |
no_license
|
27Saidou/cours_python
|
6d916fe63652e0463bd995dbb9a3ec72c74f4c3d
|
91820b826ced24bed98525429096e32ff4c036db
|
refs/heads/main
| 2022-01-09T09:58:32.514032
| 2022-01-04T18:37:56
| 2022-01-04T18:37:56
| 214,328,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from abc import ABC, abstractmethod
class A(ABC):
    """Abstract base holding one operand; subclasses supply add()/sub()."""

    def __init__(self, value):
        # Operand used by the concrete add()/sub() implementations.
        self.value = value

    @abstractmethod
    def add(self):
        """Return self.value combined with an implementation-defined offset."""

    @abstractmethod
    def sub(self):
        """Return self.value reduced by an implementation-defined offset."""
class Y(A):
    """Concrete A: add() offsets the value by +100, sub() by -10."""

    def add(self):
        result = self.value + 100
        return result

    def sub(self):
        result = self.value - 10
        return result
# Demonstrate the concrete subclass: prints 200 then 90.
obj = Y(100)
for outcome in (obj.add(), obj.sub()):
    print(outcome)
|
[
"saidou224dev@hotmail.com"
] |
saidou224dev@hotmail.com
|
591439930127cc0ca00096c759275babb9b9f783
|
a943cb6da95ec1e06cb480887ba1062a5783527f
|
/2012-oss-hrs/plot-mg-k.py
|
fcb7775822791a3f8c6194f2e6dd460fa6fb664e
|
[] |
no_license
|
andycasey/papers
|
1b2c882c20b0c65b5899d70dc95825ec53cc9fe2
|
3d585ad4b6b1c3b40227185fd7b22ea9bdeb8e02
|
refs/heads/master
| 2021-01-19T17:24:48.788580
| 2013-08-13T08:51:02
| 2013-08-13T08:51:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
import numpy as np
# BUG FIX: `plt` is used throughout this script but was never imported.
import matplotlib.pyplot as plt

# Column layout of the Mucciarelli et al. (2012) abundance table; the
# indices of the four columns we plot are looked up by header name.
mucciarelli_headers = 'ID Teff logg vt [Fe/H] [Fe/H]_err [Mg/Fe] [Mg/Fe]_err [K/Fe] [K/Fe]_err [Ti/Fe] [Ti/Fe]_err [Ca/Fe] [Ca/Fe]_err'
# Column order inside the 4-column arrays loaded below.
mg_fe, mg_fe_err, k_fe, k_fe_err = range(4)
mucciarelli_columns = ['[Mg/Fe]', '[Mg/Fe]_err', '[K/Fe]', '[K/Fe]_err']
mucciarelli_column_indices = [mucciarelli_headers.split().index(item) for item in mucciarelli_columns]
mucciarelli = np.loadtxt('ngc2419-mucciarelli.txt', usecols=mucciarelli_column_indices)

fig = plt.figure()
fig.subplots_adjust(left=0.10, right=0.95, bottom=0.07, wspace=0, hspace=0.14, top=0.95)
ax = fig.add_subplot(111)

ax.errorbar(mucciarelli[:, mg_fe], mucciarelli[:, k_fe], xerr=mucciarelli[:, mg_fe_err], yerr=mucciarelli[:, k_fe_err], fmt=None, ecolor='k')
ax.scatter(mucciarelli[:, mg_fe], mucciarelli[:, k_fe], facecolor='k', edgecolor='k', label='Mucciarelli et al. (DEIMOS; 2012)')

# Cohen data
# NOTE(review): assumes columns 1-4 of the Cohen table are Mg/Fe, Mg/Fe_err,
# K/Fe, K/Fe_err in that order -- verify against the data file.
cohen = np.loadtxt('ngc2419-cohen.txt', usecols=(1, 2, 3, 4, ))
ax.errorbar(cohen[:, mg_fe], cohen[:, k_fe], xerr=cohen[:, mg_fe_err], yerr=cohen[:, k_fe_err], fmt=None, ecolor='g')
ax.scatter(cohen[:, mg_fe], cohen[:, k_fe], marker='s', edgecolor='g', facecolor='g', label='Cohen et al. (HIRES; 2011, 2012)')

ax.set_ylabel('[K/Fe]')
ax.set_xlabel('[Mg/Fe]')
ax.legend(loc=3)
|
[
"andycasey@gmail.com"
] |
andycasey@gmail.com
|
f4f7422e1d5486fb475b159c62317c606ffa5580
|
421d58c6b93b81e0724f8f4576119300eb344252
|
/influencers/core/migrations/0012_auto_20190130_1417.py
|
ebc63a175e53a43e7c2a89b47f2eb951d1aca9ef
|
[] |
no_license
|
momen/influencers
|
7728228c92a552bdff9ae62f85986ad03bce186e
|
f9c76cfc2970440112967f9579dc31f77063cb25
|
refs/heads/master
| 2020-06-03T22:20:03.881411
| 2019-06-15T07:48:43
| 2019-06-15T07:48:43
| 191,754,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# Generated by Django 2.1.4 on 2019-01-30 14:17
from django.db import migrations
import partial_index
class Migration(migrations.Migration):
    """Add a partial unique index on Bank (swift, deleted).

    Uniqueness is only enforced where ``deleted`` IS NULL, so soft-deleted
    banks may share a SWIFT code with a live record.
    """
    dependencies = [
        ('core', '0011_auto_20190130_1305'),
    ]
    operations = [
        migrations.AddIndex(
            model_name='bank',
            # unique only for rows that are not soft-deleted
            index=partial_index.PartialIndex(fields=['swift', 'deleted'], name='core_bank_swift_db8a53_partial', unique=True, where=partial_index.PQ(deleted__isnull=True)),
        ),
    ]
|
[
"momennegm@gmail.com"
] |
momennegm@gmail.com
|
1d6583954d27d890d2ca28e0d0374a7fff088f35
|
a721e4ca65b79ce725c7b5b43539c963a3b55290
|
/Happy_Ladybugs.py
|
29beb1c6e1f59afc9ca77efce3b5bba9bf91db6d
|
[] |
no_license
|
joydas65/Hackerrank-Problems
|
0832d7cfd1de7e5df4dba76326ede735edc9afea
|
a16b3b0ebb65e7597f8f6417047da4d415a818c7
|
refs/heads/master
| 2022-06-21T12:47:55.241409
| 2022-06-18T18:21:08
| 2022-06-18T18:21:08
| 159,071,834
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
# Happy Ladybugs: a board is "happy" when every ladybug has an adjacent
# cell of the same colour. '_' marks an empty cell; one or more empty
# cells allow unlimited rearrangement of the bugs.
for _ in range(int(input())):
    n = int(input())  # board length (line must be consumed even if unused)
    b = input()
    # Count each colour's occurrences (underscores are empty cells, not bugs).
    counts = {}
    for c in b:
        if c != '_':
            counts[c] = counts.get(c, 0) + 1
    if any(v == 1 for v in counts.values()):
        # A colour with a single bug can never get a same-colour neighbour.
        print("NO")
    elif '_' in b:
        # At least one free cell: bugs can be rearranged arbitrarily and every
        # colour occurs >= 2 times, so a happy layout always exists.
        print("YES")
    else:
        # No free cell: nothing can move, so the board must ALREADY be happy.
        # BUG FIX: the original loop ran over range(1, len(b) - 1) and never
        # verified the first and last cells (e.g. "ABBA" was wrongly "YES").
        happy = all(
            (i > 0 and b[i] == b[i - 1]) or (i + 1 < len(b) and b[i] == b[i + 1])
            for i in range(len(b))
        )
        print("YES" if happy else "NO")
|
[
"noreply@github.com"
] |
joydas65.noreply@github.com
|
aeb6d7fa4b40ab287c90e598989a0f927c0abae8
|
cb94a4cdd7a9df17f9c6f1a03f8f4ff12c916cf3
|
/Learning_Python_Generators/Exercise_Files/Ch1/01_03/reverse_capital.py
|
90ff072bda4cd2f9b2ffdfe04074e5da1265685b
|
[] |
no_license
|
sedstan/LinkedIn-Learning-Python-Course
|
2b936d0f00703a6e66a872220ed47572123dc7fd
|
b4584218355bf07aa3d2939b950911eae67adb0b
|
refs/heads/master
| 2021-10-11T10:19:13.675662
| 2019-01-24T17:55:20
| 2019-01-24T17:55:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
# list of names
names_list = ['Adam', 'Anne', 'Barry', 'Brianne', 'Charlie', 'Cassandra', 'David', 'Dana']

# A single combined generator expression works but reads poorly, so the
# pipeline is split into two lazy stages: uppercase first, then reverse.
uppercase = (full_name.upper() for full_name in names_list)
reverse_uppercase = (shouted[::-1] for shouted in uppercase)
|
[
"sed@wearewhy.co.uk"
] |
sed@wearewhy.co.uk
|
f9ab9a7d39cfc7a0dec43be133584c96e9afa1ef
|
93b3a69da031d3fa8402ca787cd5d22db9c09bb9
|
/__init__.py
|
68076caa6613a8ece58a6522f432a1bd21625059
|
[] |
no_license
|
Teifion/communique
|
ab93335d7042776410da34ac28ff8cacda62f73f
|
d7f96d6c9c524fa5ea03dce37edf57f1424d6710
|
refs/heads/master
| 2021-01-01T05:49:43.936323
| 2013-10-10T21:58:41
| 2013-10-10T21:58:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,772
|
py
|
from .api import register, send
def communique_nimblescan():
    """Register the notifications page with `nimblescan`, if it is present.

    The nimblescan package may live one or two package levels up depending on
    how this package is embedded, so both relative imports are attempted; if
    neither resolves, nimblescan integration is silently skipped.
    """
    try:
        from ..nimblescan import api as ns_api
    except ImportError:
        try:
            from ...nimblescan import api as ns_api
        except ImportError:
            # nimblescan not installed anywhere we can see: no-op.
            return
    # Route name, menu label, tags, availability predicate, and forwarder.
    ns_api.register('communique.home', "Notifications", ['communique'], (lambda r: True), ns_api.make_forwarder("communique.home"))
def includeme(config):
    """Pyramid inclusion hook: wires up communique's routes and views.

    Activate with::

        from . import communique
        config.include(communique, route_prefix="communique")
    """
    from . import views
    communique_nimblescan()
    """
    Pass this to your configurator object like so:
    from . import communique
    config.include(communique, route_prefix="communique")
    """
    # Standard views
    config.add_route('communique.home', '/home')
    config.add_route('communique.action', '/action/{action}')
    config.add_route('communique.view', '/view/{notification_id}')
    config.add_route('communique.mini_home', '/mini_home')
    config.add_route('communique.home_count', '/home_count/{user_id}')
    config.add_route('communique.create', '/create')
    # Now link the views
    config.add_view(views.home, route_name='communique.home', renderer='templates/home.pt', permission='loggedin')
    config.add_view(views.action, route_name='communique.action', permission='loggedin')
    config.add_view(views.mini_home, route_name='communique.mini_home', renderer='string', permission='loggedin')
    # NOTE(review): home_count has no permission requirement -- confirm it is meant to be public.
    config.add_view(views.home_count, route_name='communique.home_count', renderer='string')
    config.add_view(views.view, route_name='communique.view', renderer="string", permission='loggedin')
    # Not sure what you use but this is the dev type permission I've got on my system
    config.add_view(views.create, route_name='communique.create', permission='code')
    return config
|
[
"sarkalian@gmail.com"
] |
sarkalian@gmail.com
|
f45761d59529cdbe88da05d923c51475464181fa
|
22fc1933698e339f9de1c7cd8eb0062ef3a8711e
|
/examples/old-examples/snippets/mgl_new_example_glut.py
|
1dd466a064a85f41965edb3b9bfdbb29ba08927b
|
[
"MIT"
] |
permissive
|
einarf/ModernGL
|
f9a4929e529c560ca3dcc139994b7ff84a271a3f
|
e4a7f53289043a0ac06130c67edc75b878484a0e
|
refs/heads/master
| 2020-04-14T03:53:20.054962
| 2019-02-28T07:05:19
| 2019-02-28T07:05:19
| 163,619,410
| 1
| 0
|
MIT
| 2018-12-30T21:40:33
| 2018-12-30T21:40:32
| null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
import sys
import struct

from OpenGL.GLUT import (
    GLUT_DEPTH, GLUT_DOUBLE, GLUT_RGB,
    glutCreateWindow, glutDisplayFunc, glutInit, glutInitDisplayMode,
    glutInitWindowSize, glutMainLoop, glutSwapBuffers,
)

import ModernGL

# Create a double-buffered RGB window with a depth buffer.
glutInit(sys.argv)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(800, 600)
glutCreateWindow(b'')

ctx = ModernGL.create_context()

vert = ctx.vertex_shader('''
    #version 330
    in vec2 vert;
    void main() {
        gl_Position = vec4(vert, 0.0, 1.0);
    }
''')

frag = ctx.fragment_shader('''
    #version 330
    out vec4 color;
    void main() {
        color = vec4(0.30, 0.50, 1.00, 1.0);
    }
''')

# BUG FIX: the original read `ctx.program(vert, frag])` -- a stray bracket
# that made the whole file a SyntaxError. The old ModernGL (4.x) API takes
# the shader list form used here.
prog = ctx.program([vert, frag])

# One triangle: three (x, y) pairs packed as six floats.
vbo = ctx.buffer(struct.pack('6f', 0.0, 0.8, -0.6, -0.8, 0.6, -0.8))
vao = ctx.simple_vertex_array(prog, vbo, ['vert'])


def display():
    """GLUT display callback: clear, draw the triangle, swap buffers."""
    ctx.clear(0.9, 0.9, 0.9)
    vao.render()
    glutSwapBuffers()


glutDisplayFunc(display)
glutMainLoop()
|
[
"cprogrammer1994@gmail.com"
] |
cprogrammer1994@gmail.com
|
de7019587d7e630b0b56aa865769304de2aa1f8f
|
6b79174551f8c5eee7ba5c3d4efe3c921b281d62
|
/models/register/employee.py
|
82aee95339589340a18b0e04bce9c8eac6b72852
|
[] |
no_license
|
Trilokan/manjal
|
5d99dea0703cdf4e4f4553b2710cfb3ac5f05023
|
064fd6f3ad429837dd46c59790a54927e9622e1b
|
refs/heads/master
| 2020-05-04T20:45:08.449320
| 2019-05-06T12:41:50
| 2019-05-06T12:41:50
| 179,449,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,783
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
# Shared selection options for hr.employee fields: (db value, display label).
BLOOD_GROUP = [('a+', 'A+'), ('b+', 'B+'), ('ab+', 'AB+'), ('o+', 'O+'),
               ('a-', 'A-'), ('b-', 'B-'), ('ab-', 'AB-'), ('o-', 'O-')]
GENDER = [('male', 'Male'), ('female', 'Female')]
MARITAL_STATUS = [('single', 'Single'), ('married', 'Married'), ('divorced', 'Divorced')]
class Employee(models.Model):
    """HR employee record.

    Every employee is backed by a linked ``arc.person`` record: ``create``
    makes the person first (sharing one sequence-generated uid), and every
    ``write`` mirrors the identity/contact/address fields back onto it via
    ``update_person_address``.
    """
    _name = "hr.employee"
    name = fields.Char(string="Name", required=True)
    # Sequence-generated in create(); shared with the linked arc.person uid.
    employee_uid = fields.Char(string="Employee ID", readonly=True)
    image = fields.Binary(string="Image")
    small_image = fields.Binary(string="Small Image")
    user_id = fields.Many2one(comodel_name="res.users", string="User")
    # Back-reference to the mirrored person record (set in create()).
    person_id = fields.Many2one(comodel_name="arc.person", string="Person")
    # Contact Details
    email = fields.Char(string="e-Mail")
    mobile = fields.Char(string="Mobile")
    phone = fields.Char(string="Phone")
    # Address in Detail
    door_no = fields.Char(string="Door No")
    building_name = fields.Char(string="Building Name")
    street_1 = fields.Char(string="Street 1")
    street_2 = fields.Char(string="Street 2")
    locality = fields.Char(string="locality")
    landmark = fields.Char(string="landmark")
    city = fields.Char(string="City")
    # Defaults to the company's own state.
    state_id = fields.Many2one(comodel_name="res.country.state", string="State",
                               default=lambda self: self.env.user.company_id.state_id.id)
    country_id = fields.Many2one(comodel_name="res.country", string="Country")
    pin_code = fields.Char(string="Pincode")
    # Account Details
    bank = fields.Char(string="Bank")
    account_no = fields.Char(string="Account No")
    aadhaar_card = fields.Char(string="Aadhaar Card")
    pan_card = fields.Char(string="Pan Card")
    driving_license = fields.Char(string="Driving License")
    passport = fields.Char(string="Passport")
    epf_no = fields.Char(string="EPF No")
    epf_nominee = fields.Char(string="EPF Nominee")
    identity_ids = fields.One2many(comodel_name="arc.identity", inverse_name="employee_id")
    # HR Details
    doj = fields.Date(string="Date of Joining", required=True)
    date_of_relieving = fields.Date(string="Date of Relieving")
    department_id = fields.Many2one(comodel_name="hr.department", string="Department")
    designation_id = fields.Many2one(comodel_name="hr.designation", string="Designation")
    reporting_to_id = fields.Many2one(comodel_name="hr.employee", string="Reporting To")
    category_id = fields.Many2one(comodel_name="hr.category", string="Employee Category", required=True)
    qualification_ids = fields.One2many(comodel_name="arc.qualification", inverse_name="employee_id")
    experience_ids = fields.One2many(comodel_name="hr.experience", inverse_name="employee_id")
    # Personnel Details
    age = fields.Integer(string="Age")
    blood_group = fields.Selection(selection=BLOOD_GROUP, string="Blood Group")
    marital_status = fields.Selection(selection=MARITAL_STATUS, string="Marital Status")
    gender = fields.Selection(selection=GENDER, string="Gender")
    caste = fields.Char(string="Caste")
    religion_id = fields.Many2one(comodel_name="arc.religion", string="Religion")
    physically_challenged = fields.Boolean(string="Physically Challenged")
    # NOTE(review): no string label set -- the UI will show the default; confirm intended.
    nationality_id = fields.Many2one(comodel_name="res.country")
    mother_tongue_id = fields.Many2one(comodel_name="arc.language", string="Mother Tongue")
    language_known_ids = fields.Many2many(comodel_name="arc.language", string="Language Known")
    personnel_mobile = fields.Char(string="Personnel Mobile")
    personnel_email = fields.Char(string="Personnel Email")
    permanent_address = fields.Text(string="Permanent Address")
    family_member_ids = fields.One2many(comodel_name="arc.address", inverse_name="employee_id")
    # Leave
    leave_level_id = fields.Many2one(comodel_name="leave.level", string="Leave Level")
    # Attachment
    attachment_ids = fields.Many2many(comodel_name="ir.attachment", string="Attachment")
    # Smart Button
    # View Complaint
    def action_view_complaint(self):
        """Smart-button handler: view this employee's complaints (not yet implemented)."""
        pass
    # View Promotion
    def action_view_promotion(self):
        """Smart-button handler: view this employee's promotions (not yet implemented)."""
        pass
    # View Payslip
    def action_view_payslip(self):
        """Smart-button handler: view this employee's payslips (not yet implemented)."""
        pass
    # View Work Sheet
    def action_view_work_sheet(self):
        """Smart-button handler: view this employee's work sheets (not yet implemented)."""
        pass
    # View Attendance
    def action_view_attendance(self):
        """Smart-button handler: view this employee's attendance (not yet implemented)."""
        pass
    def update_person_address(self):
        """Mirror identity/contact/address fields onto the linked arc.person.

        NOTE(review): uses bare ``self.field`` access, which assumes a single
        record -- confirm callers never write on a multi-record recordset.
        """
        recs = {}
        recs["name"] = self.name
        recs["person_uid"] = self.employee_uid
        recs["image"] = self.image
        recs["small_image"] = self.small_image
        recs["email"] = self.email
        recs["mobile"] = self.mobile
        recs["phone"] = self.phone
        recs["door_no"] = self.door_no
        recs["building_name"] = self.building_name
        recs["street_1"] = self.street_1
        recs["street_2"] = self.street_2
        recs["locality"] = self.locality
        recs["landmark"] = self.landmark
        recs["city"] = self.city
        recs["state_id"] = self.state_id.id
        recs["country_id"] = self.country_id.id
        recs["pin_code"] = self.pin_code
        recs["is_employee"] = True
        self.person_id.write(recs)
    @api.multi
    def write(self, vals):
        """Standard write, then re-sync the linked person record."""
        rec = super(Employee, self).write(vals)
        self.update_person_address()
        return rec
    @api.model
    def create(self, vals):
        """Create the backing arc.person first, then the employee.

        Both records share one sequence-generated uid (person_uid /
        employee_uid).
        """
        data = {"person_uid": self.env["ir.sequence"].next_by_code(self._name),
                "is_employee": True,
                "name": vals["name"]}
        data.update(vals)
        person_id = self.env["arc.person"].create(data)
        vals["person_id"] = person_id.id
        vals["employee_uid"] = data["person_uid"]
        return super(Employee, self).create(vals)
|
[
"ram@hk.com"
] |
ram@hk.com
|
0e251945353b6973051c3cbc43b6db7207b75a99
|
5a07e47aa8c065622d8d384c6b3b17981b24f0ae
|
/Batch_6_30/bye.py
|
ee8b169ee8edd349c32d61494e1f173ffd5f661d
|
[] |
no_license
|
neelshrma/Old_Python_Codes
|
629a7c113d56e96014c0d4b8d11126c79789335c
|
410de97e8d581e55fe53822528a8e38f15e349ef
|
refs/heads/master
| 2020-03-29T22:31:03.993335
| 2018-09-25T13:34:25
| 2018-09-25T13:34:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
# Read three integers and echo them back with labels.
x = int(input("Enter X : "))
y = int(input("Enter Y : "))
z = int(input("Enter Z : "))
print("Value of x = ", x)
# BUG FIX: this line previously printed the label "Value of x = " for y.
print("Value of y = ", y)
print("Value of z = ", z)
print("Yeah vim is awesome")
|
[
"sachinyadav3496@gmail.com"
] |
sachinyadav3496@gmail.com
|
ae8681babbdc166cc37dcc1a309547c0d4cd968b
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-smn/huaweicloudsdksmn/v2/model/resource_tag.py
|
9f8339f814ea3c3cb36b356419316355ec49a31a
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300
| 2021-05-26T08:54:18
| 2021-05-26T08:54:18
| 370,898,764
| 0
| 0
|
NOASSERTION
| 2021-05-26T03:50:07
| 2021-05-26T03:50:07
| null |
UTF-8
|
Python
| false
| false
| 3,930
|
py
|
# coding: utf-8
import pprint
import re
import six
class ResourceTag:
    """Key/value tag model for the SMN resource API (generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'key': 'str',
        'value': 'str'
    }

    attribute_map = {
        'key': 'key',
        'value': 'value'
    }

    def __init__(self, key=None, value=None):
        """ResourceTag - a model defined in huaweicloud sdk"""
        self._key = None
        self._value = None
        self.discriminator = None
        self.key = key
        self.value = value

    @property
    def key(self):
        """Gets the key of this ResourceTag.

        Key: the field to match. Currently the key may only be
        "resource_name", in which case value is the cloud server name.
        - Keys must not repeat; value is the value to match.
        - This field takes fixed dictionary values.
        - Empty strings are not allowed.

        :return: The key of this ResourceTag.
        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """Sets the key of this ResourceTag.

        Key: the field to match. Currently the key may only be
        "resource_name", in which case value is the cloud server name.
        - Keys must not repeat; value is the value to match.
        - This field takes fixed dictionary values.
        - Empty strings are not allowed.

        :param key: The key of this ResourceTag.
        :type: str
        """
        self._key = key

    @property
    def value(self):
        """Gets the value of this ResourceTag.

        Value. Currently the key may only be "resource_name", in which case
        value is the cloud server name.
        - Each value is at most 255 Unicode characters.
        - Must not be empty.

        :return: The value of this ResourceTag.
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this ResourceTag.

        Value. Currently the key may only be "resource_name", in which case
        value is the cloud server name.
        - Each value is at most 255 Unicode characters.
        - Must not be empty.

        :param value: The value of this ResourceTag.
        :type: str
        """
        self._value = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes, recursively converting nested models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes declared sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ResourceTag):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
b1baa12a4f9b71aa7c00c72ad4e34ce790b6cb4e
|
b66746b1d1c0a2800faab41488f2a67ed43255b8
|
/Knn.py
|
5c5aa5ea0c3cc8ec1b0c652f9d7fde2654e23aea
|
[] |
no_license
|
yzsxjhft/emg
|
28a0501810a86962f6a510fbe9f6d22346b4d963
|
96075c7124d0e50983221a7b4b4a8a5fba7bb352
|
refs/heads/master
| 2020-04-14T13:00:41.374480
| 2019-09-28T07:02:02
| 2019-09-28T07:02:02
| 163,856,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,485
|
py
|
import numpy as np
import sys
class Knn:
    """k-nearest-neighbour classifier over multi-channel time series.

    The distance between two samples is the mean DTW distance across their
    channels; the predicted label is the majority vote of the ``k`` closest
    training samples.
    """

    def __init__(self, k=5):
        self.k = k                  # number of neighbours that vote
        self.train_data = None      # samples; each sample is a sequence of channel time series
        self.train_target = None    # labels; must be non-negative ints (np.bincount)

    def fit(self, train_data, train_target):
        """Store the training samples and labels (lazy learner: no training work)."""
        self.train_data = train_data
        self.train_target = train_target

    def predict(self, test_data):
        """Return the predicted label for each sample in ``test_data``.

        Channel ``j`` of a test sample is compared against channel ``j`` of
        each training sample.
        """
        y = list()
        for params in test_data:
            distance = list()
            for data in self.train_data:
                # Mean DTW distance over all channels of the sample.
                # GENERALIZED: the divisor was a hard-coded 8.0, which assumed
                # exactly 8 channels; a constant divisor never changes the
                # neighbour ranking, so predictions for 8-channel data are
                # unchanged.
                dist = sum(self.dtw_distance(params[j], row)
                           for j, row in enumerate(data))
                distance.append(dist / float(len(data)))
            indexs = np.argsort(np.array(distance), axis=0)[:self.k]
            labels = np.array([self.train_target[x] for x in indexs])
            # Majority vote among the k nearest neighbours.
            y.append(np.argmax(np.bincount(labels)))
        return y

    def dtw_distance(self, ts_a, ts_b):
        """Returns the DTW similarity distance between two 2-D
        timeseries numpy arrays.

        Arguments
        ---------
        ts_a, ts_b : array of shape [n_samples, n_timepoints]
            Two arrays containing n_samples of timeseries data
            whose DTW distance between each sample of A and B
            will be compared

        d : DistanceMetric object (default = abs(x-y))
            the distance measure used for A_i - B_j in the
            DTW dynamic programming function

        Returns
        -------
        DTW distance between A and B
        """
        d = lambda x, y: abs(x - y)
        max_warping_window = 10000  # effectively unconstrained warping band

        # Create cost matrix via broadcasting with large int
        ts_a, ts_b = np.array(ts_a), np.array(ts_b)
        M, N = len(ts_a), len(ts_b)
        cost = sys.maxsize * np.ones((M, N))

        # Initialize the first row and column
        cost[0, 0] = d(ts_a[0], ts_b[0])
        for i in range(1, M):
            cost[i, 0] = cost[i - 1, 0] + d(ts_a[i], ts_b[0])
        for j in range(1, N):
            cost[0, j] = cost[0, j - 1] + d(ts_a[0], ts_b[j])

        # Populate rest of cost matrix within window
        for i in range(1, M):
            for j in range(max(1, i - max_warping_window),
                           min(N, i + max_warping_window)):
                choices = cost[i - 1, j - 1], cost[i, j - 1], cost[i - 1, j]
                cost[i, j] = min(choices) + d(ts_a[i], ts_b[j])

        # Return DTW distance given window
        return cost[-1, -1]
|
[
"="
] |
=
|
1e95b79a26ebe3af47d29ddc95baa5d79727e5b6
|
6fe0a724e9c5d3975ddb95eebe3032eb213f8840
|
/tensorflow_datasets/core/visualization/show_examples.py
|
6d9ac13abe6f9146269e8b465bd6b53e02a37b35
|
[
"Apache-2.0"
] |
permissive
|
Yohnhahahage/datasets
|
caf8b7001046bbf1729d016abdeae7f69d75152b
|
08cf7709095860fe50ec10ea503c4095b69a5cb1
|
refs/heads/master
| 2022-12-09T09:36:40.123816
| 2020-09-22T19:03:15
| 2020-09-22T19:03:15
| 297,893,222
| 1
| 0
|
Apache-2.0
| 2020-09-23T07:46:27
| 2020-09-23T07:46:26
| null |
UTF-8
|
Python
| false
| false
| 5,033
|
py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Show example util.
"""
from typing import Any
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import dataset_info
from tensorflow_datasets.core import lazy_imports_lib
from tensorflow_datasets.core import splits
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.visualization import image_visualizer
from tensorflow_metadata.proto.v0 import statistics_pb2
_ALL_VISUALIZERS = [
image_visualizer.ImageGridVisualizer(),
]
def show_examples(
    ds: tf.data.Dataset,
    ds_info: dataset_info.DatasetInfo,
    **options_kwargs: Any
):
  """Visualize images (and labels) from an image classification dataset.

  Intended for interactive use (Colab, Jupyter): it displays and returns a
  plot of (rows*columns) images taken from the given `tf.data.Dataset`.

  Usage:

  ```python
  ds, ds_info = tfds.load('cifar10', split='train', with_info=True)
  fig = tfds.show_examples(ds, ds_info)
  ```

  Args:
    ds: `tf.data.Dataset`. The tf.data.Dataset object to visualize. Examples
      should not be batched. Examples will be consumed in order until
      (rows * cols) are read or the dataset is consumed.
    ds_info: The dataset info object to which extract the label and features
      info. Available either through `tfds.load('mnist', with_info=True)` or
      `tfds.builder('mnist').info`
    **options_kwargs: Additional display options, specific to the dataset type
      to visualize. Are forwarded to `tfds.visualization.Visualizer.show`.
      See the `tfds.visualization` for a list of available visualizers.

  Returns:
    fig: The `matplotlib.Figure` object
  """
  # Legacy calls used (info, ds) argument order; detect and swap.
  if not isinstance(ds_info, dataset_info.DatasetInfo):
    # `absl.logging` does not appear on Colab by default, so uses print instead.
    print('WARNING: For consistency with `tfds.load`, the `tfds.show_examples` '
          'signature has been modified from (info, ds) to (ds, info).\n'
          'The old signature is deprecated and will be removed. '
          'Please change your call to `tfds.show_examples(ds, info)`')
    ds, ds_info = ds_info, ds

  # Pack `as_supervised=True` (feature, label) tuples back into dicts.
  supervised = ds_info.supervised_keys
  spec = ds.element_spec
  if supervised and isinstance(spec, tuple) and len(spec) == 2:
    x_key, y_key = supervised
    ds = ds.map(lambda x, y: {x_key: x, y_key: y})

  # Delegate to the first visualizer that claims this dataset.
  visualizer = next(
      (v for v in _ALL_VISUALIZERS if v.match(ds_info)), None)
  if visualizer is None:
    raise ValueError(
        'Visualisation not supported for dataset `{}`'.format(ds_info.name)
    )
  return visualizer.show(ds, ds_info, **options_kwargs)
def show_statistics(
    ds_info: dataset_info.DatasetInfo,
    split: splits.Split = splits.Split.TRAIN,
    disable_logging: bool = True,
) -> None:
  """Display the datasets statistics on a Colab/Jupyter notebook.

  `tfds.show_statistics` is a wrapper around
  [tensorflow_data_validation](https://www.tensorflow.org/tfx/data_validation/get_started)
  which calls `tfdv.visualize_statistics`. Statistics are displayed using
  [FACETS OVERVIEW](https://pair-code.github.io/facets/).

  Usage:

  ```
  builder = tfds.builder('mnist')
  tfds.show_statistics(builder.info)
  ```

  Or:

  ```
  ds, ds_info = tfds.load('mnist', with_info)
  tfds.show_statistics(ds_info)
  ```

  Note: In order to work, `tensorflow_data_validation` must be installed and
  the dataset info object must contain the statistics. For "official" datasets,
  only datasets which have been added/updated recently will contains statistics.
  For "custom" datasets, you need to generate the dataset with
  `tensorflow_data_validation` installed to have the statistics.

  Args:
    ds_info: The `tfds.core.DatasetInfo` object containing the statistics.
    split: Split for which generate the statistics.
    disable_logging: `bool`, if True, disable the tfdv logs which can be
      too verbose.

  Returns:
    `None`
  """
  tfdv = lazy_imports_lib.lazy_imports.tensorflow_data_validation
  if split not in ds_info.splits:
    raise ValueError(
        'Invalid requested split: \'{}\'. Only {} are availables.'.format(
            split, list(ds_info.splits)))

  # Wrap the split's statistics proto into the list message tfdv expects.
  stats_list = statistics_pb2.DatasetFeatureStatisticsList()
  stats_list.datasets.add().CopyFrom(ds_info.splits[split].statistics)

  log_ctx = utils.disable_logging() if disable_logging else utils.nullcontext()
  with log_ctx:
    return tfdv.visualize_statistics(stats_list)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
7e508f48a7e431f6b7f9011cf995cb87ad12f846
|
973da85ebe773dc35d6539397e754726e87171b8
|
/lcopt/parameters.py
|
ea0570fdab8b51a2f67fdec7f0d0392e27c0746c
|
[
"BSD-3-Clause"
] |
permissive
|
pjamesjoyce/lcopt
|
14c49f79a43e9dce220ef760fa9b207105779568
|
a167ecfa258e62e91af7dac6cbf70be5d63fff93
|
refs/heads/development
| 2023-05-22T20:45:50.252163
| 2020-04-02T09:53:07
| 2020-04-02T09:53:07
| 76,573,839
| 23
| 6
|
BSD-3-Clause
| 2018-10-04T13:38:48
| 2016-12-15T15:55:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,633
|
py
|
from bw2parameters.parameter_set import ParameterSet
from collections import OrderedDict
from copy import deepcopy
class LcoptParameterSet(ParameterSet):
    """
    Subclass of `bw2parameters.parameter_set.ParameterSet` that takes a `lcopt.LcoptModel` and delegates parameter ordering and evaluation to `bw2parameters`
    TODO: Add more documentation and write tests
    """
    def __init__(self, modelInstance):
        # Keep a reference to the model; all parameter groups are derived from it.
        self.modelInstance = modelInstance
        self.norm_params = self.normalise_parameters()
        self.check_production_parameters_exist()
        # Merge ordinary, production, normalised and allocation parameters into
        # one lookup (later groups win on key clashes).
        self.all_params = {**self.modelInstance.params, **self.modelInstance.production_params, **self.norm_params, **self.modelInstance.allocation_params}
        # Seed the bw2parameters machinery with parameter set 0.
        self.bw2_params, self.bw2_global_params, self.bw2_export_params = self.lcopt_to_bw2_params(0)
        super().__init__(self.bw2_params, self.bw2_global_params)
        self.evaluated_parameter_sets = self.preevaluate_exchange_params()
    def lcopt_to_bw2_params(self, ps_key):
        """Convert the model's ps_key-th parameter set (by position) into bw2-style dicts.

        Returns a tuple ``(bw2_params, bw2_global_params, bw2_export_params)``.
        """
        k0 = list(self.modelInstance.parameter_sets.keys())[ps_key]
        ps1 = self.modelInstance.parameter_sets[k0]
        # bw2parameters calls the expression key 'formula'; lcopt calls it 'function'.
        bw2_params = {k:{(x if x != 'function' else 'formula'):y for x, y in v.items()} for k,v in self.all_params.items()}
        for k in bw2_params.keys():
            bw2_params[k]['amount'] = ps1.get(k,0)
        # Global (external) parameters fall back to their declared defaults.
        bw2_global_params = {x['name']: ps1.get(x['name'],x['default']) for x in self.modelInstance.ext_params}
        # Export format: emit the formula when present, otherwise the plain amount.
        bw2_export_params = []
        for k, v in bw2_params.items():
            to_append = {'name': k}
            if v.get('formula'):
                to_append['formula'] = v['formula']
            else:
                to_append['amount'] = v['amount']
            bw2_export_params.append(to_append)
        for k, v in bw2_global_params.items():
            bw2_export_params.append({'name':k, 'amount':v})
        return bw2_params, bw2_global_params, bw2_export_params
    def normalise_parameters(self):
        """Build ``n_<name>`` copies of each parameter whose formula divides by its normalisation parameter."""
        param_copy = deepcopy(self.modelInstance.params)
        #production_params = deepcopy(self.modelInstance.production_params)
        #allocation_params = deepcopy(self.modelInstance.allocation_params)
        norm_params = OrderedDict()
        for k, v in param_copy.items():
            norm_params['n_{}'.format(k)] = {}
            for key, item in v.items():
                if key == 'function':
                    if not item:
                        # No existing formula: normalise the raw parameter value.
                        norm_function = '{} / {}'.format(k, v['normalisation_parameter'])
                    else:
                        # Wrap the existing formula so the whole expression is normalised.
                        norm_function = '({}) / {}'.format(item, v['normalisation_parameter'])
                    norm_params['n_{}'.format(k)][key] = norm_function
                else:
                    norm_params['n_{}'.format(k)][key] = item
        return norm_params
    def preevaluate_exchange_params(self):
        """Evaluate every stored parameter set once and cache the resulting amounts per set."""
        evaluated_params = OrderedDict()
        for n, k in enumerate(self.modelInstance.parameter_sets.keys()):
            # Point the bw2 state at set n, evaluate, and snapshot the amounts.
            self.params, self.global_params, _ = self.lcopt_to_bw2_params(n)
            self.evaluate_and_set_amount_field()
            this_set = {}
            for j, v in self.params.items():
                this_set[j] = v['amount']
            evaluated_params[k] = this_set
        # Reset internal state back to parameter set 0 after the sweep.
        self.params, self.global_params , _ = self.lcopt_to_bw2_params(0)
        self.evaluate_and_set_amount_field()
        return evaluated_params
    def check_production_parameters_exist(self):
        """Old versions of models won't have production parameters, leading to ZeroDivision errors and breaking things; default any missing ones to 1.0."""
        for k, v in self.modelInstance.parameter_sets.items():
            for p_id in self.modelInstance.production_params.keys():
                if v.get(p_id):
                    #print('{} already exists'.format(p_id))
                    pass
                else:
                    #print('No production parameter called {} - setting it to 1'.format(p_id))
                    v[p_id] = 1.0
            for p_id in self.modelInstance.allocation_params.keys():
                if v.get(p_id):
                    #print('{} already exists'.format(p_id))
                    pass
                else:
                    #print('No production parameter called {} - setting it to 1'.format(p_id))
                    v[p_id] = 1.0
|
[
"pjamesjoyce@gmail.com"
] |
pjamesjoyce@gmail.com
|
9298cc195bdd4bd86918dd770c297a04612887b4
|
1ebf853638e6e0344e3498f5840f055d29d5a311
|
/code/broadcast_send.py
|
1a39a6443466904ea4c72734c4f67e1909024233
|
[] |
no_license
|
LzWaiting/02.PythonNet
|
07d8b159d5fe3f89100dc1daf262b46bfa7d6fcb
|
adc317b5c19c339395fbb50c94f843880f03af7a
|
refs/heads/master
| 2020-06-09T01:56:20.984921
| 2019-06-23T12:52:31
| 2019-06-23T12:52:31
| 193,347,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
from socket import *
from time import sleep
# Target address: the subnet broadcast address, port 9999.
dest = ('192.168.10.255',9999)
s = socket(AF_INET,SOCK_DGRAM)
# Enable sending to broadcast addresses on this UDP socket.
s.setsockopt(SOL_SOCKET,SO_BROADCAST,1)
while True:
    sleep(2)
    try:
        s.sendto('来呀,带你去看蓝色土耳其'.encode(),dest)
    except (KeyboardInterrupt,SyntaxError):
        # Let user interrupts propagate instead of being swallowed below.
        raise
    except Exception as e:
        print(e)
s.close()  # NOTE: unreachable — the while-True loop above never exits
|
[
"910122456@qq.com"
] |
910122456@qq.com
|
dce6fb53f680c6e959e0f66f2fa901accf10520f
|
15c140fa5f116bcfacb9340aac77692bb4fa3d00
|
/rafa_care/ext/helpers/tz.py
|
be3f65e53299ec530b7105d95e1ecba1eda74d25
|
[] |
no_license
|
thcborges/rafa-care
|
76ceac3b93a4995729b36539c603ada77350b2fe
|
9cd2d39938aa998c9ffaf0ecd82027db26a9b514
|
refs/heads/main
| 2023-07-14T02:08:25.554888
| 2021-08-01T14:17:14
| 2021-08-01T14:17:14
| 383,315,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
from datetime import datetime, timedelta, timezone
def convert_tz(timestamp: datetime, diff: int = -3) -> datetime:
    """Return *timestamp* expressed in a fixed UTC offset of *diff* hours (default UTC-3)."""
    target_zone = timezone(timedelta(hours=diff))
    return timestamp.astimezone(target_zone)
|
[
"thiagoborges@id.uff.br"
] |
thiagoborges@id.uff.br
|
844c6961ba63d9ffe3c2f7755dd9152b2c57135d
|
f6dfd2373ba1d23ea71a7fd8447e16d797d34138
|
/hot_100/206_recursive.py
|
4a1a7fd59583000a6564c313e17743c343ead589
|
[] |
no_license
|
hwngenius/leetcode
|
284e65dc1727446a95a1c18edd6ef994692430ba
|
52f7974f986bfb3802defd214dea5b0f9b280193
|
refs/heads/master
| 2023-02-12T02:23:38.003197
| 2021-01-07T13:55:42
| 2021-01-07T13:55:42
| 266,340,286
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node: a value plus a pointer to the next node."""
    def __init__(self, x):
        self.val = x      # payload
        self.next = None  # successor; None marks the end of the list
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        """Reverse a singly-linked list in place and return the new head."""
        prev = None
        node = head
        while node is not None:
            # Re-point the current node backwards and advance in one step.
            node.next, prev, node = prev, node, node.next
        return prev
|
[
"1158213354@qq.com"
] |
1158213354@qq.com
|
86d9aa7ff24333c6fabe99130510cfa0aca8fd98
|
01aa3e4a81500081265bdaec2d127a76ffafbfab
|
/meraki/api/splash_login_attempts.py
|
5c7ddb302332314e8d45c7c623be67bf47e2f3e6
|
[
"MIT"
] |
permissive
|
itbj/meraki-dashboard
|
0f4ded074a615bf2d3c749779f5efc3165d88446
|
aa730d4f95b4a0fec180c610eeea56bd71ff48b4
|
refs/heads/master
| 2020-12-07T17:29:20.168805
| 2020-01-09T08:59:46
| 2020-01-09T08:59:46
| 232,761,086
| 2
| 0
|
MIT
| 2020-01-09T08:35:24
| 2020-01-09T08:35:23
| null |
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
class SplashLoginAttempts(object):
    """Wrapper for the Meraki Dashboard splash-login-attempts API resource."""
    def __init__(self, session):
        super(SplashLoginAttempts, self).__init__()
        self._session = session  # REST session used to issue the API calls
    def getNetworkSplashLoginAttempts(self, networkId: str, **kwargs):
        """
        **List the splash login attempts for a network**
        https://api.meraki.com/api_docs#list-the-splash-login-attempts-for-a-network
        - networkId (string)
        - ssidNumber (integer): Only return the login attempts for the specified SSID
        - loginIdentifier (string): The username, email, or phone number used during login
        - timespan (integer): The timespan, in seconds, for the login attempts. The period will be from [timespan] seconds ago until now. The maximum timespan is 3 months
        """
        # Fold the positional args into kwargs. Note locals() also includes
        # `self`, `networkId` and `kwargs` itself; only keys listed in
        # query_params below are forwarded, so the extras are harmless.
        kwargs.update(locals())
        if 'ssidNumber' in kwargs:
            # Valid SSID slots are 0-14 on Meraki networks.
            options = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
            assert kwargs['ssidNumber'] in options, f'''"ssidNumber" cannot be "{kwargs['ssidNumber']}", & must be set to one of: {options}'''
        metadata = {
            'tags': ['Splash login attempts'],
            'operation': 'getNetworkSplashLoginAttempts',
        }
        resource = f'/networks/{networkId}/splashLoginAttempts'
        query_params = ['ssidNumber', 'loginIdentifier', 'timespan']
        params = {k: v for (k, v) in kwargs.items() if k in query_params}
        return self._session.get(metadata, resource, params)
|
[
"shiychen@cisco.com"
] |
shiychen@cisco.com
|
420b6a0e980b2356a5ca6e2268c92fde01251ec9
|
683876019cad0b0d562ac7f9da8c679cb310cfb2
|
/2015/day07/part2.py
|
c267b7c7b3336aca216adb0658c6aee619228c27
|
[] |
no_license
|
CoachEd/advent-of-code
|
d028bc8c21235361ad31ea55922625adf743b5c8
|
10850d5d477c0946ef73756bfeb3a6db241cc4b2
|
refs/heads/master
| 2023-05-11T05:20:26.951224
| 2023-05-09T18:54:16
| 2023-05-09T18:54:16
| 160,375,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,194
|
py
|
import sys
import time
import numpy as np
start_secs = time.time()
def hasInstr(s):
    """Return True if the wire description contains a gate-operator keyword."""
    return any(op in s for op in ('AND', 'OR', 'NOT', 'LSHIFT', 'RSHIFT'))
# read in input file
l=[]
my_file = open("inp.txt", "r")
lines = my_file.readlines()
for line in lines:
    l.append(line.strip())
# d maps wire name -> resolved integer signal.
d = dict()
i = 0
# Worklist loop: repeatedly sweep the remaining instructions, resolving any
# whose operands are already known, until every instruction is consumed.
while len(l) > 0:
    if hasInstr(l[i]):
        arr = l[i].split('->')
        var = arr[-1].strip()
        arr2 = arr[0].strip().split()
        if l[i].find('NOT') != -1:
            # Unary NOT: resolvable once its single operand is known.
            operand = arr2[1].strip()
            if operand in d:
                d[var] = ~d[operand]
                del l[i]
        else:
            # Binary op: operands may be literals or wire names.
            b1 = False
            b2 = False
            lft = arr2[0]
            op = arr2[1]
            rgt = arr2[2]
            if lft.isdigit():
                lft = int(lft)
                b1 = True
            if rgt.isdigit():
                rgt = int(rgt)
                b2 = True
            if not b1:
                b1 = lft in d
                if b1:
                    lft = d[lft]
            if not b2:
                b2 = rgt in d
                if b2:
                    rgt = d[rgt]
            if b1 and b2:
                # have operands, do cmd
                if op == 'AND':
                    d[var] = lft & rgt
                elif op == 'OR':
                    d[var] = lft | rgt
                elif op == 'LSHIFT':
                    d[var] = lft << rgt
                elif op == 'RSHIFT':
                    d[var] = lft >> rgt
                del l[i]
    else:
        # no instr: plain assignment "value -> wire"
        arr = l[i].split('->')
        var = arr[1].strip()
        val = arr[0].strip()
        if val.isdigit():
            val = int(val)
            d[var] = val
            if var == 'b':
                d[var] = 956 # override for Part 2
            del l[i]
        else:
            # Wire-to-wire copy: resolvable once the source wire is known.
            if val in d:
                d[var] = d[val]
                del l[i]
    # Wrap the scan index back to the start of the shrinking worklist.
    i = i + 1
    if i >= len(l):
        i = 0
# np.uint16 truncates to the puzzle's 16-bit wire width.
print('part 1: ' + str(np.uint16(d['a'])))
end_secs = time.time()
print(str(end_secs-start_secs))
|
[
"CoachEd@gmail.com"
] |
CoachEd@gmail.com
|
8fa326a6592555adfd01b1e25a06052b11d92f22
|
04f4558aa0dc904b8d7c0ab79b80ec11c34f8ccf
|
/test/test_boatroom.py
|
af2a4b21de7d53df004de2db2a6c8b49de755108
|
[
"Apache-2.0"
] |
permissive
|
scubawhere/scubawhere-api-python-client
|
0fc23ffb97446b0bb0825c93528f954e7d642cf4
|
9f8578e251492c7667f785df7b7c9d66e71f5c8e
|
refs/heads/master
| 2020-12-24T11:10:34.880348
| 2016-11-08T12:20:45
| 2016-11-08T12:20:45
| 73,180,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
# coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: bryan@scubawhere.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.boatroom import Boatroom
class TestBoatroom(unittest.TestCase):
""" Boatroom unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testBoatroom(self):
"""
Test Boatroom
"""
model = swagger_client.models.boatroom.Boatroom()
if __name__ == '__main__':
    # Run the test stubs when executed directly.
    unittest.main()
|
[
"bryan@iqwebcreations.com"
] |
bryan@iqwebcreations.com
|
7d4c853c7c60f91bc251797865adca42be00b83e
|
b9972ae24a4f261a87997fea963f537abe741dbe
|
/Chapter01/randint.py
|
1a1c576a8bd8c0bfcc8b096767c7fb0218d03782
|
[
"MIT"
] |
permissive
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
90503ea9b7dfc59e45f596e8548a3371641162c1
|
3325cd5a4ed314be5c75552bfa4675f7fe17f8e2
|
refs/heads/master
| 2023-02-03T03:01:09.242951
| 2023-01-30T08:23:38
| 2023-01-30T08:23:38
| 133,922,003
| 18
| 7
| null | 2018-05-18T08:00:42
| 2018-05-18T07:52:51
|
Python
|
UTF-8
|
Python
| false
| false
| 183
|
py
|
>>> randint(0, 1000)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'randint' is not defined
>>> import random
>>> random.randint(0, 1000)
607
|
[
"subhalaxmin@packtpub.com"
] |
subhalaxmin@packtpub.com
|
f34fc930bab5ce3a714ffba25d5ebe66941772a3
|
9949275a60ee6267d59091cab5977b6a45515452
|
/antspynet/utilities/get_antsxnet_data.py
|
a74be4812a9140f35641bf127c4c325278cc8ed8
|
[] |
no_license
|
papasanimohansrinivas/ANTsPyNet
|
d81be45c984ce260385d28ef9e1bb51463f055b4
|
8e16b2d17be769d1d8913de7c3a68135e5ebe5ed
|
refs/heads/master
| 2023-06-25T02:04:43.456534
| 2021-07-22T13:51:29
| 2021-07-22T13:51:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,818
|
py
|
import tensorflow as tf
def get_antsxnet_data(file_id=None,
                      target_file_name=None,
                      antsxnet_cache_directory=None):
    """
    Download data such as prefabricated templates and spatial priors.

    Arguments
    ---------
    file_id string
        One of the permitted file ids or pass "show" to list all
        valid possibilities.  Note that most require internet access
        to download.

    target_file_name string
        Optional target filename (defaults to ``<file_id>.nii.gz``).

    antsxnet_cache_directory string
        Optional target output.  If not specified these data will be downloaded
        to the subdirectory ~/.keras/ANTsXNet/.

    Returns
    -------
    A filename string

    Raises
    ------
    ValueError
        If ``file_id`` is missing or not one of the known ids.

    Example
    -------
    >>> template_file = get_antsxnet_data('biobank')
    """

    def switch_data(argument):
        # Map a file id to its figshare download URL (empty string = not yet hosted).
        switcher = {
            "biobank": "https://ndownloader.figshare.com/files/22429242",
            "croppedMni152": "https://ndownloader.figshare.com/files/22933754",
            "croppedMni152Priors": "https://ndownloader.figshare.com/files/27688437",
            "deepFlashPriors": "",
            "deepFlashTemplateT1": "",
            "deepFlashTemplateT2": "",
            "mprage_hippmapp3r": "https://ndownloader.figshare.com/files/24984689",
            "protonLungTemplate": "https://ndownloader.figshare.com/files/22707338",
            "ctLungTemplate": "https://ndownloader.figshare.com/files/22707335",
            "luna16LungPriors": "https://ndownloader.figshare.com/files/28253796",
            "priorDktLabels": "https://ndownloader.figshare.com/files/24139802",
            "S_template3": "https://ndownloader.figshare.com/files/22597175",
            "priorDeepFlashLeftLabels": "https://ndownloader.figshare.com/files/25422098",
            "priorDeepFlashRightLabels": "https://ndownloader.figshare.com/files/25422101",
            "adni": "https://ndownloader.figshare.com/files/25516361",
            "ixi": "https://ndownloader.figshare.com/files/25516358",
            "kirby": "https://ndownloader.figshare.com/files/25620107",
            "mni152": "https://ndownloader.figshare.com/files/25516349",
            "nki": "https://ndownloader.figshare.com/files/25516355",
            "nki10": "https://ndownloader.figshare.com/files/25516346",
            "oasis": "https://ndownloader.figshare.com/files/25516352"
        }
        return switcher.get(argument, "Invalid argument.")

    # Fixed: identity comparisons with None now use `is None` (was `== None`).
    if file_id is None:
        raise ValueError("Missing file id.")

    valid_list = ("biobank",
                  "croppedMni152",
                  "croppedMni152Priors",
                  "deepFlashPriors",
                  "deepFlashTemplateT1",
                  "deepFlashTemplateT2",
                  "mprage_hippmapp3r",
                  "protonLungTemplate",
                  "ctLungTemplate",
                  "luna16LungPriors",
                  "S_template3",
                  "priorDktLabels",
                  "priorDeepFlashLeftLabels",
                  "priorDeepFlashRightLabels",
                  "adni",
                  "ixi",
                  "kirby",
                  "mni152",
                  "nki",
                  "nki10",
                  "oasis",
                  "show")

    if file_id not in valid_list:
        raise ValueError("No data with the id you passed - try \"show\" to get list of valid ids.")

    if file_id == "show":
        return valid_list

    url = switch_data(file_id)

    if target_file_name is None:
        target_file_name = file_id + ".nii.gz"
    if antsxnet_cache_directory is None:
        antsxnet_cache_directory = "ANTsXNet"

    # Downloads (or reuses a cached copy) under ~/.keras/<cache dir>/.
    target_file_name_path = tf.keras.utils.get_file(target_file_name, url,
                                                    cache_subdir=antsxnet_cache_directory)
    return target_file_name_path
|
[
"ntustison@gmail.com"
] |
ntustison@gmail.com
|
3100deb0f29b301a5dd06ce1413538c7df022b75
|
ec61b57a99d7683a668f4910c9fad4b1c9335525
|
/todo/41-firstMissingPositive.py
|
39b5bd971a07978100e79c13e9e1cb2c2b77d0d0
|
[] |
no_license
|
savadev/leetcode-2
|
906f467e793b9636965ab340c7753d9fc15bc70a
|
20f37130236cc68224082ef056dacd6accb374e3
|
refs/heads/master
| 2020-12-28T12:54:08.355317
| 2018-08-06T00:44:24
| 2018-08-06T00:44:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
class Solution(object):
    def firstMissingPositive(self, nums):
        """
        Return the smallest positive integer (>= 1) absent from nums.

        :type nums: List[int]
        :rtype: int

        The original sum-of-arithmetic-range heuristic returned wrong answers
        (e.g. [1] -> 0 instead of 2, and duplicates broke it entirely) and
        printed debug output.  Replaced with a membership-set scan: O(n) time
        and space, correct for empty input, negatives, and duplicates.
        """
        # Only positive values can ever be the answer.
        positives = set(x for x in nums if x > 0)
        candidate = 1
        while candidate in positives:
            candidate += 1
        return candidate
# Quick manual check of the solver on a single-element input.
r = Solution()
res = r.firstMissingPositive([1])
print (res)
|
[
"nmaswood@cs.uchicago.edu"
] |
nmaswood@cs.uchicago.edu
|
8b4c76e48970902fe165375a9bff089f9bd8547a
|
25e99a0af5751865bce1702ee85cc5c080b0715c
|
/python/src/pyprogbook/第二版(博碩)課本範例程式/ch8/RC_8_1.py
|
917a5b5f0a7c5398cc8649d18a378e6983072f0a
|
[] |
no_license
|
jasonblog/note
|
215837f6a08d07abe3e3d2be2e1f183e14aa4a30
|
4471f95736c60969a718d854cab929f06726280a
|
refs/heads/master
| 2023-05-31T13:02:27.451743
| 2022-04-04T11:28:06
| 2022-04-04T11:28:06
| 35,311,001
| 130
| 67
| null | 2023-02-10T21:26:36
| 2015-05-09T02:04:40
|
C
|
UTF-8
|
Python
| false
| false
| 624
|
py
|
#RC_8_1: 定義貨幣時間價值的類別
class TimeValue:
    """Time value of money: future-value and present-value of a lump sum."""

    def fvfix(self, pv, i, n):
        """Future value of *pv* after *n* periods at rate *i* per period."""
        growth = (1 + i) ** n
        return pv * growth

    def pvfix(self, fv, i, n):
        """Present value of *fv* due *n* periods from now at rate *i* per period."""
        discount = (1 + i) ** n
        return fv / discount
# Set the initial values.
pv=100
fv=115.92740743
i=0.03
n=5
# Instantiate the TimeValue class.
tv1=TimeValue()
# Call the instance methods and print future/present value after n years.
print('%d年後的終值 = %10.4f' %(n, tv1.fvfix(pv, i, n)))
print('%d年後的現值 = %10.4f' %(n, tv1.pvfix(fv, i, n)))
|
[
"jason_yao"
] |
jason_yao
|
de72efb471a01079e3b835086bdf362a43ce075b
|
fc29ccdcf9983a54ae2bbcba3c994a77282ae52e
|
/Leetcode/813-dp_interval.py
|
eebabd32aa4ba9891a08ae004244921141caf878
|
[] |
no_license
|
linnndachen/coding-practice
|
d0267b197d9789ab4bcfc9eec5fb09b14c24f882
|
5e77c3d7a0632882d16dd064f0aad2667237ef37
|
refs/heads/master
| 2023-09-03T19:26:25.545006
| 2021-10-16T16:29:50
| 2021-10-16T16:29:50
| 299,794,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,774
|
py
|
# max(score of sum of avg of each group)
class Solution:
    def largestSumOfAverages(self, A: List[int], K: int) -> float:
        """Partition A into at most K adjacent groups, maximising the sum of group averages.

        Top-down DP over (start index, groups remaining), memoised in `memo`;
        prefix sums give each group average in O(1).
        """
        n = len(A)
        prefix_sum = [0] * (n+1)
        # dp = [[0] * (K+1) for _ in range(n)]
        memo = {}
        for i in range(n):
            prefix_sum[i+1] = A[i] + prefix_sum[i]
        def avg(i,j):
            # Average of A[i:j] via the prefix sums (requires j > i).
            ag = (prefix_sum[j] - prefix_sum[i]) / (j - i)
            return ag
        def dfs(idx, k):
            # Best score achievable splitting A[idx:] into exactly k groups.
            if (idx, k) in memo:
                return memo[(idx, k)]
            if k==1:
                # base case: one group = average of the whole suffix
                return (prefix_sum[-1] - prefix_sum[idx]) / (n - idx)
            res = 0
            for i in range(idx, n-k+1):
                # avg of 0~i + divide what's left into k groups
                res = max(res, avg(idx,i+1) + dfs(i+1, k-1))
            memo[(idx, k)] = res
            return res
        return dfs(0, K)
    """
    def largestSumOfAverages(self, A: List[int], K: int) -> float:
        N = len(A)
        prefix_sum = [0] * (N+1)
        for i in range(1, len(A)+1):
            prefix_sum[i] = prefix_sum[i-1] + A[i-1]
        dp = [self._average(prefix_sum, i, N) for i in range(N)]
        print(dp)
        # only 1 group, 2 groups and etc
        for k in range(1, min(N, K)):
            for i in range(N):
                # if we have already decided a group, find the rest k
                for j in range(i+1, N):
                    dp[i] = max(dp[i], self._average(prefix_sum, i, j) + dp[j])
        return dp[0]
    def _average(self, prefix_arr, i, j):
        return (prefix_arr[j] - prefix_arr[i]) / float(j - i)
    """
    # 0..........j   j+1.....i  (total needs k groups)
    #  dp[k-1][j]      1 group
    # from 0 - j, divide it into k - 1 group
[
"lchen.msc2019@ivey.ca"
] |
lchen.msc2019@ivey.ca
|
b3fcc97c7467ef879e7c063a68bc03ed11f438e1
|
ffad717edc7ab2c25d5397d46e3fcd3975ec845f
|
/Python/pyesri/EXAMPLES/try_finally.py
|
403502b28258223cd220ecccdbaf6d4ea091d6d2
|
[] |
no_license
|
shaunakv1/esri-developer-conference-2015-training
|
2f74caea97aa6333aa38fb29183e12a802bd8f90
|
68b0a19aac0f9755202ef4354ad629ebd8fde6ba
|
refs/heads/master
| 2021-01-01T20:35:48.543254
| 2015-03-09T22:13:14
| 2015-03-09T22:13:14
| 31,855,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
#!/usr/bin/python
# Python 2 demo of try/except/finally ordering: the int+str addition raises
# TypeError, so "Bottom of try" is skipped, the handler prints the error and
# calls exit(); exit() raises SystemExit, so the finally block still runs
# before the interpreter terminates.
try:
    x = 5
    y = "cheese"
    z = x + y               # TypeError: unsupported operand types
    print "Bottom of try"   # never reached
except Exception as err:
    print err
    exit()
finally:
    print "Cleaning up..."
|
[
"shaunakv1@gmail.com"
] |
shaunakv1@gmail.com
|
b0cc56f055ae7cc4900fceed8dec1cb1322b507b
|
e63a36870512edb7fd947b809631cf153b028997
|
/surveil/api/hooks.py
|
18a8ba224fc1247642016889a8ab4fdf84124df5
|
[
"Apache-2.0"
] |
permissive
|
titilambert/surveil
|
632c7e65d10e03c675d78f278822015346f5c47a
|
8feeb64e40ca2bd95ebd60506074192ecdf627b6
|
refs/heads/master
| 2020-05-25T13:36:59.708227
| 2015-06-29T14:07:07
| 2015-06-29T14:07:07
| 38,249,530
| 1
| 0
| null | 2015-06-29T13:38:04
| 2015-06-29T13:38:03
| null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
# Copyright 2014 - Savoir-Faire Linux inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import influxdb
from pecan import hooks
import pymongo
class DBHook(hooks.PecanHook):
    """Pecan hook wiring per-request datastore clients.

    Before each request a fresh MongoDB client is opened and attached to the
    request, together with the ws-arbiter URL and an InfluxDB client built
    from a DSN; after the request the Mongo client is closed.
    """
    def __init__(self, mongo_url, ws_arbiter_url, influxdb_url):
        # Only the connection targets are stored here; actual clients are
        # created per request in before().
        self.mongo_url = mongo_url
        self.ws_arbiter_url = ws_arbiter_url
        self.influxdb_url = influxdb_url
    def before(self, state):
        self.mongoclient = pymongo.MongoClient(self.mongo_url)
        state.request.mongo_connection = self.mongoclient
        state.request.ws_arbiter_url = self.ws_arbiter_url
        state.request.influxdb_client = influxdb.InfluxDBClient.from_DSN(
            self.influxdb_url
        )
    def after(self, state):
        # Release the per-request Mongo connection.
        self.mongoclient.close()
|
[
"alexandre.viau@savoirfairelinux.com"
] |
alexandre.viau@savoirfairelinux.com
|
12ec553dc3a74926d2a8d0a31bcb4162d631e912
|
c68b99bf1671d1fb5a1a5a0d6df7bb164dd1d20d
|
/Medium/**1111-MaximumNestingDepthOfTwoValidParenthesesStrings.py
|
58c02a74e38a8ba9adc21370c7f5e9fadd219265
|
[] |
no_license
|
r50206v/Leetcode-Practice
|
8db9333e2e3d2a335f439d7e9e57e8c36b69ae6d
|
f9302e93c441f06cc14949605da20978c4289202
|
refs/heads/master
| 2022-05-17T18:09:48.857263
| 2022-04-27T01:02:12
| 2022-04-27T01:02:12
| 192,258,017
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
# Solution
'''
Runtime: 44 ms, faster than 31.28% of Python online submissions for Maximum Nesting Depth of Two Valid Parentheses Strings.
Memory Usage: 12.2 MB, less than 100.00% of Python online submissions for Maximum Nesting Depth of Two Valid Parentheses Strings.
from:
https://leetcode.com/problems/maximum-nesting-depth-of-two-valid-parentheses-strings/discuss/329010/Python-O(n)
'''
class Solution(object):
    def maxDepthAfterSplit(self, seq):
        """Label each paren of the valid sequence seq with group 0 or 1.

        A '(' nested directly inside another open paren gets the opposite
        group of its parent; a ')' gets the group of its matching '('.
        """
        open_positions = []          # indices of currently-unmatched '('
        labels = [0] * len(seq)
        for idx, ch in enumerate(seq):
            if ch == "(":
                if open_positions:
                    labels[idx] = 1 - labels[open_positions[-1]]
                open_positions.append(idx)
            elif ch == ")":
                labels[idx] = labels[open_positions[-1]]
                open_positions.pop()
        return labels
# Solution
'''
Runtime: 36 ms, faster than 76.78% of Python online submissions for Maximum Nesting Depth of Two Valid Parentheses Strings.
Memory Usage: 12 MB, less than 100.00% of Python online submissions for Maximum Nesting Depth of Two Valid Parentheses Strings.
from contest winners~
https://leetcode.com/contest/weekly-contest-144/ranking/
'''
class Solution:
    def maxDepthAfterSplit(self, seq: str) -> List[int]:
        """Greedy split: give each '(' to whichever group is currently shallower,
        and close from the deeper group, keeping the two depths balanced."""
        a = 0  # current open depth assigned to group 0
        b = 0  # current open depth assigned to group 1
        n = len(seq)
        ans = [0] * n
        for i, c in enumerate(seq):
            if c == '(':
                if a < b:
                    a += 1
                    ans[i] = 0
                else:
                    b += 1
                    ans[i] = 1
            else:
                # ')' closes a paren from the deeper of the two groups.
                if a < b:
                    b -= 1
                    ans[i] = 1
                else:
                    a -= 1
                    ans[i] = 0
        return ans
|
[
"r50206v@gmail.com"
] |
r50206v@gmail.com
|
5e03456ca1fac16e1a05a8cc6d80041b70bfde5c
|
be55991401aef504c42625c5201c8a9f14ca7c3b
|
/python全栈3期/IO模型/selectorsDemo01.py
|
532bbf1784d32868773b2e07e5aa460d2c260c73
|
[
"Apache-2.0"
] |
permissive
|
BillionsRichard/pycharmWorkspace
|
adc1f8bb15b58ded489fc8dec0df397601823d2c
|
709e2681fc6d85ff52fb25717215a365f51073aa
|
refs/heads/master
| 2021-09-14T21:12:59.839963
| 2021-08-08T09:05:37
| 2021-08-08T09:05:37
| 143,610,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
# encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: billions.richard@qq.com
@site:
@software: PyCharm
@file: selectorsDemo01.py
@time: 2018/6/2 18:53
"""
from pprint import pprint as P
import selectors
import socket
SELECTOR = selectors.DefaultSelector()

def accept_fun(socket, mask):
    # Accept a new client and register it with the selector for read events.
    # NOTE(review): the parameter name `socket` shadows the imported socket module.
    client_skt, client_addr = socket.accept()
    client_port = client_addr[1]
    P('client coming:%s' % client_port)
    SELECTOR.register(client_skt, selectors.EVENT_READ, read_fun)

def read_fun(socket, mask):
    # Echo received text back upper-cased; empty data means the peer closed.
    data = socket.recv(1000).decode('utf8')
    if data:
        print('received:%s' % data)
        socket.send(data.upper().encode('utf8'))
    else:
        P('no data received....')

server_sock = socket.socket()
server_sock.bind(("127.0.0.1", 8080))
server_sock.listen(100)
server_sock.setblocking(False)  # non-blocking mode, required for the selector loop
SELECTOR.register(server_sock, selectors.EVENT_READ, accept_fun)
while True:
    events = SELECTOR.select()  # block until a registered socket is ready
    for key, mask in events:
        callback = key.data
        callback(key.fileobj, mask)  # invoke the callback registered for this socket
|
[
"295292802@qq.com"
] |
295292802@qq.com
|
c8c79098eb281dfb93aa7a5c3ba0399c3f545203
|
0ff0bd21faecdeebc3a29fc5860d25fb8f079aae
|
/rep_k.py
|
c43b3903b48080a05e6bffc887c6778d0b1d9245
|
[] |
no_license
|
Ponkiruthika112/codesetkataset2
|
652297278e84de07d5d3fc5dfa2eb3f995258cab
|
bd8a96d2fb357ff571f2650fdfca911fba8cc999
|
refs/heads/master
| 2020-04-15T09:49:55.421089
| 2019-02-18T16:18:55
| 2019-02-18T16:18:55
| 164,567,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
# Read the first line of integers (k is its last value), then the array a;
# print the distinct values of a that occur exactly k times.
n=list(map(int,input().split()))
# BUG FIX: was `k=l[-1]`, which raised NameError because `l` is only
# assigned further down; k is the last integer of the first input line.
k=n[-1]
a=list(map(int,input().split()))
l=list(set(a))   # distinct values (set order, as in the original)
s=""
for i in range(0,len(l)):
    if a.count(l[i])==k:
        s=s+str(l[i])+" "
print(s.strip())
#repeat k times
|
[
"noreply@github.com"
] |
Ponkiruthika112.noreply@github.com
|
8df66b74e02e272a2ac172cbc9aba54662670f75
|
b50f43c7c8cba1c0f349870596f12d1a333e6f42
|
/axonius_api_client/cli/grp_system/grp_settings/cmd_update_section.py
|
cce5589c8ccc9641e146f3457660e8495c6476f7
|
[
"MIT"
] |
permissive
|
zahediss/axonius_api_client
|
190ca466e5de52a98af9b527a5d1c132fd8a5020
|
8321788df279ffb7794f179a4bd8943fe1ac44c4
|
refs/heads/master
| 2023-08-01T14:35:17.095559
| 2021-09-13T21:04:23
| 2021-09-13T21:04:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
# -*- coding: utf-8 -*-
"""Command line interface for Axonius API Client."""
from ....tools import json_dump
from ...context import CONTEXT_SETTINGS, click
from ...options import AUTH, SPLIT_CONFIG_OPT, add_options
from .grp_common import EXPORT, SECTION, str_section
# Combined click options shared by this command.
OPTIONS = [*AUTH, EXPORT, SECTION, SPLIT_CONFIG_OPT]


@click.command(name="update-section", context_settings=CONTEXT_SETTINGS)
@add_options(OPTIONS)
@click.pass_context
def cmd(
    ctx,
    url,
    key,
    secret,
    config,
    section,
    export_format,
    **kwargs,
):
    """Update a section from arguments."""
    client = ctx.obj.start_client(url=url, key=key, secret=secret)
    new_config = dict(config)
    # The API attribute name is the parent command group's name with
    # dashes converted to underscores.
    apiname = ctx.parent.command.name.replace("-", "_")
    apiobj = getattr(client, apiname)
    with ctx.obj.exc_wrap(wraperror=ctx.obj.wraperror):
        settings = apiobj.update_section(section=section, **new_config)
        ctx.obj.echo_ok(f"Updated {section!r} with configuration {new_config}")
    # Render the updated settings in the requested format; exit code 0 on
    # success, 1 when export_format matched none of the handled values.
    if export_format == "str":
        str_section(meta=settings)
        ctx.exit(0)
    if export_format == "json-config":
        config = settings["config"]
        click.secho(json_dump(config))
        ctx.exit(0)
    if export_format == "json-full":
        click.secho(json_dump(settings))
        ctx.exit(0)
    ctx.exit(1)
|
[
"jimbosan@gmail.com"
] |
jimbosan@gmail.com
|
28b92fed6d4c115fe7c615905f1a41b510587860
|
b44ae8c215c7577616ce94bbddda57d46ff46577
|
/experiments/convergence/movielens_100K/gaussian_exponential.py
|
a143962077d68e7428eb01f52aa54a4f6ed764b5
|
[] |
no_license
|
changchunli/BMF_Priors
|
06a74d89198b11c0c3ba673a1d4869986cd7bc2d
|
15b20537eefd36347ed84617882eeea1c453e162
|
refs/heads/master
| 2020-03-21T07:50:08.081910
| 2018-06-10T10:22:04
| 2018-06-10T10:22:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
'''
Measure convergence on the MovieLens 100K dataset, with the Gaussian +
Exponential model.
'''
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../"
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_gaussian_exponential import BMF_Gaussian_Exponential
from BMF_Priors.data.movielens.load_data import load_movielens_100K
from BMF_Priors.experiments.convergence.convergence_experiment import measure_convergence_time
import matplotlib.pyplot as plt
''' Run the experiment. '''
R, M = load_movielens_100K()
model_class = BMF_Gaussian_Exponential
# Model/run configuration passed through to the convergence harness.
settings = {
    'R': R,
    'M': M,
    'K': 20,
    'hyperparameters': { 'alpha':1., 'beta':1., 'lamb':0.1 },
    'init': 'random',
    'iterations': 200,
}
# Output files for per-iteration performances and wall-clock times.
fout_performances = './results/performances_gaussian_exponential.txt'
fout_times = './results/times_gaussian_exponential.txt'
repeats = 10  # number of repeated runs measured by the harness
performances, times = measure_convergence_time(
    repeats, model_class, settings, fout_performances, fout_times)

''' Plot the times, and performance vs iterations. '''
plt.figure()
plt.title("Performance against average time")
plt.plot(times, performances)
plt.ylim(0,2000)
plt.figure()
plt.title("Performance against iteration")
plt.plot(performances)
plt.ylim(0,2000)
|
[
"tab43@cam.ac.uk"
] |
tab43@cam.ac.uk
|
041808402b9a434766690092176281e5cd6fc449
|
168bccd6fbd54025edeb526a497f4cd143390608
|
/Datascience/Bee_Word_Project_old/Bee_Word_Project_bkup/asgi.py
|
348f1b65e1c401a5530a6c199f263c2a0ada07ce
|
[] |
no_license
|
mbegumgit/Mumtaz
|
4e9cdd2b9a9b437cb5b3e0534a673aecc1366bd0
|
2edbc5e828ba6803580ff90beaf4c7cc7ace23de
|
refs/heads/master
| 2022-03-05T18:41:28.474102
| 2022-02-18T07:33:06
| 2022-02-18T07:33:06
| 210,820,610
| 0
| 0
| null | 2022-02-18T09:50:24
| 2019-09-25T10:43:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
ASGI config for Bee_Word_Project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Bee_Word_Project.settings')

application = get_asgi_application()  # module-level ASGI callable used by servers
|
[
"you@example.com"
] |
you@example.com
|
598046ba24df38670274f2af60bbeef0dfaf8a4e
|
268b22da698310c1fd0471f94d61e02782cbaf37
|
/Week6/week6work/test/app.py
|
b05a86a61e85ac24690e059365b0a5e5c58330f3
|
[] |
no_license
|
jayquake/DI-Excersises
|
0c1147863753fb29a6f688bd73bdd9acc047c180
|
02cb0ee9baed7fd7736273e8fc68317ba4356e39
|
refs/heads/master
| 2020-12-10T11:38:12.225341
| 2020-05-06T08:34:35
| 2020-05-06T08:34:35
| 233,582,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
from flask import Flask
from flask import render_template
import users
from forms import LoginForm
from flask import request
from flask import session
from flask import flash
app = Flask(__name__)
# NOTE(review): this rebinds the imported `users` module name to its own
# create_database() result.
users = users.create_database()
app.config['SECRET_KEY'] = 'you-will-never-guess'  # required for session/flash


@app.route('/')
def home():
    # Render the landing page.
    html = render_template('index.html')
    return html
@app.route('/login', methods=['POST'])
def login():
    """Handle the login form POST.

    Bug fix: the original view never returned a response (a Flask view
    returning None is an error).  Successful admin logins now render the
    home page; failures flash a message and re-render the login form.
    """
    form = LoginForm()
    login_page = render_template('login.html', form=form)
    if request.form['password'] == 'password' and request.form['username'] == 'admin':
        session['logged_in'] = True
        return render_template('index.html')
    flash('wrong password!')
    return login_page
if __name__ == '__main__':
    # Start the Flask development server when executed directly.
    app.run()
|
[
"jayquake@gmail.com"
] |
jayquake@gmail.com
|
8066a57e0fc178c600664597a65ee9595bc1c3a3
|
06525f75c7fe5ba75b0737d7b93e48cca9c24706
|
/django_gotolong/jsched/tasks.py
|
f08ea74499eb6979c7339b33a028882120f323fd
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
webclinic017/gotolong
|
029314121c9cb6ce66c95fab6a237aca9a3ecd6c
|
3bb5ec7c7a5734e7121d308769a3ed8bb01e7f30
|
refs/heads/master
| 2022-04-21T08:06:11.669599
| 2022-04-02T20:13:11
| 2022-04-02T20:13:11
| 247,398,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
# Create your views here.
# Create your views here.
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
from datetime import date, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
from django_gotolong.bhav.views import bhav_fetch
from django_gotolong.ftwhl.views import ftwhl_fetch
from django.utils import timezone
# from background_task import background
import requests
import sys
# notify_user(user.id, schedule=90) # 90 seconds from now
# notify_user(user.id, schedule=timedelta(minutes=20)) # 20 minutes from now
# notify_user(user.id, schedule=timezone.now()) # at a specific time
# @background(schedule=15)
def jsched_task_bg():
print('tasks.py : notify_nse')
# redirect('bhav-fetch')
# redirect('ftwhl-fetch')
def jsched_task_daily():
print('tasks.py : jsched_task_daily: to be fixed later')
return
if True:
tmodules = ['bhav', 'ftwhl']
for tmod in tmodules:
try:
url = 'http://127.0.0.1:8000/' + tmod + '/fetch/'
# connect timeout - 5 sec
# read timeout - 14 sec
response = requests.get(url, allow_redirects=False, timeout=(15, 60))
print(url, response.url, response.status_code)
except:
print("Unexpected error:", url, sys.exc_info())
def jsched_task_common():
print('tasks.py : common tasks')
# HttpResponseRedirect(reverse('bhav-fetch'))
# HttpResponseRedirect(reverse('ftwhl-fetch'))
# redirect('bhav-fetch')
# redirect('ftwhl-fetch')
def jsched_task_startup():
print('tasks.py : start')
# notify_nse(repeat=Task.DAILY, repeat_until=None)
# scheduler = BackgroundScheduler()
# scheduler.add_job(jsched_task_common, 'interval', days=1)
# scheduler.start()
|
[
"surinder.kumar.432@gmail.com"
] |
surinder.kumar.432@gmail.com
|
30297a3554de37ae2bbeae152c60ce87fa548148
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_ConstantTrend_Seasonal_WeekOfYear_ARX.py
|
301a6ea127790f581fe24c9e7a8e24a8c81520e9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 180
|
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['ConstantTrend'] , ['Seasonal_WeekOfYear'] , ['ARX'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
0a3851483a7c2783d20f9210c909db95e5a3effb
|
ce2e307f8725b7bbe4c9177ed0f6b8bd74ae6cbe
|
/src/cw_20/models.py
|
3aec0e1efda9e87be7b828ff9e1b79bcdfbc6885
|
[] |
no_license
|
alexshchegretsov/Teach_Me_Skills_Django_homeworks
|
f2a096f60bf8fe2e45693dd2352341529007327c
|
dcde073292e1cfb15708cdb3dd8d539fae37143a
|
refs/heads/master
| 2020-05-25T00:07:36.348802
| 2019-06-06T12:21:35
| 2019-06-06T12:21:35
| 187,528,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
from django.db import models
class Customer(models.Model):
first_name = models.CharField(max_length=200,null=False)
last_name = models.CharField(max_length=200, null=False)
profession = models.CharField(max_length=200, null=True)
age = models.IntegerField()
def __str__(self):
return f'{self.first_name} {self.last_name}'
|
[
"nydollz77@gmail.com"
] |
nydollz77@gmail.com
|
df4b35ec2d7b11d26059caeb6712174bde347a30
|
b8cb10a3c99961f44ac758b3683523627d032680
|
/runoschool/runo/migrations/0008_auto_20201020_0530.py
|
735a3a0d26c067a411e76cd8bf585ce29bf7dc25
|
[] |
no_license
|
samuelatuma1/runoschool
|
4a2183be4a7e856723fc5368c90edcb79d6ed29e
|
ed75fb4077cf5ff86b7d546d3346fc4625bee97e
|
refs/heads/master
| 2023-01-29T20:22:25.160805
| 2020-12-14T08:33:13
| 2020-12-14T08:33:13
| 312,167,155
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
# Generated by Django 3.1.1 on 2020-10-20 04:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('runo', '0007_auto_20201019_1750'),
]
operations = [
migrations.RenameField(
model_name='welcleaders',
old_name='extraImg1',
new_name='extraLeader1',
),
migrations.RenameField(
model_name='welcleaders',
old_name='extraImg2',
new_name='extraLeader2',
),
migrations.RenameField(
model_name='welcleaders',
old_name='welcImg',
new_name='welcLeader',
),
]
|
[
"atumasaake@gmail.com"
] |
atumasaake@gmail.com
|
6a0feedd97724c95d4013b2c5c578158cfa386b9
|
c558fb26ab6cdc46c0a5ad292a34c20b52f96f42
|
/crud/employee/forms.py
|
8625d6cb27a789ac7671469062b188b1de343091
|
[] |
no_license
|
VaultHRMS/HRMS
|
9dad7e5416f008075ce4e50226e76ca86ae7d9b0
|
de714c63494a1a1260116a66e54fac8c032fd661
|
refs/heads/master
| 2020-08-15T07:37:02.311773
| 2019-11-05T10:09:44
| 2019-11-05T10:09:44
| 215,301,571
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
from django import forms
from employee.models import Employee
class EmployeeForm(forms.ModelForm):
class Meta:
model = Employee
fields = "__all__"
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
5d62effa01623bbc899e69cedeac320b1a623569
|
e8501308efed70829ba70332d5ed7b956a245a41
|
/Lab/Lab11/Lab11-4.py
|
9ce9d856102df3c1961115034eac8160b5366c1b
|
[] |
no_license
|
Jinmin-Goh/DeepLearningPractice
|
293f7c41144d64c1044d27dadf16f563d7caabb4
|
b13cff775ad350deb0fde982610276c7b2fc7798
|
refs/heads/master
| 2021-02-28T07:21:51.231203
| 2020-03-20T17:15:42
| 2020-03-20T17:15:42
| 245,673,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,843
|
py
|
# Lab 11-4
# Made by: Jinmin Goh
# Date: 20200318
# TF layers, fancy coding
import tensorflow as tf
# import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
tf.set_random_seed(777) # reproducibility
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
# hyper parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
class Model:
def __init__(self, sess, name):
self.sess = sess
self.name = name
self._build_net()
def _build_net(self):
with tf.variable_scope(self.name):
# dropout (keep_prob) rate 0.7~0.5 on training, but should be 1
# for testing
self.training = tf.placeholder(tf.bool)
# input place holders
self.X = tf.placeholder(tf.float32, [None, 784])
# img 28x28x1 (black/white), Input Layer
X_img = tf.reshape(self.X, [-1, 28, 28, 1])
self.Y = tf.placeholder(tf.float32, [None, 10])
# Convolutional Layer #1
conv1 = tf.layers.conv2d(inputs=X_img, filters=32, kernel_size=[3, 3],
padding="SAME", activation=tf.nn.relu)
# Pooling Layer #1
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2],
padding="SAME", strides=2)
dropout1 = tf.layers.dropout(inputs=pool1,
rate=0.3, training=self.training)
# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(inputs=dropout1, filters=64, kernel_size=[3, 3],
padding="SAME", activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2],
padding="SAME", strides=2)
dropout2 = tf.layers.dropout(inputs=pool2,
rate=0.3, training=self.training)
# Convolutional Layer #2 and Pooling Layer #2
conv3 = tf.layers.conv2d(inputs=dropout2, filters=128, kernel_size=[3, 3],
padding="same", activation=tf.nn.relu)
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2],
padding="same", strides=2)
dropout3 = tf.layers.dropout(inputs=pool3,
rate=0.3, training=self.training)
# Dense Layer with Relu
flat = tf.reshape(dropout3, [-1, 128 * 4 * 4])
dense4 = tf.layers.dense(inputs=flat,
units=625, activation=tf.nn.relu)
dropout4 = tf.layers.dropout(inputs=dense4,
rate=0.5, training=self.training)
# Logits (no activation) Layer: L5 Final FC 625 inputs -> 10 outputs
self.logits = tf.layers.dense(inputs=dropout4, units=10)
# define cost/loss & optimizer
self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.Y))
self.optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(self.cost)
correct_prediction = tf.equal(
tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def predict(self, x_test, training=False):
return self.sess.run(self.logits,
feed_dict={self.X: x_test, self.training: training})
def get_accuracy(self, x_test, y_test, training=False):
return self.sess.run(self.accuracy,
feed_dict={self.X: x_test,
self.Y: y_test, self.training: training})
def train(self, x_data, y_data, training=True):
return self.sess.run([self.cost, self.optimizer], feed_dict={
self.X: x_data, self.Y: y_data, self.training: training})
# initialize
sess = tf.Session()
m1 = Model(sess, "m1")
sess.run(tf.global_variables_initializer())
print('Learning Started!')
# train my model
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
c, _ = m1.train(batch_xs, batch_ys)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Test model and check accuracy
print('Accuracy:', m1.get_accuracy(mnist.test.images, mnist.test.labels))
|
[
"eric970901@gmail.com"
] |
eric970901@gmail.com
|
d46f12c676788ef01cdfbb69ba6c673db8a4a50d
|
affe6c648b9ce2434919ccbd88d36d969a619f94
|
/moya/containers.py
|
84db000ed363867ba8e37c4deb4c9f275fc93932
|
[
"BSD-3-Clause"
] |
permissive
|
thiagocalheiros/weasyprint_for_awslambda
|
578c4e8c10aef40f82e36f0a9de5ec31032335f8
|
4080c49a3fb5fc94fca75bf38e8b10ee1acfb7ce
|
refs/heads/master
| 2020-03-18T18:19:08.855086
| 2018-05-31T19:32:40
| 2018-05-31T19:32:40
| 135,084,321
| 0
| 0
| null | 2018-05-27T21:52:46
| 2018-05-27T21:52:46
| null |
UTF-8
|
Python
| false
| false
| 3,050
|
py
|
from __future__ import unicode_literals
from __future__ import print_function
from .compat import PY2, text_type, implements_to_string
from .urltools import urlencode
from threading import Lock
if PY2:
from urlparse import parse_qsl
else:
from urllib.parse import parse_qsl
from collections import OrderedDict
class LRUCache(OrderedDict):
"""A dictionary-like container that stores a given maximum items.
If an additional item is added when the LRUCache is full, the least recently used key is
discarded to make room for the new item.
"""
def __init__(self, cache_size=None):
self.cache_size = cache_size
self.lock = Lock()
super(LRUCache, self).__init__()
def __reduce__(self):
return self.__class__, (self.cache_size,)
def __setitem__(self, key, value):
with self.lock:
if self.cache_size is not None and key not in self:
if len(self) >= self.cache_size:
self.popitem(last=False)
OrderedDict.__setitem__(self, key, value)
def lookup(self, key):
with self.lock:
value = OrderedDict.__getitem__(self, key)
del self[key]
OrderedDict.__setitem__(self, key, value)
return value
@implements_to_string
class QueryData(OrderedDict):
"""A container for data encoded in a url query string"""
@classmethod
def from_qs(cls, qs, change_callback=None):
qd = cls()
for k, v in parse_qsl(qs, keep_blank_values=True, strict_parsing=False):
qd.setdefault(k, []).append(v)
return qd
def copy(self):
return OrderedDict(self)
def update(self, d):
"""Specialized update, setting a value to None will delete it. Also ensures that the query data contains lists"""
for k, v in d.items():
if v is None:
if k in self:
del self[k]
else:
if isinstance(v, (list, set, tuple, dict)) or hasattr(v, 'items'):
self[k] = list(v)
else:
if v is None:
v = ''
elif not isinstance(v, text_type):
v = text_type(v)
self[k] = [v]
def __str__(self):
return urlencode(self)
def __repr__(self):
return '<querydata "{}">'.format(urlencode(self))
def __setitem__(self, k, v):
if v is None:
ret = self.__delitem__(k)
else:
if isinstance(v, (set, tuple)):
v = list(v)
if not isinstance(v, list):
v = [text_type(v)]
ret = super(QueryData, self).__setitem__(k, v)
return ret
def __delitem__(self, k):
ret = super(QueryData, self).__delitem__(k)
return ret
if __name__ == "__main__":
qd = QueryData.from_qs('foo=bar&a=1&b=2&hobbit=frodo&hobbit=sam')
print(qd.items())
qd.update({'foo': None})
print(qd.items())
|
[
"thiagovccalheiros@gmail.com"
] |
thiagovccalheiros@gmail.com
|
3f9fb9d22bc078f7448e8a2437c435ac8a8a2f3c
|
9a7c84122bb5d52a4feeea37e6434adf844fe10a
|
/drf/SpiderPlatform/SpiderPlatform/wsgi.py
|
fcd3b77ab8b295e18bfa4df1b7562bfd1d0cb535
|
[] |
no_license
|
yangwen1997/code
|
f4dc17850b186860d6304efb8dd92a189f6b5e12
|
e697ca24405372c9320ed170478b16b4d539b13f
|
refs/heads/master
| 2022-12-10T21:50:37.361086
| 2020-08-14T01:02:12
| 2020-08-14T01:02:12
| 148,280,123
| 5
| 0
| null | 2022-12-08T10:49:07
| 2018-09-11T07:44:21
|
HTML
|
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
WSGI config for SpiderPlatform project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SpiderPlatform.settings')
application = get_wsgi_application()
|
[
"1120021365@qq.com"
] |
1120021365@qq.com
|
daff31afed353adf8d2c6c28cfc9180c89b9fffc
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_corning.py
|
55f3a44638ba8fcc4697231b72956c80f298f130
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
#calss header
class _CORNING():
def __init__(self,):
self.name = "CORNING"
self.definitions = corn
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['corn']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
46fef4fb96cf282a8ebbeeff3f92d583c9c9beca
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_058/ch23_2019_09_16_18_49_35_451655.py
|
ff62dbbbbbfe3a0b1e8e382db69bd27edc33dfd8
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
def verifica_idade(x):
if x>=21:
return "Libeirado EUA e BRASIL"
elif x>=18:
return "Liberado EUA"
else:
return "Não está liberado"
|
[
"you@example.com"
] |
you@example.com
|
8370c8f3e10ffb2ae7e3c1ef790b16d98bc32461
|
4114e7371af1da819a1c7a11ccc63a7961fd3c11
|
/tensorbridge/src/main/python/TFServer.py
|
8c272f4ca236cb6aba45066135f3da4938be9f43
|
[
"Apache-2.0"
] |
permissive
|
BBN-E/LearnIt
|
dad4e71da2e2028875807545ce75801067a7dd37
|
4f602f113cac9f4a7213b348a42c0fef23e2739c
|
refs/heads/master
| 2022-12-18T22:15:11.877626
| 2021-09-09T04:28:31
| 2021-09-09T04:28:31
| 193,987,875
| 8
| 2
|
Apache-2.0
| 2022-12-10T21:02:36
| 2019-06-26T22:53:56
|
Java
|
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
from flask import Flask, request, g, Response, jsonify
import sys
import json
import os
def initDecoder(decoderClass,paramsFile):
if len(decoderClass) == 0:
print "Empty decoder class not permitted"
sys.exit(1)
parts = decoderClass.split('.')
module = ".".join(parts[:-1])
print "Attempting to import module: %s" % module
m = __import__(module)
print dir(m)
for part in parts[1:]:
print "Processing part %s" % part
m = getattr(m, part)
return m(paramsFile)
app = Flask(__name__)
app.config.from_envvar('TF_SERVER_PARAMS')
decoder = initDecoder(app.config['DECODER_CLASS'], app.config['PARAMS'])
print "Decoder object initialized"
@app.route("/ready", methods=['GET'])
def ready():
return "true"
@app.route("/decode", methods=['POST'])
def decode():
decoded ={'labelToClassificationScore' : decoder.decode(request.get_json())}
newName = json['name'][::-1]
newVal = json['value'] + 100
return { 'name': newName, 'value' : newVal }
return jsonify(decoded)
@app.route("/shutdown", methods=['POST'])
def shutdown():
decoder.shutdown()
# see http://flask.pocoo.org/snippets/67/
shutdown_func = request.environ.get("werkzeug.server.shutdown")
if shutdown_func is None:
raise RuntimeError("Could not get shutdown function")
shutdown_func()
return "bye"
port = os.environ['TF_SERVER_PORT']
print "Running Tensorflow server on port %s" % port
app.run(port=port)
|
[
"hqiu@bbn.com"
] |
hqiu@bbn.com
|
43bb0e8f257aadaa0d1d098de741248fe249b3d3
|
a12a4be7e8c792b4c1f2765d3e7a43056e9196b0
|
/17-letter-combinations-of-a-phone-number/17-letter-combinations-of-a-phone-number.py
|
dcf91ce053c7b299be5f62a8c2801c58805c2e6d
|
[] |
no_license
|
fdas3213/Leetcode
|
d4b7cfab70446b3f6a961252a55b36185bc87712
|
1335d5759c41f26eb45c8373f33ee97878c4a638
|
refs/heads/master
| 2022-05-28T16:24:15.856679
| 2022-05-19T21:56:35
| 2022-05-19T21:56:35
| 94,024,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
#2. backtrack
res = []
if not digits:
return res
self.digit_map = {"1":"", "2":"abc","3":"def","4":"ghi",
"5":"jkl","6":"mno","7":"pqrs","8":"tuv", "9":"wxyz"}
def backtrack(start: int, cur: str):
if start==len(digits):
res.append(cur)
return
for i in range(start, len(digits)):
#to avoid cases like start=0, i=1, so "d" or "e" or "f" is added to the result list
if i > start:
continue
digit = digits[i]
for letter in self.digit_map[digit]:
backtrack(i+1, cur+letter)
backtrack(0, "")
return res
|
[
"szx9404@gmail.com"
] |
szx9404@gmail.com
|
60741e6eaadbfc2492da1af309879c5578cd89a2
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Flector_medicated_plaster_SmPC.py
|
b4cb409483f8f16e7249caaae0809f3c362a58ec
|
[] |
no_license
|
urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087
| 2013-07-16T14:05:41
| 2013-07-16T14:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
{'_data': [['Common', [['Skin', u'2,95%'], ['General', u'5%']]], ['Uncommon', []], ['Rare', []]],
'_note': u' ?MSFU',
'_pages': [3, 4],
u'_rank': 2,
u'_type': u'MSFU'}
|
[
"daro@daro-ThinkPad-X220.(none)"
] |
daro@daro-ThinkPad-X220.(none)
|
c7e3ef11b42f06a59db7b6364d8c81fa825e86a8
|
fa08645f5804c98cb5f8354a851e8b4dc93f8224
|
/assignment2/code/hot_city_mapper.py
|
0b78294d7d3878aedab09eb727efcaf599035f58
|
[] |
no_license
|
chrizandr/distributed_computing
|
a9aab6ba04cc5282a573f910434bd7525894f7cc
|
7979630bd97e36928f272f89be20e9990eb82747
|
refs/heads/master
| 2021-05-04T12:11:13.397801
| 2019-07-17T13:06:20
| 2019-07-17T13:06:20
| 120,289,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
#!/usr/bin/env python3
"""mapper.py."""
import sys
# input comes from STDIN (standard input)
data = []
for line in sys.stdin:
line = line.strip()
city, date, hi, lo = line.split(',')
print('{}\t{}'.format(city, hi))
|
[
"chris.g14@iiits.in"
] |
chris.g14@iiits.in
|
a4316d4be42d7c16f885ca1d1fb52f0985d3fdfd
|
21945fb6674298a3b2de064c55ca01205c646a1c
|
/week07/homework3.py
|
bb7da6f1bfdf92cab0f094fbbce5d737be54a4a1
|
[] |
no_license
|
Jiangjao/Python005-01
|
c200c4feff1042c7069595e64d6e7abd6530b762
|
04275c2d5d3d134f9ea0f8fe5eabceae9317683b
|
refs/heads/main
| 2023-03-23T20:51:05.371201
| 2021-03-14T08:04:06
| 2021-03-14T08:04:06
| 314,480,746
| 0
| 0
| null | 2020-11-20T07:39:25
| 2020-11-20T07:39:25
| null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
import time
import math
def timer(func):
def inner(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
time_delata = end - start
# time_delata = round((end - start),10)
print(func.__name__,f"花费时间大概{time_delata}毫秒")
return time_delata
return inner
@timer
def sayHello():
print('hello')
sayHello()
|
[
"918718278@qq.com"
] |
918718278@qq.com
|
3e126c527ecdbabbe3d020d939bbf9ae244d670e
|
0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e
|
/regexlib/2021-5-15/python_re2_test_file/regexlib_7979.py
|
bffb80e110380d59813cacb6a92cf1f35316152e
|
[
"MIT"
] |
permissive
|
agentjacker/ReDoS-Benchmarks
|
c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
refs/heads/main
| 2023-05-10T13:57:48.491045
| 2021-05-21T11:19:39
| 2021-05-21T11:19:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
# 7979
# \b([A-Za-z0-9]+)(-|_|\.)?(\w+)?@\w+\.(\w+)?(\.)?(\w+)?(\.)?(\w+)?\b
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:""+"A"*10000+"! _1SLQ_1"
import re2 as re
from time import perf_counter
regex = """\b([A-Za-z0-9]+)(-|_|\.)?(\w+)?@\w+\.(\w+)?(\.)?(\w+)?(\.)?(\w+)?\b"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "A" * i * 10000 + "! _1SLQ_1"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!")
|
[
"liyt@ios.ac.cn"
] |
liyt@ios.ac.cn
|
4fbe0312706cd863d435ee206d2cd50d864aa244
|
433ca57245fe15afd309323e82f3bdf3287b4831
|
/authentication/urls.py
|
f79cea9e911a931f09d7db1c6c59dadfc0af64d4
|
[] |
no_license
|
greenteamer/ceiling-django
|
db5170faada0f1582c744fa28c638e8671dc2ab9
|
b4a469ae7d2ce6ed36ae51af60633de1fdb43ea4
|
refs/heads/master
| 2020-04-09T19:01:40.273226
| 2018-12-05T14:39:15
| 2018-12-05T14:39:15
| 160,531,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
from django.conf.urls import url
from authentication.views import *
urlpatterns = [
url(r'logout/$', logoutView),
url(r'login/$', loginView),
url(r'register/$', registerView),
]
|
[
"greenteamer@bk.ru"
] |
greenteamer@bk.ru
|
9b252939ed7fd8376f54021b32dab3f487c5ebcb
|
9ac405635f3ac9332e02d0c7803df757417b7fee
|
/permisos/migrations/0002_aditionaldefaultpermission.py
|
5aea9ff8ce03397279fa7f4af97be041d2238bc0
|
[] |
no_license
|
odecsarrollo/07_intranet_proyectos
|
80af5de8da5faeb40807dd7df3a4f55f432ff4c0
|
524aeebb140bda9b1bf7a09b60e54a02f56fec9f
|
refs/heads/master
| 2023-01-08T04:59:57.617626
| 2020-09-25T18:01:09
| 2020-09-25T18:01:09
| 187,250,667
| 0
| 0
| null | 2022-12-30T09:36:37
| 2019-05-17T16:41:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 677
|
py
|
# Generated by Django 2.0.2 on 2018-02-17 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('permisos', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AditionalDefaultPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'permissions': (('list_user', 'Can list user'), ('list_permission', 'Can list permission'), ('detail_user', 'Can see user')),
'managed': False,
},
),
]
|
[
"fabio.garcia.sanchez@gmail.com"
] |
fabio.garcia.sanchez@gmail.com
|
92a9d3a6a54f6b4fa8608f888852f05dc5e91407
|
faa0ce2a95da958be3bfb171cdff29eeb43c3eb6
|
/py-exercises/unicorn-rpg/item/tonics.py
|
27820bee3bba706d563907978d5efc5f887263e1
|
[
"MIT"
] |
permissive
|
julianapeace/digitalcrafts-exercises
|
98fe4e20420c47cf9d92d16c45ac60dc35a49a6a
|
98e6680138d55c5d093164a47da53e1ddb6d064c
|
refs/heads/master
| 2021-08-30T04:17:09.997205
| 2017-12-16T00:22:22
| 2017-12-16T00:22:22
| 103,176,043
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
from item.base import Item
class Tonic(Item):
def equip(self, hero):
print("{}'s health is {} out of {}.".format(
hero.name, hero.health, hero.max_health))
self.pause()
print("{} eats the {}.".format(
hero.name, self.name))
self.pause()
for i in range(3):
print("~ * CHOMP * ~")
self.pause()
hero.health = hero.max_health
print("{}'s health is now {} out of {}!".format(
hero.name, hero.health, hero.max_health))
|
[
"chancecordelia@gmail.com"
] |
chancecordelia@gmail.com
|
d0c478661d612888172e09bd9e3c4ebf31caf74f
|
68e5e2c9a7e9372f536edf3d99847067eb734e75
|
/11-使用django-rest-framework/typeidea/typeidea/typeidea/settings/develop.py
|
9c2b99d4d96bb7a0ad7244b42325d5c3b25ce633
|
[] |
no_license
|
gy0109/Django-enterprise-development-logs--huyang
|
f04d21df6d45f5d2f226760d35e38042f74a7ea8
|
ab4505f8cdaf0c1f9e3635591cd74645a374a73f
|
refs/heads/master
| 2020-05-17T05:24:51.602859
| 2019-05-08T03:42:13
| 2019-05-08T03:42:13
| 183,534,431
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
from .base import *
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'typeidea',
'USER': 'root',
'PASSWORD': 'gy0109',
'HOST': '127.0.0.1',
'PORT': '3306',
'TEST': {
'CHARSET': 'utf8', #
# 'COLLATION': 'utf8_general_ci',
'NAME': 'mytextdatabase', # 配置单元测试的的数据库
},
# 'CHARSET': 'utf8'
}
}
# debug_toolbar本地系统优化方式的配置
INSTALLED_APPS += [
'debug_toolbar',
'debug_toolbar_line_profiler',
]
MIDDLEWARE += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = ['127.0.0.1']
# # debug_toobar本地系统优化配置--第三方包panel --火焰图
DEBUG_TOOLBAR_PANELS = [
# 'djdt_flamegraph.FlamegraphPanel', 报错啊
'debug_toolbar_line_profiler.panel.ProfilingPanel',
]
#
|
[
"1974326896@qq.com"
] |
1974326896@qq.com
|
1fa37d8012d63b7f1df5afe32309fb8374bcd5c0
|
0f79fd61dc47fcafe22f83151c4cf5f2f013a992
|
/CodeUp/[059~064] 기초-조건및선택실행구조/060.py
|
922d887c4b510638f2be4dabaac638aa661ab7a6
|
[] |
no_license
|
sangm1n/problem-solving
|
670e119f28b0f0e293dbc98fc8a1aea74ea465ab
|
bc03f8ea9a6a4af5d58f8c45c41e9f6923f55c62
|
refs/heads/master
| 2023-04-22T17:56:21.967766
| 2021-05-05T12:34:01
| 2021-05-05T12:34:01
| 282,863,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
# 세 정수가 입력되었을 때, 짝(even)/홀(odd)을 출력
a, b, c = map(int, input().split())
print('even' if a%2==0 else 'odd')
print('even' if b%2==0 else 'odd')
print('even' if c%2==0 else 'odd')
|
[
"dltkd96als@naver.com"
] |
dltkd96als@naver.com
|
e52284eb219985f90a9b3c75e20f27b97df69abc
|
893597d91fe6de25cdd3e8427c4ebba29d3cabe1
|
/tests/test_scaffold.py
|
af20eb27938d8aea2c9c8159b9a8598d08f22bb7
|
[
"Apache-2.0"
] |
permissive
|
AugustasV/ploomber
|
d51cefd529bdbf6c2bc82485ba77add6bb346f2b
|
b631a1b21da64bb7b9525db1c29c32ee3c0e48b4
|
refs/heads/master
| 2023-08-27T02:22:55.556200
| 2021-10-31T02:52:28
| 2021-10-31T02:52:28
| 423,189,549
| 0
| 0
|
Apache-2.0
| 2021-10-31T15:44:17
| 2021-10-31T15:44:16
| null |
UTF-8
|
Python
| false
| false
| 3,380
|
py
|
from pathlib import Path
import ast
import pytest
from ploomber import tasks
from ploomber import scaffold
@pytest.mark.parametrize('name', ['task.py', 'task.ipynb'])
@pytest.mark.parametrize('extract_upstream', [False, True])
@pytest.mark.parametrize('extract_product', [False, True])
def test_renders_valid_script(name, extract_product, extract_upstream):
loader = scaffold.ScaffoldLoader('ploomber_add')
out = loader.render(name,
params=dict(extract_product=extract_product,
extract_upstream=extract_upstream))
# make sure it generates valid python code, except for the sql template
if not name.endswith('.sql'):
ast.parse(out)
@pytest.mark.parametrize('extract_upstream', [False, True])
@pytest.mark.parametrize('extract_product', [False, True])
def test_renders_valid_function(extract_product, extract_upstream):
loader = scaffold.ScaffoldLoader('ploomber_add')
out = loader.render('function.py',
params=dict(function_name='some_function',
extract_product=extract_product,
extract_upstream=extract_upstream))
module = ast.parse(out)
assert module.body[0].name == 'some_function'
def test_create_function(backup_test_pkg, tmp_directory):
loader = scaffold.ScaffoldLoader('ploomber_add')
loader.create('test_pkg.functions.new_function',
dict(extract_product=False, extract_upstream=True),
tasks.PythonCallable)
code = Path(backup_test_pkg, 'functions.py').read_text()
module = ast.parse(code)
function_names = {
element.name
for element in module.body if hasattr(element, 'name')
}
assert 'new_function' in function_names
def test_add_task_from_scaffold(backup_test_pkg, tmp_directory):
yaml = """
meta:
source_loader:
module: test_pkg
extract_product: True
tasks:
- source: notebook.ipynb
- source: notebook.py
- source: test_pkg.functions.my_new_function
"""
Path('pipeline.yaml').write_text(yaml)
# FIXME: this will fail because TaskSpec validates that the
# dotted path actually exists. I think the cleanest solution
# is to add a special class method for DAGSpec that allows the lazy
# load to skip validating the last attribute...
spec, path_to_spec = scaffold.load_dag()
scaffold.add(spec, path_to_spec)
code = Path(backup_test_pkg, 'functions.py').read_text()
module = ast.parse(code)
function_names = {
element.name
for element in module.body if hasattr(element, 'name')
}
assert 'my_new_function' in function_names
assert Path(backup_test_pkg, 'notebook.ipynb').exists()
assert Path(backup_test_pkg, 'notebook.py').exists()
def test_add_task_when_using_import_tasks_from(tmp_directory):
spec = """
meta:
import_tasks_from: subdir/tasks.yaml
extract_product: True
tasks: []
"""
tasks = """
- source: notebook.py
"""
Path('pipeline.yaml').write_text(spec)
subdir = Path('subdir')
subdir.mkdir()
(subdir / 'tasks.yaml').write_text(tasks)
spec, path_to_spec = scaffold.load_dag()
scaffold.add(spec, path_to_spec)
assert (subdir / 'notebook.py').exists()
|
[
"github@blancas.io"
] |
github@blancas.io
|
c29072f0f943d98e21ef746e7367c84f8a47ff84
|
a908a9f1f6028fe78a5b23621dff4b8fa7047414
|
/flatmap_lv1_larger/constants.py
|
8bb59c011796771343de9fdada9c6a22e6f139a2
|
[] |
no_license
|
ViZDoomBot/stable-baselines-agent
|
502edd5e64c45a6adbe073a22e477b0e41ac213d
|
a76c1c3449dab518462f6a7bc2a0dcb668f08b77
|
refs/heads/main
| 2023-04-27T14:45:27.389418
| 2021-05-05T06:54:51
| 2021-05-05T06:54:51
| 340,531,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : constants.py
# @Author: harry
# @Date : 1/27/21 7:08 PM
# @Desc : some constant definitions
import sys
import os
import vizdoom as vzd
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from common.utils import make_expand_action_f
# atomic actions
NUM_ATOMIC_ACTIONS = 7
MOVE_FORWARD = 0
MOVE_BACKWARD = 1
MOVE_LEFT = 2
MOVE_RIGHT = 3
TURN_LEFT = 4
TURN_RIGHT = 5
ATTACK = 6
_expand_action = make_expand_action_f(NUM_ATOMIC_ACTIONS)
# action space (both atomic and combination actions)
ACTION_LIST = [
_expand_action(),
_expand_action(MOVE_FORWARD),
_expand_action(MOVE_BACKWARD),
_expand_action(MOVE_LEFT),
_expand_action(MOVE_RIGHT),
_expand_action(TURN_LEFT),
_expand_action(TURN_RIGHT),
_expand_action(ATTACK),
_expand_action(MOVE_FORWARD, TURN_LEFT),
_expand_action(MOVE_FORWARD, TURN_RIGHT),
_expand_action(MOVE_LEFT, TURN_RIGHT),
_expand_action(MOVE_RIGHT, TURN_LEFT),
_expand_action(MOVE_FORWARD, ATTACK),
_expand_action(MOVE_BACKWARD, ATTACK),
_expand_action(MOVE_LEFT, ATTACK),
_expand_action(MOVE_RIGHT, ATTACK),
]
CONSTANTS_DICT = {
'scenario_cfg_path': '../scenarios/flatmap_lv1_no_hud.cfg',
'game_args': '-host 1 -deathmatch +sv_forcerespawn 1 +sv_noautoaim 1 '
'+sv_respawnprotect 1 +sv_spawnfarthest 1 +sv_nocrouch 1 +viz_respawn_delay 0 +viz_nocheat 0',
'num_bots': 8,
'action_list': ACTION_LIST,
'num_actions': len(ACTION_LIST),
'resized_height': 120,
'resized_width': 120,
'preprocess_shape': (120, 120),
'extra_features': [vzd.GameVariable.HEALTH, vzd.GameVariable.AMMO5, vzd.GameVariable.ARMOR],
'extra_features_norm_factor': [100.0, 50.0, 200.0],
}
|
[
"zhouziqun@cool2645.com"
] |
zhouziqun@cool2645.com
|
c5b2a1385a8b9cc616d3b0327b4a190a9d888330
|
08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2
|
/kubernetes/client/models/v1_load_balancer_ingress.py
|
aa2f3c191214800fe58682c91dad6f3156d6d464
|
[
"Apache-2.0"
] |
permissive
|
ex3cv/client-python
|
5c6ee93dff2424828d064b5a2cdbed3f80b74868
|
2c0bed9c4f653472289324914a8f0ad4cbb3a1cb
|
refs/heads/master
| 2021-07-12T13:37:26.049372
| 2017-10-16T20:19:01
| 2017-10-16T20:19:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,071
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LoadBalancerIngress(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # swagger_types: attribute name -> swagger type
    # attribute_map: attribute name -> JSON key in the API definition
    swagger_types = {
        'hostname': 'str',
        'ip': 'str'
    }

    attribute_map = {
        'hostname': 'hostname',
        'ip': 'ip'
    }

    def __init__(self, hostname=None, ip=None):
        """
        V1LoadBalancerIngress - a model defined in Swagger
        """
        # Backing fields start unset; assignment goes through the property
        # setters so any future validation there would apply.
        self._hostname = None
        self._ip = None
        self.discriminator = None

        if hostname is not None:
            self.hostname = hostname
        if ip is not None:
            self.ip = ip

    @property
    def hostname(self):
        """
        Gets the hostname of this V1LoadBalancerIngress.
        Hostname is set for load-balancer ingress points that are DNS based
        (typically AWS load-balancers).

        :return: The hostname of this V1LoadBalancerIngress.
        :rtype: str
        """
        return self._hostname

    @hostname.setter
    def hostname(self, hostname):
        """
        Sets the hostname of this V1LoadBalancerIngress.

        :param hostname: The hostname of this V1LoadBalancerIngress.
        :type: str
        """
        self._hostname = hostname

    @property
    def ip(self):
        """
        Gets the ip of this V1LoadBalancerIngress.
        IP is set for load-balancer ingress points that are IP based
        (typically GCE or OpenStack load-balancers).

        :return: The ip of this V1LoadBalancerIngress.
        :rtype: str
        """
        return self._ip

    @ip.setter
    def ip(self, ip):
        """
        Sets the ip of this V1LoadBalancerIngress.

        :param ip: The ip of this V1LoadBalancerIngress.
        :type: str
        """
        self._ip = ip

    def to_dict(self):
        """
        Returns the model properties as a dict.
        """
        result = {}
        for attr_name in self.swagger_types:
            value = getattr(self, attr_name)
            if isinstance(value, list):
                # Convert nested swagger models inside lists.
                result[attr_name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr_name] = value.to_dict()
            elif isinstance(value, dict):
                # Convert nested swagger models stored as dict values.
                result[attr_name] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[attr_name] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.
        """
        return (isinstance(other, V1LoadBalancerIngress)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        return not (self == other)
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
7aaa4e6359d495ce0c93b2480ad757acdcb75af7
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/test/processors/support/PCodeTestControlBlock.pyi
|
cb2ed1addcc5cfe466618a3b60b750d14cce393e
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,691
|
pyi
|
from typing import List
import ghidra.program.model.address
import ghidra.test.processors.support
import ghidra.test.processors.support.PCodeTestAbstractControlBlock
import java.lang
class PCodeTestControlBlock(ghidra.test.processors.support.PCodeTestAbstractControlBlock):
"""
PCodeTestControlBlock data is read from each binary test file and
identified by the MAIN_CONTROL_BLOCK_MAGIC 64-bit character field value at the start of the
data structure. Only one instance of this should exist within the binary.
"""
cachedProgramPath: unicode
testFile: ghidra.test.processors.support.PCodeTestFile
def equals(self, __a0: object) -> bool: ...
def getBreakOnDoneAddress(self) -> ghidra.program.model.address.Address: ...
def getBreakOnErrorAddress(self) -> ghidra.program.model.address.Address: ...
def getBreakOnPassAddress(self) -> ghidra.program.model.address.Address: ...
def getClass(self) -> java.lang.Class: ...
@overload
def getFunctionInfo(self, functionIndex: int) -> ghidra.test.processors.support.PCodeTestAbstractControlBlock.FunctionInfo: ...
@overload
def getFunctionInfo(self, functionName: unicode) -> ghidra.test.processors.support.PCodeTestAbstractControlBlock.FunctionInfo: ...
def getInfoStructureAddress(self) -> ghidra.program.model.address.Address: ...
def getNumberFunctions(self) -> int: ...
def getPrintfBufferAddress(self) -> ghidra.program.model.address.Address: ...
def getSprintf5Address(self) -> ghidra.program.model.address.Address: ...
def getTestGroups(self) -> List[ghidra.test.processors.support.PCodeTestGroup]: ...
def getTestResults(self) -> ghidra.test.processors.support.PCodeTestResults: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def breakOnDoneAddress(self) -> ghidra.program.model.address.Address: ...
@property
def breakOnErrorAddress(self) -> ghidra.program.model.address.Address: ...
@property
def breakOnPassAddress(self) -> ghidra.program.model.address.Address: ...
@property
def printfBufferAddress(self) -> ghidra.program.model.address.Address: ...
@property
def sprintf5Address(self) -> ghidra.program.model.address.Address: ...
@property
def testGroups(self) -> List[object]: ...
@property
def testResults(self) -> ghidra.test.processors.support.PCodeTestResults: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
287106fb8446b7e462b44ea9ab651a9c4016c4e0
|
2fac796fa58c67fb5a4a95a6e7f28cbef169318b
|
/python/hash-function.py
|
a3d79b09efd919544f997d3ba614a680411707d1
|
[] |
no_license
|
jwyx3/practices
|
f3fe087432e79c8e34f3af3a78dd10278b66dd38
|
6fec95b9b4d735727160905e754a698513bfb7d8
|
refs/heads/master
| 2021-03-12T20:41:59.816448
| 2019-04-14T06:47:30
| 2019-04-14T06:47:30
| 18,814,777
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
"""
In data structure Hash, hash function is used to convert a string(or any other type) into an integer smaller than hash size and bigger or equal to zero. The objective of designing a hash function is to "hash" the key as unreasonable as possible. A good hash function can avoid collision as less as possible. A widely used hash function algorithm is using a magic number 33, consider any string as a 33 based big integer like follow:
hashcode("abcd") = (ascii(a) * 33^3 + ascii(b) * 33^2 + ascii(c) *33 + ascii(d)) % HASH_SIZE
= (97* 33^3 + 98 * 33^2 + 99 * 33 +100) % HASH_SIZE
= 3595978 % HASH_SIZE
here HASH_SIZE is the capacity of the hash table (you can assume a hash table is like an array with index 0 ~ HASH_SIZE-1).
Given a string as a key and the size of hash table, return the hash value of this key.
"""
class Solution:
    """
    @param key: A String you should hash
    @param HASH_SIZE: An integer
    @return an integer
    """
    def hashCode(self, key, HASH_SIZE):
        """Polynomial rolling hash with base 33, reduced mod HASH_SIZE.

        Equivalent to interpreting `key` as a base-33 number of its
        character codes, taken modulo HASH_SIZE.
        """
        digest = 0
        for ch in key:
            # Reducing at every step keeps the intermediate value small
            # without changing the final residue.
            digest = (digest * 33 + ord(ch)) % HASH_SIZE
        return digest
|
[
"jwyx88003@gmail.com"
] |
jwyx88003@gmail.com
|
aafe5c830dbc10c7fa2a9bf7f557c558070e4f4d
|
ea3897a5ffe63abd7ced5676c9121f6bb970bb97
|
/detransapp/manager/tipo_veiculo.py
|
de59c365c60009669c75c552ea7ba717754d252b
|
[] |
no_license
|
brunowber/transnote2
|
a045bdef18a9b07b70cc74483023dd877728682c
|
7b799a71380aca342e879c5556cc24fcebdac1ca
|
refs/heads/master
| 2020-04-10T08:53:29.613086
| 2018-03-15T19:52:53
| 2018-03-15T19:52:53
| 124,267,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
# coding: utf-8
"""Gerencia os tipos de veiculos"""
from datetime import datetime
from django.db import models
from django.db.models import Q
from django.core.paginator import Paginator
from django.conf import settings
class TipoVeiculoManager(models.Manager):
    """Query helpers for vehicle types: paging and sync-by-date lookups."""

    def get_page(self, page, procurar):
        """Return one page of vehicle types, optionally filtered.

        When `procurar` is neither None nor '', it is matched
        case-insensitively against the description. Falls back to the last
        page when `page` is invalid or out of range.
        """
        has_search_term = procurar is not None and procurar != ''
        if has_search_term:
            queryset = self.filter(Q(descricao__icontains=procurar))
        else:
            queryset = self.filter()
        queryset = queryset.order_by('descricao')
        pages = Paginator(queryset, settings.NR_REGISTROS_PAGINA)
        try:
            return pages.page(page)
        except Exception:
            # Invalid or out-of-range page number: hand back the last page.
            return pages.page(pages.num_pages)

    def get_tipos_veiculo_sicronismo(self, data=None):
        """Return types changed after `data` ('%d/%m/%Y %H:%M:%S'), or all."""
        if not data:
            return self.all()
        cutoff = datetime.strptime(data, '%d/%m/%Y %H:%M:%S')
        return self.filter(data_alterado__gt=cutoff).all()
|
[
"bm.morais21@gmail.com"
] |
bm.morais21@gmail.com
|
cfe6d1f97f618d4b6127c93dcdbfc2a3c8f22a1b
|
f2dd3825da3ed8b6e52058453a9340f5330581c2
|
/0x01-python-if_else_loops_functions/6-print_comb3.py
|
126ab1fd63b2a2b9d3402e4a4033068c893f1881
|
[] |
no_license
|
mlaizure/holbertonschool-higher_level_programming
|
855c8b2672b59d27cba4c05ad8a7bb5f30b5d253
|
0d2bcdab97c31e79093497a1783a52e56e10ac6e
|
refs/heads/main
| 2023-04-22T23:20:58.284834
| 2021-05-13T01:00:20
| 2021-05-13T01:00:20
| 319,399,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
#!/usr/bin/python3
# Print every two-digit pair "ij" with i < j (digits 0-9), separated by
# ", ", with the final pair 89 followed by a newline instead.
for tens in range(10):
    for ones in range(tens + 1, 10):
        terminator = "\n" if tens == 8 and ones == 9 else ", "
        print("{}{}".format(tens, ones), end=terminator)
|
[
"maddi.laizure@gmail.com"
] |
maddi.laizure@gmail.com
|
1109d7204a3f0bd23def33ad91ac12f7647c0af5
|
cf6d53e0cbf09a57e63967596bc6e9dce7dcb9e7
|
/tests/models/test_models.py
|
2dbc66aa067281880ecfd45709da1eadbd7cea70
|
[
"MIT"
] |
permissive
|
TaiSakuma/acondbs
|
16d3bf32a3bd62a81a8575ed2433844acc4fc2a1
|
990ab44ce4081cc0e04148a8375f7ce7081c2dee
|
refs/heads/master
| 2021-05-26T16:09:39.772512
| 2020-04-21T17:58:41
| 2020-04-21T17:58:41
| 254,131,992
| 0
| 0
|
MIT
| 2020-04-08T15:48:28
| 2020-04-08T15:48:27
| null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from acondbs.db.sa import sa
# __________________________________________________________________||
def test_models(app):
    '''Assert that exactly the expected tables are declared on the models.
    '''
    # Table names the SQLAlchemy models are expected to register.
    expected = {
        'simulations', 'simulation_file_paths',
        'maps', 'map_file_paths',
        'beams', 'beam_file_paths',
    }
    declared = sa.Model.metadata.tables.keys()
    assert expected == declared
# __________________________________________________________________||
|
[
"tai.sakuma@gmail.com"
] |
tai.sakuma@gmail.com
|
7720303c24c0be9856fb62ddbf3740ccfd24190a
|
35e95fa04f4ba9d54244503f36fa29dfde8a0f75
|
/docs/source/conf.py
|
29952c93ff0a78a6a61a9b1e39647ceaddba1af7
|
[
"MIT"
] |
permissive
|
sanjana-dev/pMuTT
|
e4d949582337d93620d069a5d0592520b4ccd093
|
2246cffa875b4aa06ba0d7dec99a866b179e2e0e
|
refs/heads/master
| 2020-11-29T16:16:27.337590
| 2019-12-12T16:59:16
| 2019-12-12T16:59:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,651
|
py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'pmutt'
copyright = '2019, Vlachos Research Group'
author = 'Vlachos Research Group'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.2.16'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
#'nbsphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pmuttdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pmutt.tex', 'pmutt Documentation',
'Vlachos Research Group', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pmutt', 'pmutt Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pmutt', 'pmutt Documentation',
author, 'pmutt', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Napoleon settings
napoleon_google_docstring = True
# Latex equations options
imgmath_image_format = 'svg'
imgmath_font_size = 18
# Logo
html_logo = './logos/pmutt_inverse_icon.png'
html_favicon = './logos/p_icon.ico'
|
[
"jonathanalym@gmail.com"
] |
jonathanalym@gmail.com
|
e6a7d567f745661c1573b2541231bfcb08a5de43
|
5c335469a9198d61e2095293d06ce78e781726d0
|
/python/Semester 1/Tutorials/Tutorial 10/Problem5.py
|
0009cf6d74244aeb60117c57758a3646c01cb532
|
[] |
no_license
|
LaurenceGA/programmingProjects
|
5e203b450d11bff9cdb652661934ec3f797a6860
|
1fe3ea9a89be4c32cd68dd46da7a842b933c438b
|
refs/heads/master
| 2021-01-15T10:59:09.890932
| 2017-06-18T11:20:28
| 2017-06-18T11:20:28
| 36,592,569
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
#!/usr/bin/env python
__author__ = 'Laurence Armstrong'
authorship_string = "{} created on {} by {} ({})\n{}\n".format(
    "Problem5.py", "25/05/15", __author__, 15062061, "-----" * 15) \
    if __name__ == '__main__' else ""
print(authorship_string, end="")


def get_details():
    """Prompt for a title and cost; return them as a dict, or None on 'exit'."""
    title = input("Title: ")
    if title.lower() == 'exit':
        return None
    cost = input("Cost: ")
    return {
        'title': title,
        'cost': cost,
    }


items = []
# BUG FIX: the original loop appended the terminating None sentinel to the
# list (and then printed it). Collect only real entries and stop on 'exit'.
while True:
    item = get_details()
    if item is None:
        break
    items.append(item)

for entry in items:
    print(entry)
|
[
"lorryarmstrong@gmail.com"
] |
lorryarmstrong@gmail.com
|
853343599b84552784d51c77d51a4ebb35546e67
|
0232863fe1e0111b7b8641ef720b888584ed5ab3
|
/venv/bin/django-admin
|
91812a8852cc20c5950d49c2be084f3044b867b0
|
[] |
no_license
|
Smikhalcv/orm_migrations
|
fbc90322414e0e77707afa74410daf7c2e7bc703
|
1431d83f0bdddea3b0801556e9e35513dd121727
|
refs/heads/master
| 2022-12-04T10:11:21.180579
| 2020-08-31T10:14:21
| 2020-08-31T10:14:21
| 291,681,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
#!/home/dell-ubuntu/Документы/Python/django/orm_migrations/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"Smikhalcv@yandex.ru"
] |
Smikhalcv@yandex.ru
|
|
5431783e06c845aae4765079bfd50eda31b68404
|
af29d4d447f03485b7b736a914e5620148e17a09
|
/FirstPy/game0.py
|
31ffa46fe7f3c4649b4e360e087b1a28a04efc27
|
[] |
no_license
|
Flyhiee/YesPython
|
e2649f40e083d2a609fbc37619e68b5bc8d46482
|
642fc4d2d6b8c1f68607823a9dd03fa18ef014c0
|
refs/heads/master
| 2020-04-16T09:43:43.203844
| 2019-01-31T03:11:09
| 2019-01-31T03:11:09
| 165,474,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
# Tiny guessing game: the player wins only by guessing the number 5.
print("Welcome!")
guess = int(input("Guess the number: "))
print("You win!" if guess == 5 else "You lose!")
print("Game over!")
|
[
"Aps@localhost.localdomain"
] |
Aps@localhost.localdomain
|
1eaf182b0e73247e26c9e8358da022870f789f95
|
3649dce8b44c72bbfee56adf4e29ca6c5ba2703a
|
/code_up2767.py
|
ae839227919c462ed08a8a7aa67ef53ff3a18735
|
[] |
no_license
|
beOk91/code_up
|
03c7aca76e955e3a59d797299749e7fc2457f24a
|
ca1042ce216cc0a80e9b3d3ad363bc29c4ed7690
|
refs/heads/master
| 2022-12-06T08:23:00.788315
| 2020-08-20T11:21:59
| 2020-08-20T11:21:59
| 284,844,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
# a(i) = sum of the previous k terms; print a(n) mod 100007.
k, n = map(int, input().strip().split())
terms = list(map(int, input().strip().split()))
for i in range(k, n):
    # Builtin sum over a slice replaces the hand-rolled index loop (and no
    # longer shadows the builtin `sum`). Keeping every stored term reduced
    # mod 100007 prevents unbounded integer growth without changing the
    # final residue, since addition commutes with the modulus.
    terms.append(sum(terms[i - k:i]) % 100007)
print(terms[n - 1] % 100007)
"""
2 3
7 7 14
1,
"""
|
[
"be_ok91@naver.com"
] |
be_ok91@naver.com
|
aeb0b56dea0c1e975ee0bc44ba268aa0fa073b6d
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/SdGE4ZBtuMKyxDqQ6_11.py
|
2e4abab70e2ebb9141017db55244f13d9a4fe0c1
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
"""
Create a function that takes a string and returns the first character that
repeats. If there is no repeat of a character, then return "-1".
### Examples
first_repeat("legolas") ➞ "l"
first_repeat("Gandalf") ➞ "a"
first_repeat("Balrog") ➞ "-1"
first_repeat("Isildur") ➞ "-1"
### Notes
Tests are case sensitive.
"""
def first_repeat(chars):
    """Return the first character in *chars* that was already seen.

    Comparison is case sensitive. Returns the string "-1" when every
    character is unique (including for the empty string).
    """
    seen = set()  # set membership is O(1) vs. the original O(n) list scan
    for ch in chars:
        if ch in seen:
            return ch
        seen.add(ch)
    return '-1'
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
53bce614ae220236a08bbd5dc7b205c4aed9d7a1
|
3777658387aa9e78d7c04202d7fd47d59b9e1271
|
/images/corner_detection.py
|
abf9f4ccfd6fa888532ab46528130cf60a510f7f
|
[] |
no_license
|
jocoder22/PythonDataScience
|
709363ada65b6db61ee73c27d8be60587a74f072
|
c5a9af42e41a52a7484db0732ac93b5945ade8bb
|
refs/heads/master
| 2022-11-08T17:21:08.548942
| 2022-10-27T03:21:53
| 2022-10-27T03:21:53
| 148,178,242
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import matplotlib.image as mpimg
import cv2
from skimage import io, exposure
from skimage.feature import hog
def print2(*args):
for arg in args:
print(arg, end='\n\n')
sp = {"sep":"\n\n", "end":"\n\n"}
# url = 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTEc92kYxyNsx6ZxWYF6KJJz-QZWUj0jXBleB2tEg6yBekggb28'
url = 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSESxr13ODvh5lfb1TxT8LgDbzWP44sD5n1z_Nf-697su_jona3zw'
url2 = "https://avatars2.githubusercontent.com/u/31042629?s=400&v=4"
# load image
# imgg = cv2.imread('car22.jpg')
imgg = io.imread(url2)
# converting to gray scale
Img_gray = cv2.cvtColor(imgg, cv2.COLOR_BGR2GRAY)
# remove noise
img = cv2.GaussianBlur(Img_gray,(7,7),0)
img_c = cv2.cornerHarris(img, 3, 5, 0.1)
img_dilate = cv2.dilate(img_c, np.ones((5, 5), np.uint8), iterations=1)
print(img_dilate.max(), **sp)
Img_gray2 = imgg.copy()
Img_gray2[img_dilate > 0.02 * img_dilate.max()] = [255, 0, 0]
cv2.imshow('lamborghini_with_Corners', Img_gray2)
cv2.waitKey()
cv2.destroyAllWindows()
plt.imshow(Img_gray2)
plt.axis('off')
plt.show()
features, hog_img = hog(Img_gray,visualize=True,
pixels_per_cell=(9, 9), cells_per_block=(2, 2))
img_hog = exposure.rescale_intensity(hog_img, in_range=(0, 2))
plt.imshow(img_hog)
plt.axis('off')
plt.show()
|
[
"okigbookey@gmail.com"
] |
okigbookey@gmail.com
|
2e4d94adf3df80850558430a6d6e600a9f3aa1b7
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/contrib/timeseries/python/timeseries/model_utils.py
|
249975a83e3669875aa693d97311fff716dbf8b7
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 4,115
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for training and constructing time series Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
# TODO(agarwal): Remove and replace with functionality from tf.slim
def fully_connected(inp,
                    inp_size,
                    layer_size,
                    name,
                    activation=nn_ops.relu,
                    dtype=dtypes.float32):
  """Helper method to create a fully connected hidden layer.

  Args:
    inp: Input tensor, assumed 2-D of shape [batch, inp_size] -- TODO confirm
      against xw_plus_b's requirements.
    inp_size: Number of input units (rows of the weight matrix).
    layer_size: Number of output units.
    name: Prefix for the "{name}_weight" and "{name}_bias" variables.
    activation: Callable applied to the affine output, or None for a purely
      linear layer.
    dtype: Data type of the weight variable.

  Returns:
    The (optionally activated) result of inp * weight + bias.
  """
  wt = variable_scope.get_variable(
      name="{}_weight".format(name), shape=[inp_size, layer_size], dtype=dtype)
  # NOTE(review): the bias is created without an explicit dtype, so it uses
  # the variable-scope default rather than `dtype` -- verify for non-float32.
  bias = variable_scope.get_variable(
      name="{}_bias".format(name),
      shape=[layer_size],
      initializer=init_ops.zeros_initializer())
  output = nn_ops.xw_plus_b(inp, wt, bias)
  if activation is not None:
    assert callable(activation)
    output = activation(output)
  return output
def parameter_switch(parameter_overrides):
  """Create a function which chooses between overridden and model parameters.

  Args:
    parameter_overrides: A dictionary with explicit overrides of model
      parameters, mapping from Tensors to their overridden values.

  Returns:
    A function which takes a Tensor and returns the override if it is
    specified, or otherwise the evaluated value (given current Variable
    values).
  """
  def get_passed_or_trained_value(parameter):
    # Fall back to the parameter itself when no override was supplied.
    chosen = parameter_overrides.get(parameter, parameter)
    return ops.convert_to_tensor(chosen).eval()
  return get_passed_or_trained_value
def canonicalize_times_or_steps_from_output(times, steps,
                                            previous_model_output):
  """Canonicalizes either relative or absolute times, with error checking.

  Exactly one of `times` (absolute prediction times) and `steps` (a count of
  steps after the previous output) must be given.

  Args:
    times: Absolute prediction times; a leading batch dimension is added if
      a 1-D array is passed. Must come strictly after the previous output.
    steps: Number of steps to predict after `previous_model_output` ends.
    previous_model_output: Dict containing FilteringResults.TIMES with a
      leading batch dimension matching `times`.

  Returns:
    A batched numpy array of absolute prediction times.

  Raises:
    ValueError: If both or neither of `steps`/`times` are given, if batch
      sizes disagree, or if `times` does not follow the previous output.
  """
  if steps is not None and times is not None:
    raise ValueError("Only one of `steps` and `times` may be specified.")
  if steps is None and times is None:
    raise ValueError("One of `steps` and `times` must be specified.")
  if times is not None:
    times = numpy.array(times)
    # Promote a 1-D times array to [1, num_times].
    if len(times.shape) != 2:
      times = times[None, ...]
    if (previous_model_output[feature_keys.FilteringResults.TIMES].shape[0] !=
        times.shape[0]):
      raise ValueError(
          ("`times` must have a batch dimension matching"
           " the previous model output (got a batch dimension of {} for `times`"
           " and {} for the previous model output).").format(
               times.shape[0], previous_model_output[
                   feature_keys.FilteringResults.TIMES].shape[0]))
    # Every requested time must be strictly after the last previous time.
    if not (previous_model_output[feature_keys.FilteringResults.TIMES][:, -1] <
            times[:, 0]).all():
      raise ValueError("Prediction times must be after the corresponding "
                       "previous model output.")
  if steps is not None:
    # Relative steps: count consecutively from the last previous time + 1.
    predict_times = (
        previous_model_output[feature_keys.FilteringResults.TIMES][:, -1:] + 1 +
        numpy.arange(steps)[None, ...])
  else:
    predict_times = times
  return predict_times
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
feff111ff9f8504c1e9fe1ed1302d348f5120e23
|
63c261c8bfd7c15f6cdb4a08ea2354a6cd2b7761
|
/acaizerograu/acaizerograu/env/Scripts/pilfile.py
|
1261f3a53d2c0e74d9317192e9b38c5f23dc8bb5
|
[] |
no_license
|
filhosdaputa/AcaiZero
|
93295498d95bcc13d020f2255e6b87a12cff04bf
|
99a775f823d98a0b7b10e685936f1c12ccd1a70a
|
refs/heads/master
| 2022-10-29T05:31:10.512990
| 2017-08-11T13:49:06
| 2017-08-11T13:49:06
| 149,019,853
| 0
| 1
| null | 2022-10-18T00:41:16
| 2018-09-16T17:38:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,734
|
py
|
#!C:\Users\IGOR\Source\Repos\AcaiZero\acaizerograu\acaizerograu\env\Scripts\python.exe
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import logging
import sys
from PIL import Image
if len(sys.argv) == 1:
print("PIL File 0.4/2003-09-30 -- identify image files")
print("Usage: pilfile [option] files...")
print("Options:")
print(" -f list supported file formats")
print(" -i show associated info and tile data")
print(" -v verify file headers")
print(" -q quiet, don't warn for unidentified/missing/broken files")
sys.exit(1)
try:
opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
print(v)
sys.exit(1)
verbose = quiet = verify = 0
logging_level = "WARNING"
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats:")
for i in id:
print(i, end=' ')
sys.exit(1)
elif o == "-i":
verbose = 1
elif o == "-q":
quiet = 1
elif o == "-v":
verify = 1
elif o == "-D":
logging_level = "DEBUG"
logging.basicConfig(level=logging_level)
def globfix(files):
    """Expand shell wildcards on Windows, where the shell does not do it."""
    if sys.platform != "win32":
        return files
    expanded = []
    for pattern in files:
        matches = glob.glob(pattern) if glob.has_magic(pattern) else [pattern]
        expanded.extend(matches)
    return expanded
for file in globfix(args):
try:
im = Image.open(file)
print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
if verbose:
print(im.info, im.tile, end=' ')
print()
if verify:
try:
im.verify()
except:
if not quiet:
print("failed to verify image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
except IOError as v:
if not quiet:
print(file, "failed:", v)
except:
import traceback
if not quiet:
print(file, "failed:", "unexpected error")
traceback.print_exc(file=sys.stdout)
|
[
"igor-peres@hotmail.com"
] |
igor-peres@hotmail.com
|
9bb7f7bcd1ff8fe2c8ca5d83a2b9b489a473dba8
|
0a3e24df172a206a751217e5f85b334f39983101
|
/Design Pattern/mytest.py
|
16a6f34bcba5218fc479b4cc79867dbbf33df36d
|
[] |
no_license
|
yeboahd24/python202
|
1f399426a1f46d72da041ab3d138c582c695462d
|
d785a038183e52941e0cee8eb4f6cedd3c6a35ed
|
refs/heads/main
| 2023-05-06T04:14:19.336839
| 2021-02-10T02:53:19
| 2021-02-10T02:53:19
| 309,841,303
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
class AgeValidator(object):
    """Descriptor that validates an age is set and is at least 18 on read.

    NOTE: the value is stored on the descriptor itself (one per owning
    class), so every instance of the owning class shares a single age.
    That matches the original demo's behavior and is kept intentionally.
    """

    def __init__(self, age):
        self._age = age
        self._name = None  # attribute name; filled in by __set_name__

    def __get__(self, instance, owner):
        # Validation happens on read, so an invalid write only fails later.
        if self._age is None:
            raise ValueError(f'{self._age}: is not set')
        if self._age < 18:
            raise ValueError(f'{self._age} must be greater than or equal to 18')
        return self._age

    def __set_name__(self, owner, name):
        # BUG FIX: Python invokes this hook as __set_name__(owner, name);
        # the original declared the parameters in the opposite order, so
        # _name ended up holding the owner class instead of the name.
        self._name = name

    def __set__(self, instance, value):
        self._age = value
class Client:
    # Shared descriptor: the age lives on AgeValidator, not per instance.
    age = AgeValidator(19)  # default value for now

    def __init__(self, age):
        # Goes through AgeValidator.__set__; no validation happens on write.
        self.age = age

    def foo(self):
        # Reads through AgeValidator.__get__, which validates (>= 18).
        return self.age


t = Client(7)
t.age = 2
# Raises ValueError by design: 2 < 18 fails AgeValidator's read-time check.
print(t.foo())
# The error occurs because AgeValidator acts as a descriptor here; a
# getter/setter property would also work, but if the same validation is
# needed across many classes a descriptor lets you reuse it instead of
# repeating getters and setters in every class.
# This keeps the code DRY.
[
"noreply@github.com"
] |
yeboahd24.noreply@github.com
|
b335745902329e09d03ed7d0b83fc9fbebbc4a2c
|
47b4a652bf47afbff07a7148c3b4a94b86f85bb2
|
/swap_start/auto_playok_com/debug/debug_state.py
|
860ae0d1fec53e1bcb3151062b77c6f9bf99d71d
|
[
"MIT"
] |
permissive
|
yudongqiu/gomoku
|
3423253dcac52c0b738249900f1e86b31ca99524
|
4a95f2a5008f31fed5cb92c6bd6d55f9669ddd06
|
refs/heads/master
| 2022-10-28T09:16:38.939698
| 2021-01-30T21:01:48
| 2021-01-30T21:01:48
| 84,468,572
| 3
| 1
|
MIT
| 2022-09-30T09:03:45
| 2017-03-09T17:12:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,528
|
py
|
# coding: utf-8
import numpy as np
import AI_debug
from tf_model import load_existing_model
# Side length of the (square) gomoku board.
board_size = 15


def read_board_state(f):
    """Parse a text dump of a gomoku board into a board-state list.

    Each board row is expected on its own line in the form
    ``<row-number> | <15 space-separated cell tokens>`` where a token is
    'x'/'X' (black stone), 'o'/'O' (white stone) or '-' (empty); the
    upper-case variant marks the last move played. Lines without '|' or
    with a wrong number of tokens are ignored.

    :param f: path of the board dump file.
    :returns: ``[board, last_move, playing, board_size]`` where ``board``
        is ``[black_stones, white_stones]`` (sets of 1-based ``(row, col)``
        tuples), ``last_move`` is the 1-based coordinate of the marked
        stone (or None), and ``playing`` presumably indexes the side to
        move next (0 = black, 1 = white) — TODO confirm against AI_debug.
    """
    black_stones = set()
    white_stones = set()
    board = [black_stones, white_stones]
    last_move = None
    playing = 0
    # FIX: open the file in a context manager so it is always closed
    # (the original leaked the file handle).
    with open(f) as board_file:
        for line in board_file:
            if '|' not in line:
                continue
            line_idx, contents = line.split('|', maxsplit=1)
            row_i = int(line_idx) - 1
            ls = contents.split()
            if len(ls) != board_size:
                continue
            for col_j, s in enumerate(ls):
                stone = (row_i + 1, col_j + 1)
                if s == 'x':
                    black_stones.add(stone)
                elif s == 'X':
                    black_stones.add(stone)
                    last_move = stone
                    playing = 1
                elif s == 'o':
                    white_stones.add(stone)
                elif s == 'O':
                    white_stones.add(stone)
                    last_move = stone
                    playing = 0
                elif s != '-':
                    # Best-effort diagnostics; unknown tokens are skipped.
                    print(f'found unknown stone: {s}')
    return [board, last_move, playing, board_size]
# Smoke-test driver: parse a saved board position and load the trained
# model, then print the debug AI's suggested move for that position.
board_state = read_board_state('debug_board.txt')
model = load_existing_model('tf_model.h5')
# Inject the loaded model into the AI module's prediction hook.
AI_debug.tf_predict_u.model = model
AI_debug.initialize()
print(AI_debug.strategy(board_state))
|
[
"saviorbruce@gmail.com"
] |
saviorbruce@gmail.com
|
825c81bfb2926ce343f78e46f9a4f7df0c279581
|
65dd982b7791b11b4f6e02b8c46300098d9b5bb3
|
/heat-2014.2.1/heat/openstack/common/strutils.py
|
bd690b0829141d35b2aca22297f15e92314c9359
|
[
"Apache-2.0"
] |
permissive
|
xiongmeng1108/openstack_gcloud
|
83f58b97e333d86d141493b262d3c2261fd823ac
|
d5d3e4f8d113a626f3da811b8e48742d35550413
|
refs/heads/master
| 2021-01-10T01:21:13.911165
| 2016-03-25T08:21:14
| 2016-03-25T08:21:14
| 54,700,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,007
|
py
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from heat.openstack.common.gettextutils import _
# Exponent applied to the unit system's base (1024 or 1000) for each size
# prefix, e.g. 'Mi' -> base ** 2.
UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}

# Per unit system: (base, regex matching "<number><prefix><b|bit|B>").
# Note the SI pattern deliberately rejects a capital 'K'.
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}

# Lower-cased spellings accepted as boolean true / false by
# bool_from_string().
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

# Used by to_slug(): first strip non-word characters, then collapse runs
# of whitespace/hyphens into a single hyphen.
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    # The old ``x and 1 or 0`` idiom is fragile and obsolete; int() on a
    # bool is the direct equivalent (True -> 1, False -> 0).
    return int(bool_from_string(subject))
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.

    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    # Coerce non-string inputs (None, ints, ...) to text first.
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    normalized = subject.strip().lower()
    if normalized in TRUE_STRINGS:
        return True
    if normalized in FALSE_STRINGS:
        return False
    if not strict:
        return default

    # Strict mode: report the full set of accepted spellings.
    acceptable = ', '.join(
        "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
    msg = _("Unrecognized value '%(val)s', acceptable values are:"
            " %(acceptable)s") % {'val': subject,
                                  'acceptable': acceptable}
    raise ValueError(msg)
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
    already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode -- nothing to do.
    if isinstance(text, six.text_type):
        return text

    # Fall back to the terminal's encoding, then the interpreter default,
    # when the caller did not specify one.
    source_encoding = incoming or (sys.stdin.encoding or
                                   sys.getdefaultencoding())
    try:
        return text.decode(source_encoding, errors)
    except UnicodeDecodeError:
        # The guessed encoding could not decode the bytes. This mostly
        # happens when the global LANG variable is misconfigured and the
        # interpreter falls back to an ASCII/ANSI codec that cannot handle
        # non-ASCII input. Retry with UTF-8, which is an ASCII superset
        # and by far the most common encoding in practice.
        return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming text/bytes string using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    source_encoding = incoming or (sys.stdin.encoding or
                                   sys.getdefaultencoding())

    # Unicode input: encode it directly with the requested codec.
    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)

    # Byte input in a different encoding: transcode by decoding from the
    # current encoding first, then re-encoding with the requested one.
    if text and encoding != source_encoding:
        return safe_decode(text, source_encoding, errors).encode(
            encoding, errors)

    # Empty, or already in the target encoding -- return unchanged.
    return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into an float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.
    """
    try:
        base, size_pattern = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)

    parsed = size_pattern.match(text)
    if not parsed:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)

    magnitude = float(parsed.group(1))
    size_prefix = parsed.group(2)
    # A trailing 'b'/'bit' means the magnitude is in bits, not bytes.
    if parsed.group(3) in ('b', 'bit'):
        magnitude /= 8

    # Scale by the prefix (e.g. 'Mi' -> base ** 2); no prefix means bytes.
    if size_prefix:
        result = magnitude * pow(base, UNIT_PREFIX_EXPONENT[size_prefix])
    else:
        result = magnitude

    if return_int:
        return int(math.ceil(result))
    return result
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    value = safe_decode(value, incoming, errors)
    # Strip accents: NFKD-decompose, then drop the non-ASCII combining
    # marks. Plain encode/decode is fine here because at this point the
    # encodings are fixed ("ascii"), the error policy is "ignore", and the
    # types are known (unicode in, str out).
    ascii_text = unicodedata.normalize("NFKD", value).encode(
        "ascii", "ignore").decode("ascii")
    cleaned = SLUGIFY_STRIP_RE.sub("", ascii_text).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", cleaned)
|
[
"cs_xm@126.com"
] |
cs_xm@126.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.