| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 2-616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0-69 items) | license_type (string, 2 classes) | repo_name (string, 5-118 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 4-63 chars) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M chars) | authors (list, 1 item) | author_id (string, 0-212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dac47fd1c86e26a2e0023d0e42e73fb5c23b8db6
|
6929f0ddaf7aab3897ecd2aefd55d97c8ab1e431
|
/QandA/wsgi.py
|
2b85f2b31520b17db7efa9b223e73bf9469f7b74
|
[] |
no_license
|
tmvinoth3/Q-A
|
1aaab03b011478a6e33d0be1a78cc35a9631c60e
|
6da8bdd0c63453df33fe347ef7bb97e40e4218a7
|
refs/heads/master
| 2020-03-10T13:50:21.766223
| 2018-06-06T16:17:29
| 2018-06-06T16:17:29
| 129,409,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
"""
WSGI config for QandA project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "QandA.settings")
application = get_wsgi_application()
|
[
"575435@cognizant.com"
] |
575435@cognizant.com
|
e8e08e4b4c84e23d22c92940cf1d38e721e9617e
|
dc80f94c1a244002db468fc7242d5fcaafe439dc
|
/powerdns_client/api/stats_api.py
|
865ce4494cac1c4a36ceedb5e0f8587189c76576
|
[
"MIT"
] |
permissive
|
sanvu88/python-powerdns-client
|
f675e1ee162bb76190b41ddf0cfc34e2305a757b
|
57dd0460995a5407c6f5c963553b4df0f4859667
|
refs/heads/master
| 2023-02-04T07:05:31.095951
| 2020-12-15T16:48:15
| 2020-12-15T16:48:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,803
|
py
|
# coding: utf-8
"""
PowerDNS Authoritative HTTP API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.0.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from powerdns_client.api_client import ApiClient
class StatsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_stats(self, server_id, **kwargs): # noqa: E501
"""Query statistics. # noqa: E501
Query PowerDNS internal statistics. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stats(server_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str statistic: When set to the name of a specific statistic, only this value is returned. If no statistic with that name exists, the response has a 422 status and an error message.
:param bool includerings: “true” (default) or “false”, whether to include the Ring items, which can contain thousands of log messages or queried domains. Setting this to ”false” may make the response a lot smaller.
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_stats_with_http_info(server_id, **kwargs) # noqa: E501
else:
(data) = self.get_stats_with_http_info(server_id, **kwargs) # noqa: E501
return data
def get_stats_with_http_info(self, server_id, **kwargs): # noqa: E501
"""Query statistics. # noqa: E501
Query PowerDNS internal statistics. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stats_with_http_info(server_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str statistic: When set to the name of a specific statistic, only this value is returned. If no statistic with that name exists, the response has a 422 status and an error message.
:param bool includerings: “true” (default) or “false”, whether to include the Ring items, which can contain thousands of log messages or queried domains. Setting this to ”false” may make the response a lot smaller.
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['server_id', 'statistic', 'includerings'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_stats" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'server_id' is set
if ('server_id' not in params or
params['server_id'] is None):
raise ValueError("Missing the required parameter `server_id` when calling `get_stats`") # noqa: E501
collection_formats = {}
path_params = {}
if 'server_id' in params:
path_params['server_id'] = params['server_id'] # noqa: E501
query_params = []
if 'statistic' in params:
query_params.append(('statistic', params['statistic'])) # noqa: E501
if 'includerings' in params:
query_params.append(('includerings', params['includerings'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/servers/{server_id}/statistics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[object]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
[
"67791576+underline-bot@users.noreply.github.com"
] |
67791576+underline-bot@users.noreply.github.com
|
7a90a3c285d5b1d163f9550befa75c5b01f6fdc4
|
0b3c5260cd5c33a1beccc5710a5d0fd097a5ea15
|
/anchore_engine/services/policy_engine/engine/policy/gates/npm_check.py
|
40e0d49fe309d0fdfc2a14343f4df6cec46099e9
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
omerlh/anchore-engine
|
fb2d7cb3d8bd259f6c973b450fbaa2c2e00497f0
|
669a0327f8baaee3f5c7c64b482909fe38830d80
|
refs/heads/master
| 2021-09-02T12:48:51.661648
| 2018-01-02T19:26:47
| 2018-01-02T19:26:47
| 116,236,136
| 1
| 0
| null | 2018-01-04T08:41:39
| 2018-01-04T08:41:39
| null |
UTF-8
|
Python
| false
| false
| 7,044
|
py
|
from anchore_engine.services.policy_engine.engine.policy.gate import Gate, BaseTrigger
from anchore_engine.services.policy_engine.engine.policy.utils import NameVersionListValidator, CommaDelimitedStringListValidator, barsplit_comma_delim_parser, delim_parser
from anchore_engine.db import NpmMetadata
from anchore_engine.services.policy_engine.engine.logs import get_logger
from anchore_engine.services.policy_engine.engine.feeds import DataFeeds
log = get_logger()
# TODO; generalize these for any feed, with base classes and children per feed type
FEED_KEY = 'npm'
NPM_LISTING_KEY = 'npms'
NPM_MATCH_KEY = 'matched_feed_npms'
class NotLatestTrigger(BaseTrigger):
__trigger_name__ = 'NPMNOTLATEST'
__description__ = 'triggers if an installed NPM is not the latest version according to NPM data feed'
def evaluate(self, image_obj, context):
"""
Fire for any npm in the image that is in the official npm feed but is not the latest version.
Mutually exclusive to NPMNOTOFFICIAL and NPMBADVERSION
"""
feed_npms = context.data.get(NPM_MATCH_KEY)
img_npms = context.data.get(NPM_LISTING_KEY)
if not feed_npms or not img_npms:
return
feed_names = {p.name: p.latest for p in feed_npms}
for npm, versions in img_npms.items():
if npm not in feed_names:
continue # Not an official
for v in versions:
if v and v != feed_names.get(npm):
self._fire("NPMNOTLATEST Package ("+npm+") version ("+v+") installed but is not the latest version ("+feed_names[npm]['latest']+")")
class NotOfficialTrigger(BaseTrigger):
__trigger_name__ = 'NPMNOTOFFICIAL'
__description__ = 'triggers if an installed NPM is not in the official NPM database, according to NPM data feed'
def evaluate(self, image_obj, context):
"""
Fire for any npm that is not in the official npm feed data set.
Mutually exclusive to NPMNOTLATEST and NPMBADVERSION
:param image_obj:
:param context:
:return:
"""
feed_npms = context.data.get(NPM_MATCH_KEY)
img_npms = context.data.get(NPM_LISTING_KEY)
if not feed_npms or not img_npms:
return
feed_names = {p.name: p.versions_json for p in feed_npms}
for npm in img_npms.keys():
if npm not in feed_names:
self._fire(msg="NPMNOTOFFICIAL Package ("+str(npm)+") in container but not in official NPM feed.")
class BadVersionTrigger(BaseTrigger):
__trigger_name__ = 'NPMBADVERSION'
__description__ = 'triggers if an installed NPM version is not listed in the official NPM feed as a valid version'
def evaluate(self, image_obj, context):
"""
Fire for any npm that is in the official npm set but is not one of the official versions.
Mutually exclusive to NPMNOTOFFICIAL and NPMNOTLATEST
:param image_obj:
:param context:
:return:
"""
feed_npms = context.data.get(NPM_MATCH_KEY)
img_npms = context.data.get(NPM_LISTING_KEY)
if not feed_npms or not img_npms:
return
feed_names = {p.name: p.versions_json for p in feed_npms}
for npm, versions in img_npms.items():
if npm not in feed_names:
continue
non_official_versions = set(versions).difference(set(feed_names.get(npm, [])))
for v in non_official_versions:
self._fire(msg="NPMBADVERSION Package ("+npm+") version ("+v+") installed but version is not in the official feed for this package ("+str(feed_names.get(npm, '')) + ")")
class PkgFullMatchTrigger(BaseTrigger):
__trigger_name__ = 'NPMPKGFULLMATCH'
__description__ = 'triggers if the evaluated image has an NPM package installed that matches one in the list given as a param (package_name|vers)'
__params__ = {
'BLACKLIST_NPMFULLMATCH': NameVersionListValidator()
}
def evaluate(self, image_obj, context):
"""
Fire for any npm that is on the blacklist with a full name + version match
:param image_obj:
:param context:
:return:
"""
npms = image_obj.npms
if not npms:
return
pkgs = context.data.get(NPM_LISTING_KEY)
if not pkgs:
return
for pkg, vers in barsplit_comma_delim_parser(self.eval_params.get('BLACKLIST_NPMFULLMATCH', '')).items():
try:
if pkg in pkgs and vers in pkgs.get(pkg, []):
self._fire(msg='NPMPKGFULLMATCH Package is blacklisted: '+pkg+"-"+vers)
except Exception as e:
continue
class PkgNameMatchTrigger(BaseTrigger):
__trigger_name__ = 'NPMPKGNAMEMATCH'
__description__ = 'triggers if the evaluated image has an NPM package installed that matches one in the list given as a param (package_name)'
__params__ = {
'BLACKLIST_NPMNAMEMATCH': CommaDelimitedStringListValidator()
}
def evaluate(self, image_obj, context):
npms = image_obj.npms
if not npms:
return
pkgs = context.data.get(NPM_LISTING_KEY)
if not pkgs:
return
for match_val in delim_parser(self.eval_params.get('BLACKLIST_NPMNAMEMATCH', '')):
if match_val and match_val in pkgs:
self._fire(msg='NPMPKGNAMEMATCH Package is blacklisted: ' + match_val)
class NoFeedTrigger(BaseTrigger):
__trigger_name__ = 'NPMNOFEED'
__description__ = 'triggers if anchore does not have access to the NPM data feed'
def evaluate(self, image_obj, context):
try:
feed_meta = DataFeeds.instance().packages.group_by_name(FEED_KEY)
if feed_meta and feed_meta[0].last_sync:
return
except Exception as e:
log.exception('Error determining feed presence for npms. Defaulting to firing trigger')
self._fire()
return
class NpmCheckGate(Gate):
__gate_name__ = "NPMCHECK"
__triggers__ = [
NotLatestTrigger,
NotOfficialTrigger,
BadVersionTrigger,
PkgFullMatchTrigger,
PkgNameMatchTrigger,
NoFeedTrigger
]
def prepare_context(self, image_obj, context):
"""
Prep the npm names and versions
:param image_obj:
:param context:
:return:
"""
if not image_obj.npms:
return context
context.data[NPM_LISTING_KEY] = {p.name: p.versions_json for p in image_obj.npms}
npms = context.data[NPM_LISTING_KEY].keys()
context.data[NPM_MATCH_KEY] = []
chunks = [npms[i: i+100] for i in xrange(0, len(npms), 100)]
for key_range in chunks:
context.data[NPM_MATCH_KEY] += context.db.query(NpmMetadata).filter(NpmMetadata.name.in_(key_range)).all()
return context
|
[
"nurmi@anchore.com"
] |
nurmi@anchore.com
|
f711172c3480c5580dd6594014f2a13fb124054c
|
f26dd860c8d764fc7a47bde656f393795cd8d763
|
/david13.py
|
f0f78ee556259290f4fcefbd2eb9801ee2858e03
|
[] |
no_license
|
chokkuu1998/david
|
8e9fa162f657c8b9bb55502f1cdd730a08ff0235
|
4dc999cdb73383b5a5d7ed3d98b2c1a4d6b5f7ee
|
refs/heads/master
| 2020-03-28T17:05:04.046963
| 2019-07-16T08:07:37
| 2019-07-16T08:07:37
| 148,756,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
AA,BB=map(int,input().split())
CC=list(map(int,input().split()))
pp=list(map(int,input().split()))
qq=[]
rr=0
for i in range(AA):
x=pp[i]/CC[i]
qq.append(x)
while BB>=0 and len(qq)>0:
mindex=qq.index(max(qq))
if BB>=CC[mindex]:
rr=rr+pp[mindex]
BB=BB-CC[mindex]
CC.pop(mindex)
pp.pop(mindex)
qq.pop(mindex)
print(rr)
|
[
"noreply@github.com"
] |
chokkuu1998.noreply@github.com
|
b70385e17427bd7ad30abd8179b7962f293e20f5
|
5837e04e53e0434c8b10eb9647804901d3a6ee7a
|
/pyseries/metrics/__init__.py
|
77d6184c5b0dba8ce8889cd431ef362b1d01afb2
|
[
"BSD-3-Clause"
] |
permissive
|
nubiofs/pyseries
|
b26fd4dff4b55cc3b338a2ebee9260b91d6fa902
|
59c8a321790d2398d71305710b7d322ce2d8eaaf
|
refs/heads/master
| 2020-04-05T14:18:35.453540
| 2014-10-27T17:47:54
| 2014-10-27T17:47:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
# -*- coding: utf8
from __future__ import division, print_function
'''
Array and time series metrics package
'''
|
[
"flaviovdf@gmail.com"
] |
flaviovdf@gmail.com
|
173607b64aeed04b631c469061ed4f527766472f
|
c5d5d65dcb782618428c8ee3d486d0bd7cd817e5
|
/manage.py
|
98969a8eb96a066bbb764e4404ff0a598e35cd16
|
[
"MIT"
] |
permissive
|
ruslan447/houseofoutfits
|
a3471a48179f4a62fb3858c7386e32f965381b88
|
96d4bd3a24974f4e96dc8e0bc510afb63e2d3f7c
|
refs/heads/master
| 2022-12-04T11:48:12.466046
| 2020-08-12T09:10:08
| 2020-08-12T09:10:08
| 285,194,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'outfease.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
ruslan447.noreply@github.com
|
d95f0b89899c28fd7e790e02a64cba46aff3d59d
|
1ad2ae0383341f2b92fe38173612be5d9c4970e8
|
/polls/models.py
|
75a460d4f5e68fc9d5052737ed7677900239b83f
|
[
"MIT"
] |
permissive
|
pizzapanther/ppp
|
9b0df90ddf2e52ffdaf43394026613dbd884c0e9
|
3286f39f8e90f3473841a154ff7189a3efd9ca94
|
refs/heads/master
| 2021-09-23T03:52:27.915606
| 2020-03-04T18:04:08
| 2020-03-04T18:04:08
| 222,154,111
| 0
| 0
|
MIT
| 2021-09-22T18:10:01
| 2019-11-16T20:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
from django.conf import settings
from django.db import models
from django.contrib.postgres.fields import ArrayField
class Presentation(models.Model):
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=100)
def __str__(self):
return self.title
def current(self):
return self.poll_set.filter(live=True).first()
class Poll(models.Model):
question = models.CharField(max_length=254)
choices = ArrayField(models.CharField(max_length=254))
live = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
presentation = models.ForeignKey(Presentation, on_delete=models.SET_NULL, blank=True, null=True)
class Meta:
ordering = ('-created',)
def __str__(self):
return self.question
def json_data(self):
votes = []
for (i, choice) in enumerate(self.choices):
votes.append(self.vote_set.filter(choice=i).count())
return {
'id': self.id,
'slug': self.presentation.slug,
'question': self.question,
'choices': self.choices,
'votes': votes,
'total': self.vote_set.all().count(),
}
class Vote(models.Model):
poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
choice = models.PositiveSmallIntegerField()
def __str__(self):
return f'{self.poll} - {self.user}'
|
[
"paul.m.bailey@gmail.com"
] |
paul.m.bailey@gmail.com
|
97b53eda25175c28fc9aca24c186c191c00e164a
|
3c5e7e507baca827a367be2c4e85655c7c590162
|
/jogos.py
|
1e37db040c39233b65dd2c910ac6eb719f915bc4
|
[] |
no_license
|
devlaurindo/jogos-em-python
|
7608c3aa7c124fedd8f395e9181fa1b778515095
|
e8409d81c702aeed633f8fbffec94049561f1f63
|
refs/heads/master
| 2023-04-10T18:13:22.146064
| 2021-04-28T17:48:42
| 2021-04-28T17:48:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
import adivinhacao
import forca
def escolhe_jogo():
print ("***********************************")
print ("******* Escolha o seu jogo! *******!")
print ("***********************************")
print ("\n (1) Adivinhação (2) Forca ")
jogo = int (input ("\n Qual jogo? "))
if (jogo == 1):
print ("\n Jogando Adivinhação")
adivinhacao.jogar()
elif (jogo == 2):
print ("\nJogando Forca")
forca.jogar()
if (__name__ == "__main__"):
escolhe_jogo()
|
[
"user-t00r@hotmail.com"
] |
user-t00r@hotmail.com
|
fb80cd57ad70d4873721a45f8d1e755d18cc92c6
|
9e1bb84206587a5efd352a9ffbdcd383b150a4d9
|
/day10/led/controllers/__init__.py
|
b2dd8b13f44a94aa8242765761ca7a03d0e1e834
|
[
"Apache-2.0"
] |
permissive
|
smalljiny/raspi-class-example
|
34ef92d7a32943fee34c6534ccf30f411e9d05eb
|
7f92f34d366b94f9ae3c7da5ebeacdeb628be446
|
refs/heads/master
| 2021-09-02T16:52:57.592216
| 2018-01-03T17:55:47
| 2018-01-03T17:55:47
| 114,616,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
from flask import Flask
from controllers.leds.controllers import leds
app = Flask(__name__)
app.debug = True
app.register_blueprint(leds, url_prefix='/leds')
|
[
"smalljiny@gmail.com"
] |
smalljiny@gmail.com
|
64618a6ac65022117f48efe65d74d536eb1d4461
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/request/AntfortuneEquityShopCustrelationQueryRequest.py
|
fa4fa39402e4465d04ccbe7c01ba6dec5c1768fa
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,021
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AntfortuneEquityShopCustrelationQueryModel import AntfortuneEquityShopCustrelationQueryModel
class AntfortuneEquityShopCustrelationQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AntfortuneEquityShopCustrelationQueryModel):
self._biz_content = value
else:
self._biz_content = AntfortuneEquityShopCustrelationQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'antfortune.equity.shop.custrelation.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
c6ad8989868b3f799fb471f965dd0dbac43eda21
|
3932fd64580d6c04543d02e7675fc6587bf31214
|
/src/border_handling.py
|
ad2c49f30822ba44271b7c6ac5e3e44a6eb38696
|
[] |
no_license
|
RosePY/ASCIIArt
|
e06bcc4d2e09007833ceb8b413251852f5576420
|
0985724f3d4fc39ad9a6c430d80d1a35de787d6d
|
refs/heads/master
| 2021-01-09T18:12:50.449403
| 2020-02-22T20:27:14
| 2020-02-22T20:27:14
| 242,402,941
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,966
|
py
|
import numpy as np
class BorderHandling:
BORDER_REPLICATE = 1
BORDER_REFLECT = 2
BORDER_REFLECT_101 = 3
BORDER_WRAP = 4
BORDER_CONSTANT = 5
BORDER_DEFAULT = BORDER_REFLECT_101
@staticmethod
def border_handling(img_in, kernel, border_type=BORDER_DEFAULT):
rows, cols = img_in.shape[:2]
kernel_rows, kernel_cols = kernel.shape
kernel_center_row, kernel_center_col = int(kernel_rows / 2), int(kernel_cols / 2)
img_out = np.zeros((kernel_rows + rows - 1, kernel_cols + cols - 1))
if border_type == BorderHandling.BORDER_REPLICATE:
return BorderHandling.border_replicate(img_out, img_in, kernel_center_row, kernel_center_col)
elif border_type == BorderHandling.BORDER_REFLECT:
return BorderHandling.border_reflect(img_out, img_in, kernel_center_row, kernel_center_col)
elif border_type == BorderHandling.BORDER_REFLECT_101:
return BorderHandling.border_reflect_101(img_out, img_in, kernel_center_row, kernel_center_col)
elif border_type == BorderHandling.BORDER_WRAP:
return BorderHandling.border_wrap(img_out, img_in, kernel_center_row, kernel_center_col)
elif border_type == BorderHandling.BORDER_CONSTANT:
return BorderHandling.border_constant(img_in, kernel_rows, kernel_cols, kernel_center_row, kernel_center_col, 0)
@staticmethod
def border_replicate(img_out, img_in, kernel_center_row, kernel_center_col):
rows, cols = img_in.shape[:2]
# corner squares
img_out[:kernel_center_row, :kernel_center_col] = img_in[0, 0] * np.ones((kernel_center_row, kernel_center_col), dtype=np.float32)
img_out[:kernel_center_row, -kernel_center_col:] = img_in[0, (cols - 1)] * np.ones((kernel_center_row, kernel_center_col), dtype=np.float32)
img_out[-kernel_center_row:, :kernel_center_col] = img_in[(rows - 1), 0] * np.ones((kernel_center_row, kernel_center_col), dtype=np.float32)
img_out[-kernel_center_row:, -kernel_center_col:] = img_in[(rows - 1), (cols - 1)] * np.ones((kernel_center_row, kernel_center_col), dtype=np.float32)
# sides
img_out[:kernel_center_row, kernel_center_col:-kernel_center_col] = np.array([img_in[0, :], ] * kernel_center_row)
img_out[kernel_center_row:-kernel_center_row, :kernel_center_col] = np.array([img_in[:, 0], ] * kernel_center_col).transpose()
img_out[kernel_center_row:-kernel_center_row, -kernel_center_col:] = np.array([img_in[:, (cols - 1)], ] * kernel_center_col).transpose()
img_out[-kernel_center_row:, kernel_center_col:-kernel_center_col] = np.array([img_in[(rows - 1), :], ] * kernel_center_row)
# original image
img_out[kernel_center_row:-kernel_center_row, kernel_center_col:-kernel_center_col] = img_in
return img_out
@staticmethod
def border_reflect(img_out, img_in, kernel_center_row, kernel_center_col):
# corner squares
img_out[:kernel_center_row, :kernel_center_col] = np.rot90(img_in[:kernel_center_row, :kernel_center_col], 2)
img_out[:kernel_center_row, -kernel_center_col:] = np.rot90(img_in[:kernel_center_row, -kernel_center_col:], 2)
img_out[-kernel_center_row:, :kernel_center_col] = np.rot90(img_in[-kernel_center_row:, :kernel_center_col], 2)
img_out[-kernel_center_row:, -kernel_center_col:] = np.rot90(img_in[-kernel_center_row:, -kernel_center_col:], 2)
# sides
img_out[:kernel_center_row, kernel_center_col:-kernel_center_col] = np.flip(img_in[:kernel_center_row, :], 0)
img_out[kernel_center_row:-kernel_center_row, :kernel_center_col] = np.flip(img_in[:, :kernel_center_col], 1)
img_out[kernel_center_row:-kernel_center_row, -kernel_center_col:] = np.flip(img_in[:, -kernel_center_col:], 1)
img_out[-kernel_center_row:, kernel_center_col:-kernel_center_col] = np.flip(img_in[-kernel_center_row:, :], 0)
# original image
img_out[kernel_center_row:-kernel_center_row, kernel_center_col:-kernel_center_col] = img_in
return img_out
@staticmethod
def border_reflect_101(img_out, img_in, kernel_center_row, kernel_center_col):
# corner squares
img_out[:kernel_center_row, :kernel_center_col] = np.rot90(img_in[1:(kernel_center_row + 1), 1:(kernel_center_col + 1)], 2)
img_out[:kernel_center_row, -kernel_center_col:] = np.rot90(img_in[1:(kernel_center_row + 1), -(kernel_center_col + 1):-1], 2)
img_out[-kernel_center_row:, :kernel_center_col] = np.rot90(img_in[-(kernel_center_row + 1):-1, 1:(kernel_center_col + 1)], 2)
img_out[-kernel_center_row:, -kernel_center_col:] = np.rot90(img_in[-(kernel_center_row + 1):-1, -(kernel_center_col + 1):-1], 2)
# sides
img_out[:kernel_center_row, kernel_center_col:-kernel_center_col] = np.flip(img_in[1:(kernel_center_row + 1), :], 0)
img_out[kernel_center_row:-kernel_center_row, :kernel_center_col] = np.flip(img_in[:, 1:(kernel_center_col + 1)], 1)
img_out[kernel_center_row:-kernel_center_row, -kernel_center_col:] = np.flip(img_in[:, -(kernel_center_col + 1):-1], 1)
img_out[-kernel_center_row:, kernel_center_col:-kernel_center_col] = np.flip(img_in[-(kernel_center_row + 1):-1, :], 0)
# original image
img_out[kernel_center_row:-kernel_center_row, kernel_center_col:-kernel_center_col] = img_in
return img_out
@staticmethod
def border_wrap(img_out, img_in, kernel_center_row, kernel_center_col):
# corner squares
img_out[:kernel_center_row, :kernel_center_col] = img_in[-kernel_center_row:, -kernel_center_col:]
img_out[:kernel_center_row, -kernel_center_col:] = img_in[-kernel_center_row:, :kernel_center_col]
img_out[-kernel_center_row:, :kernel_center_col] = img_in[:kernel_center_row, -kernel_center_col:]
img_out[-kernel_center_row:, -kernel_center_col:] = img_in[:kernel_center_row, :kernel_center_col]
# sides
img_out[:kernel_center_row, kernel_center_col:-kernel_center_col] = img_in[-kernel_center_row:, :]
img_out[kernel_center_row:-kernel_center_row, :kernel_center_col] = img_in[:, -kernel_center_col:]
img_out[kernel_center_row:-kernel_center_row, -kernel_center_col:] = img_in[:, :kernel_center_col]
img_out[-kernel_center_row:, kernel_center_col:-kernel_center_col] = img_in[:kernel_center_row, :]
# original image
img_out[kernel_center_row:-kernel_center_row, kernel_center_col:-kernel_center_col] = img_in
return img_out
@staticmethod
def border_constant(img_in, kernel_rows, kernel_cols, kernel_center_row, kernel_center_col, const):
rows, cols = img_in.shape[:2]
img_out = const * np.ones((kernel_rows + rows - 1, kernel_cols + cols - 1), dtype=np.float32)
img_out[kernel_center_row:-kernel_center_row, kernel_center_col:-kernel_center_col] = img_in
return img_out
|
[
"noreply@github.com"
] |
RosePY.noreply@github.com
|
d0044cf9410ea97706a0db4c0202cbcc684ff637
|
78c1d3020b827b9ab7527404983986685bbb79ae
|
/challenge 86.py
|
11ea908b4c6999cb7d2bad12f6f20f6b0ed1e17b
|
[] |
no_license
|
Yakobo-UG/Python-by-example-challenges
|
eb3e52601c9546a4984b8da4cf752e59f13af50e
|
5bee4ec3e7619e826a27cde9e6a9a841393f8ccb
|
refs/heads/master
| 2023-08-27T03:01:39.448703
| 2021-10-14T15:43:45
| 2021-10-14T15:43:45
| 401,747,080
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
#Ask the user to enter a new password. Ask them to enter it again. If the two passwords match, display “Thank you”. If the letters are correct but in the wrong case, display the message “They must be in the same case”, otherwise display the message “Incorrect”
newpass =str(input("Enter new password: "))
newpass1 = str(input("Enter new password: "))
if newpass == newpass1:
print("Thank you")
elif newpass.lower() == newpass1.lower():
print("They must be in the same case")
else:
print("Incorrect")
|
[
"65670517+Yakobo-UG@users.noreply.github.com"
] |
65670517+Yakobo-UG@users.noreply.github.com
|
79ee635884d6023dadd4796ad9c195a3da5940f9
|
ab084a1775574955c4a46a0fdad00d70cdfa8839
|
/DEAR/main.py
|
d9b861a9ab1e3be0d2460cf6feec78e27de40da2
|
[
"Apache-2.0"
] |
permissive
|
autofix2021icse/AutoFix-2021-ICSE
|
eaa926125dc62e74ccf0853d2664275d21fd931c
|
cbad6d6d58fcb88b1c3986d2f7e738aa035a65d1
|
refs/heads/master
| 2023-01-21T23:16:03.114859
| 2020-11-21T11:12:43
| 2020-11-21T11:12:43
| 290,268,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
import os
import time
import subprocess
rq = "1"
path = os.getcwd()
start = time.process_time()
if os.path.isdir(path + "/sbfl/ochiai/") and os.path.isdir(path + "/bert_model/bert_output/"):
try:
subprocess.call("python3 " + path + "/run_exp.py " + rq, shell=True)
except:
print("Running Error!")
else:
print("You need to finish the previous steps for preparing!")
time_length = time.process_time() - start
print("Cost " + str(time_length) + " s to finish the model")
|
[
"autofix2021icse@outlook.com"
] |
autofix2021icse@outlook.com
|
6229e7231c45038a0d515693de51d6b3b5ee16fe
|
9b10d8482a7af9c90766747f5f2ddc343871d5fa
|
/Gemtek/AutoTest/DropAP/WRTM-326ACN-DropAP2/premises/library/test.py
|
f158319fe329093f6d1fd74a233f2a489a42b9b0
|
[] |
no_license
|
DarcyChang/MyProjects
|
86d33f5cf8bdfd4b21e64922e4eb25c1afc3c135
|
47efb2dfe13ace264f8943b59b701f39f23c4c17
|
refs/heads/master
| 2021-05-12T12:43:39.255082
| 2020-09-23T06:42:03
| 2020-09-23T06:42:03
| 117,419,269
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
__author__ = 'alu'
import re
import time
import cafe
from cafe.resp.response_map import ResponseMap
from collections import OrderedDict
from demo.alu_demo.User_Cases.test_lib import Teststeplib_e7 as e7_lib
res = "ONT Subscriber Info Status" \
"---------- ------------------------------------------------ ---------------" \
"205 <no subscriber ID> enabled" \
" Last Location: 2/1"
r = ResponseMap(res)
table1 = r.table_match_by_delimiter()
print"table1:",table1[-1]
print type(table1[-1])
|
[
"cychang0916@gmail.com"
] |
cychang0916@gmail.com
|
35a47b027566248963ff354a2a07b0ef7377d61c
|
1bccf0b1374dcfddfc3e320fd5b6af499334df2d
|
/scripts/hashtagUserCounts.py
|
4a780d5a584536af79f7279e772bc4f2cc89c7c9
|
[
"Unlicense"
] |
permissive
|
chebee7i/twitter
|
6b245f5a7b7510089b62d48567e6208e1fe8a1db
|
ec1d772c3ef7d2288ac8051efb8637378f3ec195
|
refs/heads/master
| 2021-01-01T16:25:13.242941
| 2015-06-24T19:39:24
| 2015-06-24T19:39:24
| 23,846,593
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,533
|
py
|
"""
Insert the number of users that tweeted each hashtag.
"""
import twitterproj
import pymongo
from collections import defaultdict
import itertools
import json
import os
import io
db = twitterproj.connect()
def add_user_counts(bot_filtered=True):
collection = db.tweets.with_hashtags
if bot_filtered:
skip_users = twitterproj.subcollections.get_skip_users()
target = db.hashtags.bot_filtered
else:
skip_users = set([])
target = db.hashtags
counts = defaultdict(int)
users = defaultdict(set)
for i, tweet in enumerate(collection.find()):
user_id = tweet['user']['id']
if user_id in skip_users:
continue
for hashtag in tweet['hashtags']:
counts[hashtag] += 1
users[hashtag].add(user_id)
for i, (hashtag, count) in enumerate(counts.iteritems()):
target.update({'_id': hashtag, 'count': count},
{"$set": {'user_count': len(users[hashtag])}},
upsert=False)
def to_json(filename, mincount=1000, bot_filtered=True):
if bot_filtered:
collection = db.hashtags.bot_filtered
else:
collection = db.hashtags
rows = []
if mincount is not None:
it = collection.find({'user_count': {'$gte': mincount}})
else:
it = collection.find()
for doc in it:
row = [doc['_id'], doc['count'], doc['user_count']]
rows.append(row)
data = {'data': rows}
with open(filename, 'w') as fobj:
json.dump(data, fobj)
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def to_csv(filename, mincount=1000, bot_filtered=True):
"""
Writes hashtags to CSV, filtering hashtags that were not mentioned by
some minimum number of users.
"""
if bot_filtered:
collection = db.hashtags.bot_filtered
else:
collection = db.hashtags
rows = []
if mincount is not None:
it = collection.find({'user_count': {'$gte': mincount}})
else:
it = collection.find()
it = it.sort('user_count', pymongo.DESCENDING)
basename, ext = os.path.splitext(filename)
if not ext:
ext = '.csv'
data = """
This file contains information regarding the UTF-8 encoded CSV file:
{0}{1}
Each line of that file contains 3 pieces of information, separated by commas:
1. hashtag
2. number of times the hashtag was tweeted
3. number of users who tweeted the hashtag
Lines are sorted, descendingly, according to column 3.
Counts are tabulated wrt geotagged tweets in the contiguous states.
{2}
Hashtags were included only if they were tweeted by at least {3} users across all regions.
"""
if bot_filtered:
text = 'Tweets from users determined to be robots were excluded from the counting process.'
else:
text = ''
data = data.format(basename, ext, text, mincount)
with open(basename + '.txt', 'w') as fobj:
fobj.write(data)
with io.open(basename + ext, 'w', encoding='utf-8') as fobj:
for docs in grouper(10000, it):
rows = []
for doc in docs:
if doc is None:
break
row = [doc['_id'], str(doc['count']), str(doc['user_count'])]
rows.append(','.join(row))
fobj.write(u'\n'.join(rows))
fobj.write(u'\n') # So groups are separated.
|
[
"chebee7i@gmail.com"
] |
chebee7i@gmail.com
|
70e87e1fb732bfc445b50e28f7b074420ad1f86b
|
9eabceb375a96b5b87b4e2a59a89a71bfa577db1
|
/AutomateTestSiteWithSelenium/GetLinkFromPage.py
|
01761028303efd3a36b510c29505229ba0f385a3
|
[] |
no_license
|
fitriKimmy/ScriptingWithPython
|
67463e9b8085bf13450c93c3370d52e29cac131e
|
c51b0463b574902fbf9a600cb76b0a7da90e8d1c
|
refs/heads/master
| 2022-12-14T04:21:55.760126
| 2020-09-07T11:20:44
| 2020-09-07T11:20:44
| 293,294,949
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("https://sites.google.com/site/httpwwwseleniumhqorg/")
element = driver.find_element_by_xpath('//*[@id="sites-canvas-main-content"]/table/tbody/tr/td/div/div[2]/div[2]/table/tbody/tr/td[1]/center/a[1]/img')
element.click()
driver.back()
search_element = driver.find_element_by_id('jot-ui-searchInput')
search_element.send_keys('web driver')
go_button = driver.find_element_by_class_name('goog-inline-block')
go_button.click()
link_elements = driver.find_elements_by_tag_name('a')
for i in range (len(link_elements)):
print(link_elements[i].get_attribute('href'))
driver.quit()
|
[
"fitri.manurung@go-jek.com"
] |
fitri.manurung@go-jek.com
|
3289483dd8a5b66b32a7ef75b2909f8266588ad9
|
4dbe500e89d35051f9a9ade4343a24d932fbd90b
|
/utils/CapacitorSense.py
|
fc17d8473ed1f954016ea193b37fbb555acf90e2
|
[
"MIT"
] |
permissive
|
basimkhajwal/RaspberryPi
|
4b9de905f553960f687478ff7a8f7cfa61d2fa98
|
1e9a9fbfe201f0fe642613c9fe784e2104b79dcf
|
refs/heads/master
| 2021-01-01T15:59:55.945267
| 2015-08-09T09:21:14
| 2015-08-09T09:21:14
| 28,642,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
import RPi.GPIO as g
SEND_PIN = 4
TIME_OUT = 10000
def setupCapacitorSense(pin):
global SEND_PIN
SEND_PIN = pin
g.setmode(g.BCM)
def discharge(pinNum):
g.setup(pinNum, g.OUT)
g.output(pinNum, g.LOW)
def capacitorSense(pinNum):
discharge(SEND_PIN)
discharge(pinNum)
g.setup(pinNum, g.IN)
g.output(SEND_PIN, g.HIGH)
total = 0
while g.input(pinNum) == g.LOW and total < TIME_OUT:
total += 1
return total
|
[
"basimkhajwal@gmail.com"
] |
basimkhajwal@gmail.com
|
4254173570e3eee3dc26eb055cdeb90b9e523c73
|
10356bdea212035d71a5d310bf798962ab7c3cee
|
/문제300/문제91-100.py
|
99be99f93038daa81b34b473a2350de40be72f63
|
[] |
no_license
|
name165/300prob
|
1ded94619a6b2b4f7539ff7e5aef53b00c8ef987
|
cd63335c73c4bd7481a95e8e30a60daffdf212ad
|
refs/heads/master
| 2023-06-13T03:15:10.791890
| 2021-07-10T08:11:41
| 2021-07-10T08:11:41
| 363,336,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
#91
inventory = {"메로나": [300, 20], "비비빅": [400, 3], "죠스바": [250, 100]}
print(inventory)
#92
print(inventory["메로나"][0], "원")
#93
print(inventory["메로나"][1], "개")
#94
inventory['월드콘'] = [500, 7]
print(inventory)
#95
icecream = {'탱크보이': 1200, '폴라포': 1200, '빵빠레': 1800, '월드콘': 1500, '메로나': 1000}
lis = list(icecream.keys())
print(lis)
#96
icecream = {'탱크보이': 1200, '폴라포': 1200, '빵빠레': 1800, '월드콘': 1500, '메로나': 1000}
lis = list(icecream.values())
print(lis)
#97
icecream = {'탱크보이': 1200, '폴라포': 1200, '빵빠레': 1800, '월드콘': 1500, '메로나': 1000}
lis = list(icecream.values())
print(sum(lis))
#98
icecream = {'탱크보이': 1200, '폴라포': 1200, '빵빠레': 1800, '월드콘': 1500, '메로나': 1000}
new_product = {"팥빙수": 2700, "아맛나": 1000}
icecream.update(new_product)
print(icecream)
#99
keys = ("apple", "pear", "peach")
vals = (300, 250, 400)
results = dict(zip(keys, vals))
print(results)
#100
data = ['09/05', '09/06', '09/07', '09/08', '09/09']
close_price = [10500, 10300, 10100, 10800, 11000]
close_table = dict(zip(data, close_price))
print(close_table)
|
[
"1234@ruu.kr"
] |
1234@ruu.kr
|
b36520204abceb7ea228c6496db2a9d6c5a07791
|
a15f288c00d1fd280c327263dd1c608d85cce2ac
|
/ganarcrm_django/team/migrations/0002_auto_20210514_0455.py
|
f822abd8f6fe37dd00623ccb5cd693dbf4bf2f23
|
[
"MIT"
] |
permissive
|
mburaksoran/ganarcrmExample
|
721c018a907cef16c1916bf529253be50c084bd8
|
5b2561a73a49abe608d695cf50bf516c7d08b124
|
refs/heads/main
| 2023-06-09T00:55:05.987826
| 2021-07-01T05:19:06
| 2021-07-01T05:19:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
# Generated by Django 3.2 on 2021-05-14 04:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('team', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Plan',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('max_leads', models.IntegerField(default=5)),
('max_clients', models.IntegerField(default=5)),
('price', models.IntegerField(default=0)),
],
),
migrations.AddField(
model_name='team',
name='plan',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='teams', to='team.plan'),
),
]
|
[
"codewithstein@Steins-MacBook-Pro.local"
] |
codewithstein@Steins-MacBook-Pro.local
|
37ded26c5f8eb43d97bf060b3afc87241f198359
|
8cd5c8d035b292caacb4d5aeb4bce0ca19ee3061
|
/DataScience/Python/scripts/analyzeLT1Data.py
|
26a0c0ca34c8c8282df8f396a739d7724288e03a
|
[] |
no_license
|
toddjm/projects
|
33be8c753d24a1ba1f719d661fed36d264957c45
|
709497603a6dcfb43feabfc812b3ad8bb3f183c9
|
refs/heads/master
| 2023-02-19T03:13:46.538278
| 2023-02-07T22:33:00
| 2023-02-07T22:33:00
| 47,042,227
| 1
| 1
| null | 2015-12-01T00:01:22
| 2015-11-28T22:44:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,334
|
py
|
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# Set data directory. Are we on Linux or Mac OS X?
if sys.platform == 'linux':
data_dir = '/home/todd/data/lt1_dmax/clean/LT1/running'
elif sys.platform == 'darwin':
data_dir = '/Users/todd/data/lt1_dmax/clean/LT1/running'
# Set output directory.
out_dir = os.path.join(data_dir, 'output')
# Read in .csv file names from data directory as list.
file_names = glob.glob(os.path.join(data_dir, '*.csv'))
file_names = [i.strip('\n') for i in file_names]
for fn in file_names:
# Read data from csv file to temporary array.
data = np.genfromtxt(fn, delimiter=',')
# Pull out arrays of x and y values.
xs = data[:, 0]
ys = data[:, 1]
# If we have a zero value for an xs, drop it from analysis.
if xs[0] == 0:
xs = xs[1:]
ys = ys[1:]
# Plot original data.
plt.plot(xs, ys, label='original')
# Create a linear space for fitting polynomials.
x_for_fit = np.linspace(xs.min(), xs.max(), num=100)
# Fit 1st-order polynomial to the first and last
# points to get L2.
L2_coef = np.polyfit([xs[0], xs[-1]], [ys[0], ys[-1]], deg=1)
L2 = np.polyval(L2_coef, x_for_fit)
# Plot L2.
plt.plot(x_for_fit, L2, label='L2')
# Calculate 1st derivative of L2.
L2_deriv = (ys[-1] - ys[0]) / (xs[-1] - xs[0])
# Fit a 3rd-order polynomial to the data for L3.
L3_coef = np.polyfit(xs, ys, deg=3)
L3 = np.polyval(L3_coef, x_for_fit)
# Plot L3.
plt.plot(x_for_fit, L3, label='L3')
# Find where the first derivative of L3 = L2_deriv.
dmax = np.polynomial.polynomial.polyroots([L3_coef[2] - L2_deriv,
2 * L3_coef[1],
3 * L3_coef[0]]).max()
# Draw line for Dmax.
plt.axvline(dmax, color='magenta',
label='Dmax {0:.2f}'.format(dmax))
# Set title and axes labels.
title = fn.split('/')[-1]
title = title.split('.')[0]
plt.title(title)
# Show plot.
plt.rcParams['legend.loc'] = 'best'
plt.xlabel('pace (m/s)')
#plt.xlabel('power (W)')
plt.ylabel('[lactate], mmol/L')
plt.legend()
# plt.show()
out_file = title + '.png'
plt.savefig(os.path.join(out_dir, out_file))
plt.close()
|
[
"todd.minehardt@gmail.com"
] |
todd.minehardt@gmail.com
|
2b24ec034a34c513b9c6b1bd086580ec9964d106
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Geometry/HcalEventSetup/python/CaloTowerGeometryDBWriter_cfi.py
|
dc75ba33a6cedd5c4191026f97719656397c89c3
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 531
|
py
|
import FWCore.ParameterSet.Config as cms
CaloTowerHardcodeGeometryEP = cms.ESProducer( "CaloTowerHardcodeGeometryEP" ,
appendToDataLabel = cms.string("_master")
)
CaloTowerGeometryToDBEP = cms.ESProducer( "CaloTowerGeometryToDBEP" ,
applyAlignment = cms.bool(False) ,
appendToDataLabel = cms.string("_toDB")
)
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
dd189af651c79002cd632c915e1dd70d79d565d8
|
cfb24bc7ab53e79518f54e552f02979843097b3e
|
/analysis/NT_waterPercentCalcTool.py
|
0efaae47033ab37892bb4043aa3631c961b376de
|
[] |
no_license
|
b7j/DRLMRepo
|
3305e9ae449e03cda3684e7f01b89a2ec3eddfc7
|
c889c9cd530245773c7b552b36e8f8435bc59669
|
refs/heads/master
| 2021-01-18T07:56:51.933126
| 2016-01-18T04:16:38
| 2016-01-18T04:16:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
#!/usr/bin/env python
"""
Calculate water count as a percentage of all observations
"""
import argparse
import sys
import numpy
from rios import applier
#function to get cmd line inputs
def getCmdargs():
p = argparse.ArgumentParser()
p.add_argument("--infile", help="input file")
p.add_argument("--outfile", help="output")
cmdargs = p.parse_args()
if cmdargs.infile is None:
p.print_help()
sys.exit()
return cmdargs
PCNT_NULL = 255
def main():
"""
Main routine
"""
cmdargs = getCmdargs()
infiles = applier.FilenameAssociations()
outfiles = applier.FilenameAssociations()
controls = applier.ApplierControls()
infiles.counts = cmdargs.infile
outfiles.percent = cmdargs.outfile
controls.setStatsIgnore(PCNT_NULL)
applier.apply(doPcnt, infiles, outfiles, controls=controls)
def doPcnt(info, inputs, outputs):
"""
Called by RIOS.
Calculate percentage
"""
wetcount = inputs.counts[0].astype(numpy.float32)
obscount = inputs.counts[1]
percent = (100.0 * wetcount / obscount).astype(numpy.uint8)
outputs.percent = numpy.array([percent])
# Set a null value in areas where denominator is zero
outputs.percent[0][inputs.counts[1] == 0] = PCNT_NULL
def readList(filelist):
pass
if __name__ == "__main__":
main()
|
[
"grant.staben@nt.gov.au"
] |
grant.staben@nt.gov.au
|
0d3f557b43efbcac584e45ddcbba1b34fb2f82e5
|
f93e22a5d5cf00544ca4091d0cbc5c4bc40beb8e
|
/ex29.py
|
62498f5d3b6ba63d9b5c7a2797c3add5d4010ab7
|
[] |
no_license
|
hyh840819/h3
|
ea9f44061e1d5729fd103fe7e873415995e0035d
|
9220450e45b7af895f710ce77599b8974c1bc8a6
|
refs/heads/master
| 2020-04-07T08:52:54.563053
| 2018-11-19T13:53:58
| 2018-11-19T13:53:58
| 158,231,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
people = 20
cats = 30
dogs = 15
if people < cats:
print("Too many cats! The world is doomed!")
if people > cats:
print("Not many cats! The world is saved!")
if people < dogs:
print("The world is dry!")
if people > dogs:
print("The world is dry!")
dogs += 5
if people >= dogs:
print("People are greater than or equal to dogs.")
if people <= dogs:
print("People are less than or equal to dogs")
if people == dogs:
print("Pelople are dogs.")
|
[
"hyh@GCDCH.lan"
] |
hyh@GCDCH.lan
|
d8d3d453e04ceda4bf51fc916da07205ed182225
|
9c9b2a8074e976814f7773c19980e77efda53371
|
/infomatics/массивы/d.py
|
2a755d897f0ee488eab6e95ebc010632c04b5fc2
|
[] |
no_license
|
nazkeyramazan/hw
|
451b692a9001e5f6087785f8b2b6215f9af90956
|
f8d9e11463a1a4181b24df95f6d0a501ec8441a7
|
refs/heads/master
| 2023-01-08T00:25:31.335217
| 2020-04-24T00:14:57
| 2020-04-24T00:14:57
| 248,481,581
| 0
| 0
| null | 2023-01-07T17:23:44
| 2020-03-19T11:06:53
|
Python
|
UTF-8
|
Python
| false
| false
| 176
|
py
|
n = input()
s = input()
data = s.split()
temp = int(data[0])
k = 0
for i in range(len(data)):
if temp < int(data[i]):
temp = int(data[i])
k += 1
print(k)
|
[
"nazkeyramazan@gmail.com"
] |
nazkeyramazan@gmail.com
|
d737f53dc33062761b45567992c4853639e72b21
|
d1b1184fee07edcf5fb5fe0ae1c2c31964545178
|
/capitolzen/users/migrations/0009_auto_20171118_1454.py
|
a8e30fd9f1224ae85036cbf077653ceb0f7a4d68
|
[] |
no_license
|
CapitolZen/CapitolZen-Backend
|
d3bc64e3cff6d887fa97fe04b67b761b04aa850a
|
604a5c5d933815ab4b51b3f24b1cbd0b71c18133
|
refs/heads/master
| 2021-09-16T00:15:34.529015
| 2018-06-13T15:41:16
| 2018-06-13T15:41:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-18 19:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('proposals', '0036_bill_cosponsors_dups2'),
('users', '0008_auto_20171118_0936'),
]
operations = [
migrations.AddField(
model_name='action',
name='wrapper',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='proposals.Wrapper'),
),
migrations.AlterField(
model_name='action',
name='committee',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='proposals.Committee'),
),
migrations.AlterField(
model_name='action',
name='title',
field=models.CharField(choices=[('bill:introduced', 'Bill Introduced'), ('wrapper:updated', 'Bill Updated'), ('organization:user-add', 'User Joined'), ('organization:user-invite', 'User Invited'), ('organization:mention', 'Mentioned')], db_index=True, max_length=225),
),
]
|
[
"djwasserman@gmail.com"
] |
djwasserman@gmail.com
|
ef78e98a09ae3fe21dcc1b6b0286dd42dddd2b12
|
252621784d24992db1ea51760bcccc89c878dd09
|
/checking_for_blur.py
|
ea4c71cca21a9ae0faa13e0a978f40d99d4e2133
|
[] |
no_license
|
SamuelZachara/Clarendon_filter_reverse_engineering
|
4b007a46508a6958f44b3ccdff2e8569cedcdabd
|
b97cbb2d2a6ec97e09f1d752b3f5c21a94862f0d
|
refs/heads/master
| 2020-04-11T16:13:24.269969
| 2018-12-15T15:18:02
| 2018-12-15T15:18:02
| 161,916,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,170
|
py
|
try:
from PIL import Image
except ImportError:
import Image
import os, sys
img_black = Image.open('filtered/solid_000000.png').convert('RGB')
img_white = Image.open('filtered/solid_FFFFFF.png').convert('RGB')
img_black_original = Image.open('original/solid_000000.png').convert('RGB')
img_white_original = Image.open('original/solid_FFFFFF.png').convert('RGB')
img_checker_bw_0 = Image.open('filtered/checker_bw_0.png').convert('RGB')
img_checker_bw_1 = Image.open('filtered/checker_bw_1.png').convert('RGB')
img_horiz_bw_stripes_0 = Image.open('filtered/horiz_bw_stripes_0.png').convert('RGB')
img_horiz_bw_stripes_1 = Image.open('filtered/horiz_bw_stripes_1.png').convert('RGB')
img_vert_bw_stripes_0 = Image.open('filtered/vert_bw_stripes_0.png').convert('RGB')
img_vert_bw_stripes_1 = Image.open('filtered/vert_bw_stripes_1.png').convert('RGB')
img_checker_bw_0_jpg = Image.open('original_jpg/checker_bw_0.jpg').convert('RGB')
img_checker_bw_1_jpg = Image.open('original_jpg/checker_bw_1.jpg').convert('RGB')
img_horiz_bw_stripes_0_jpg = Image.open('original_jpg/horiz_bw_stripes_0.jpg').convert('RGB')
img_horiz_bw_stripes_1_jpg = Image.open('original_jpg/horiz_bw_stripes_1.jpg').convert('RGB')
img_vert_bw_stripes_0_jpg = Image.open('original_jpg/vert_bw_stripes_0.jpg').convert('RGB')
img_vert_bw_stripes_1_jpg = Image.open('original_jpg/vert_bw_stripes_1.jpg').convert('RGB')
img = Image.new('RGB', (1080, 1080), color = 'white')
def generate_color_error(error):
# A simple function that transforms counted error into color pixel representation
return (int((error/10)*256), 0, 255)
def count_error(pixel1, pixel2):
# used to count difference between two given pixels
# returns average of errors for each color channel
(r1, g1, b1) = pixel1
(r2, g2, b2) = pixel2
error_red = ((abs(r1-r2) / 255)*100)
error_green = ((abs(g1-g2) / 255)*100)
error_blue = ((abs(b1-b2) / 255)*100)
return (error_red + error_green + error_blue)/3
def check_error_checker_image(image, img_check1, img_check2, img, error_image_name):
# used generating error images for checker_image
# images are generated into folder error_images
width, height = image.size
im = image.load()
im_check1 = img_check1.load()
im_check2 = img_check2.load()
test = img.load()
error_sum = 0
count = 0
max_error = 0
for x in range(0, width, 2):
for y in range(0, width, 2):
current_error = count_error(im[x,y], im_check1[x,y])
if current_error > max_error:
max_error = current_error
error_sum += current_error
count = count + 1
test[x, y] = generate_color_error(current_error)
error1 = error_sum/count
error_sum = 0
count = 0
for x in range(1, width, 2):
for y in range(1, width, 2):
current_error = count_error(im[x,y], im_check1[x,y])
error_sum += current_error
count = count + 1
test[x, y] = generate_color_error(current_error)
error2 = error_sum/count
error_im_check1 = (error1 + error2)/2
print(error_im_check1)
error_sum = 0
count = 0
for x in range(0, width, 2):
for y in range(1, width, 2):
current_error = count_error(im[x,y], im_check2[x,y])
error_sum += current_error
count = count + 1
test[x, y] = generate_color_error(current_error)
error1 = error_sum/count
error_sum = 0
count = 0
for x in range(1, width, 2):
for y in range(0, width, 2):
current_error = count_error(im[x,y], im_check2[x,y])
error_sum += current_error
count = count + 1
test[x, y] = generate_color_error(current_error)
error2 = error_sum/count
error_im_check2 = (error1 + error2)/2
print(error_im_check2)
####
img.save('error_images/' + error_image_name)
####
return (error_im_check1 + error_im_check2)/2
def check_error_ver_stripes(image, img_check1, img_check2, img, error_image_name):
# used generating error images for ver_stripes
width, height = image.size
im = image.load()
im_check1 = img_check1.load()
im_check2 = img_check2.load()
test = img.load()
error_sum = 0
count = 0
for x in range(0, width, 2):
for y in range(height):
current_error = count_error(im[x,y], im_check1[x,y])
error_sum += current_error
count = count + 1
test[x, y] = generate_color_error(current_error)
error1 = error_sum/count
error_sum = 0
count = 0
for x in range(1, width, 2):
for y in range(height):
current_error = count_error(im[x,y], im_check2[x,y])
error_sum += current_error
count = count + 1
test[x, y] = generate_color_error(current_error)
error2 = error_sum/count
####
img.save('error_images/' + error_image_name)
####
return (error1 + error2)/2
def check_error_horiz_stripes(image, img_check1, img_check2, img, error_image_name):
# used generating error images for horiz_stripes
width, height = image.size
im = image.load()
im_check1 = img_check1.load()
im_check2 = img_check2.load()
test = img.load()
error_sum = 0
count = 0
for x in range(width):
for y in range(0, height, 2):
current_error = count_error(im[x,y], im_check1[x,y])
error_sum += current_error
count = count + 1
test[x, y] = generate_color_error(current_error)
error1 = error_sum/count
error_sum = 0
count = 0
for x in range(width):
for y in range(1, height, 2):
current_error = count_error(im[x,y], im_check2[x,y])
error_sum += current_error
count = count + 1
test[x, y] = generate_color_error(current_error)
error2 = error_sum/count
####
img.save('error_images/' + error_image_name)
####
return (error1 + error2)/2
print('Error on image img_checker_bw_0 = ' + str(check_error_checker_image(img_checker_bw_0, img_white, img_black, img, 'checker_bw_0_error.png')) + '%')
print('Error on image img_checker_bw_1 = ' + str(check_error_checker_image(img_checker_bw_1, img_black, img_white, img, 'checker_bw_1_error.png')) + '%')
print('Error on image horiz_bw_stripes_0 = ' + str(check_error_horiz_stripes(img_horiz_bw_stripes_0, img_black, img_white, img, 'horiz_bw_stripes_0_error.png')) + '%')
print('Error on image horiz_bw_stripes_1 = ' + str(check_error_horiz_stripes(img_horiz_bw_stripes_1, img_white, img_black, img, 'horiz_bw_stripes_1_error.png')) + '%')
print('Error on image ver_bw_stripes_0 = ' + str(check_error_ver_stripes(img_vert_bw_stripes_0, img_black, img_white, img, 'ver_bw_stripes_0.png')) + '%')
print('Error on image ver_bw_stripes_1 = ' + str(check_error_ver_stripes(img_vert_bw_stripes_1, img_white, img_black, img, 'ver_bw_stripes_1.png')) + '%')
print('Error on image img_checker_bw_0_jpg = ' + str(check_error_checker_image(img_checker_bw_0_jpg, img_white_original, img_black_original, img, 'checker_bw_0_error_jpg.png')) + '%')
print('Error on image img_checker_bw_1_jpg = ' + str(check_error_checker_image(img_checker_bw_1_jpg, img_black_original, img_white_original, img, 'checker_bw_1_error_jpg.png')) + '%')
print('Error on image horiz_bw_stripes_0_jpg = ' + str(check_error_horiz_stripes(img_horiz_bw_stripes_0_jpg, img_black_original, img_white_original, img, 'horiz_bw_stripes_0_error_jpg.png')) + '%')
print('Error on image horiz_bw_stripes_1_jpg = ' + str(check_error_horiz_stripes(img_horiz_bw_stripes_1_jpg, img_white_original, img_black_original, img, 'horiz_bw_stripes_1_error_jpg.png')) + '%')
print('Error on image ver_bw_stripes_0_jpg = ' + str(check_error_ver_stripes(img_vert_bw_stripes_0_jpg, img_black_original, img_white_original, img, 'ver_bw_stripes_0_jpg.png')) + '%')
print('Error on image ver_bw_stripes_1_jpg = ' + str(check_error_ver_stripes(img_vert_bw_stripes_1_jpg, img_white_original, img_black_original, img, 'ver_bw_stripes_1_jpg.png')) + '%')
|
[
"noreply@github.com"
] |
SamuelZachara.noreply@github.com
|
e0d15e783287de2d262fe73d9af3aab0bc994dbd
|
409d0fac6c50ecb0b087be00d0a43802b3cb9d30
|
/hdcs_manager/source/hsm/hsm/api/contrib/services.py
|
bb0c1784f0ae9f3327d799437e5e34a973e0fdb2
|
[
"Apache-2.0"
] |
permissive
|
xuechendi/HDCS
|
a5f5ac62c9da063359c173c352f4f484ba4d7cdb
|
4ce169d5be1c3e8614e5e6f198d3593eb904b97d
|
refs/heads/master
| 2021-01-15T22:18:59.554181
| 2017-07-05T13:03:57
| 2017-07-05T13:03:57
| 99,896,630
| 1
| 1
| null | 2017-08-10T07:51:52
| 2017-08-10T07:51:52
| null |
UTF-8
|
Python
| false
| false
| 3,343
|
py
|
import webob.exc
from hsm.api import extensions
from hsm import db
from hsm import exception
from hsm.openstack.common import log as logging
from hsm.openstack.common import timeutils
from hsm import utils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('storage', 'services')
class ServiceController(object):
def index(self, req):
"""
Return a list of all running services. Filter by host & service name.
"""
context = req.environ['hsm.context']
authorize(context)
now = timeutils.utcnow()
services = db.service_get_all(context)
host = ''
if 'host' in req.GET:
host = req.GET['host']
service = ''
if 'service' in req.GET:
service = req.GET['service']
binary = ''
if 'binary' in req.GET:
binary = req.GET['binary']
if host:
services = [s for s in services if s['host'] == host]
binary_key = binary or service
if binary_key:
services = [s for s in services if s['binary'] == binary_key]
svcs = []
for svc in services:
delta = now - (svc['updated_at'] or svc['created_at'])
alive = abs(utils.total_seconds(delta))
art = (alive and "up") or "down"
active = 'enabled'
if svc['disabled']:
active = 'disabled'
svcs.append({"binary": svc['binary'], 'host': svc['host'],
'zone': svc['availability_zone'],
'status': active, 'state': art,
'updated_at': svc['updated_at']})
return {'services': svcs}
def update(self, req, id, body):
"""Enable/Disable scheduling for a service"""
context = req.environ['hsm.context']
authorize(context)
if id == "enable":
disabled = False
elif id == "disable":
disabled = True
else:
raise webob.exc.HTTPNotFound("Unknown action")
try:
host = body['host']
except (TypeError, KeyError):
raise webob.exc.HTTPBadRequest()
service = body.get('service', '')
binary = body.get('binary', '')
binary_key = binary or service
if not binary_key:
raise webob.exc.HTTPBadRequest()
try:
svc = db.service_get_by_args(context, host, binary_key)
if not svc:
raise webob.exc.HTTPNotFound('Unknown service')
db.service_update(context, svc['id'], {'disabled': disabled})
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound("service not found")
status = id + 'd'
return {'host': host,
'service': service,
'disabled': disabled,
'binary': binary,
'status': status}
class Services(extensions.ExtensionDescriptor):
"""Services support"""
name = "Services"
alias = "hsm-services"
updated = "2016-11-18T00:00:00-00:00"
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('hsm-services',
ServiceController())
resources.append(resource)
return resources
|
[
"yuan.zhou@intel.com"
] |
yuan.zhou@intel.com
|
ff8f86292617a8597edc809076063b0f6261283c
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/SiamFC/src/dataset.py
|
83c8e8c6e3e8ce864c6d87af664d12aa08b25bc6
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 5,068
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""VID dataset"""
import os
import pickle
import hashlib
import cv2
import numpy as np
from src.config import config
class ImagnetVIDDataset():
"""
    Dataset wrapper used by GeneratorDataset to produce exemplar/instance image pairs
Args:
db : lmdb file
video_names : all video name
data_dir : the location of image pair
z_transforms : the transforms list used in exemplar
x_transforms : the transforms list used in instance
training : status of training
"""
def __init__(self, db, video_names, data_dir, z_transforms, x_transforms, training=True):
self.video_names = video_names
self.data_dir = data_dir
self.z_transforms = z_transforms
self.x_transforms = x_transforms
meta_data_path = os.path.join(data_dir, 'meta_data.pkl')
self.meta_data = pickle.load(open(meta_data_path, 'rb'))
self.meta_data = {x[0]: x[1] for x in self.meta_data}
for key in self.meta_data.keys():
trajs = self.meta_data[key]
for trkid in list(trajs.keys()):
if len(trajs[trkid]) < 2:
del trajs[trkid]
self.txn = db.begin(write=False)
self.num = len(self.video_names) if config.num_per_epoch is None or not \
training else config.num_per_epoch
def imread(self, path):
"""
        read an image from the lmdb store by its path
        Args:
path : the image path
"""
key = hashlib.md5(path.encode()).digest()
img_buffer = self.txn.get(key)
img_buffer = np.frombuffer(img_buffer, np.uint8)
img = cv2.imdecode(img_buffer, cv2.IMREAD_COLOR)
return img
def _sample_weights(self, center, low_idx, high_idx, s_type='uniform'):
"""
        Pick another image relative to the center image; the weights set here
        will be used with the chosen sampling distribution
Args:
center : the position of center image
low_idx : the minimum of id
high_idx : the max of id
s_type : choose different distribution. "uniform", "sqrt", "linear"
can be chosen
"""
weights = list(range(low_idx, high_idx))
weights.remove(center)
weights = np.array(weights)
if s_type == 'linear':
weights = abs(weights - center)
elif s_type == 'sqrt':
weights = np.sqrt(abs(weights - center))
elif s_type == 'uniform':
weights = np.ones_like(weights)
return weights / sum(weights)
def __getitem__(self, idx):
idx = idx % len(self.video_names)
video = self.video_names[idx]
trajs = self.meta_data[video]
trkid = np.random.choice(list(trajs.keys()))
traj = trajs[trkid]
assert len(traj) > 1, "video_name: {}".format(video)
exemplar_idx = np.random.choice(list(range(len(traj))))
exemplar_name = os.path.join(self.data_dir, video,
traj[exemplar_idx] + ".{:02d}.x.jpg".format(trkid))
exemplar_img = self.imread(exemplar_name)
exemplar_img = cv2.cvtColor(exemplar_img, cv2.COLOR_BGR2RGB)
# sample instance
low_idx = max(0, exemplar_idx - config.frame_range)
up_idx = min(len(traj), exemplar_idx + config.frame_range)
weights = self._sample_weights(exemplar_idx, low_idx, up_idx, config.sample_type)
instance = np.random.choice(traj[low_idx:exemplar_idx] + traj[exemplar_idx + 1:up_idx],
p=weights)
instance_name = os.path.join(self.data_dir, video, instance + ".{:02d}.x.jpg".format(trkid))
instance_img = self.imread(instance_name)
instance_img = cv2.cvtColor(instance_img, cv2.COLOR_BGR2RGB)
if np.random.rand(1) < config.gray_ratio:
exemplar_img = cv2.cvtColor(exemplar_img, cv2.COLOR_RGB2GRAY)
exemplar_img = cv2.cvtColor(exemplar_img, cv2.COLOR_GRAY2RGB)
instance_img = cv2.cvtColor(instance_img, cv2.COLOR_RGB2GRAY)
instance_img = cv2.cvtColor(instance_img, cv2.COLOR_GRAY2RGB)
exemplar_img = self.z_transforms(exemplar_img)
instance_img = self.x_transforms(instance_img)
return exemplar_img, instance_img
def __len__(self):
return self.num
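# Minimal usage sketch (hypothetical paths, names and transforms; the MindSpore
# GeneratorDataset wiring below is an assumption based on the class docstring):
#   db = lmdb.open('vid_lmdb', readonly=True)
#   dataset = ImagnetVIDDataset(db, video_names, 'vid_crops', z_transforms, x_transforms)
#   loader = mindspore.dataset.GeneratorDataset(dataset, column_names=['exemplar', 'instance'])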
|
[
"chenhaozhe1@huawei.com"
] |
chenhaozhe1@huawei.com
|
1b1e6aeb37ad7d24e7758ebda4a967bed305b19e
|
e9badfc473218a65d87d516011fc4fcde0086270
|
/test/consumers/listen.py
|
97ad5f8e26c568707e7a938e6ca44f84193a628a
|
[] |
no_license
|
dlf-dds/simplestack
|
6a5bcb7be42d7b003ec56feb2f0a3bb7a45a464a
|
231967091a3a8167fc1ef296b013008dbde99c2b
|
refs/heads/master
| 2023-03-07T07:55:10.746462
| 2021-02-24T03:39:56
| 2021-02-24T03:39:56
| 341,301,722
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,679
|
py
|
from datetime import datetime, timedelta
import json
import boto3
from botocore.exceptions import ClientError
import os, sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from test.consumers.localconfig import config as kinconfig
def get_kinesis_data_iterator(stream_name=None, minutes_running=None):
print(f"AWS_SECURITY_TOKEN: {os.getenv('AWS_SECURITY_TOKEN',None)}")
kinconfig.kinesis_connection()
kinconfig.logging.info(f'stream_name={stream_name}')
shard_iterator_list = kinconfig.get_kinesis_shard_iterator(stream_name)
# Calculate end time
end_time = datetime.now() + timedelta(minutes=minutes_running)
while True:
try:
print(f'heartbeat -- shard_iterator_list: {[s[-10:] for s in shard_iterator_list]}')
new_shard_iterator_list = []
records_collection_list =[]
for shard_iterator in shard_iterator_list:
now = datetime.now()
if end_time < now:
kinconfig.logging.info(f"time out: {end_time}")
return
kinconfig.logging.info('Time: {0}'.format(now.strftime('%Y/%m/%d %H:%M:%S')))
record_response = kinconfig.kinesis_client.get_records(ShardIterator=shard_iterator)
if len(record_response['Records']) > 0:
kinconfig.logging.info(f"shard_iterator: {shard_iterator[-10:]}")
kinconfig.logging.info(f"num records: {len(record_response['Records'])}")
for record in record_response['Records']:
kinconfig.logging.info(f'record: {record}')
last_sequence = record['SequenceNumber']
records_collection_list.append(json.loads(record['Data']))
yield {"Records":records_collection_list}
if record_response['NextShardIterator']:
new_shard_iterator_list.append(record_response['NextShardIterator'])
else:
new_shard_iterator_list.append(shard_iterator)
shard_iterator_list=new_shard_iterator_list
except ClientError as e:
kinconfig.logging.error(e)
time.sleep(3)
if __name__ == "__main__":
import os
minutes_running = int(os.getenv('MINUTES_RUNNING','1'))
stream_name=os.getenv("LISTEN_STREAM_NAME","demo-sink-stream")
os.environ['AWS_REGION']='us-east-1'
kinesis_data = get_kinesis_data_iterator(
stream_name=stream_name,
minutes_running=minutes_running
)
for d in kinesis_data:
print(d)
|
[
""
] | |
8297c1cd649f32897877433fe43a585f83a07d0a
|
63d6ca98653d2b77691960a5a1af21795e7d752e
|
/main.py
|
a04c2fe5dce205b32a788c6a959dada2adc10159
|
[] |
no_license
|
LeoKavanagh/dublin-forecast
|
19eff1393fa722985ab957d58295d86df299b592
|
722d3fdbc388114750fee17454d6e77f6829cd82
|
refs/heads/master
| 2023-08-03T21:56:55.752057
| 2022-12-10T23:59:35
| 2022-12-10T23:59:35
| 233,704,646
| 0
| 0
| null | 2023-07-25T20:48:51
| 2020-01-13T22:13:57
|
Python
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
import os
import requests
from lxml import html
def get_dublin_forecast():
url = 'https://www.met.ie/forecasts/dublin'
page = requests.get(url)
tree = html.fromstring(page.content)
dublin = tree \
.xpath('/html/body/div/div[1]/section'
'/div/div[1]/div[1]/p[4]/text()')[0] \
.replace('\n', ' ')
return dublin
def send(url, forecast):
lambda_endpoint = url + '/' + forecast
requests.get(lambda_endpoint)
def main(*args, **kwargs):
lambda_url = os.environ['LAMBDA_URL']
fc = get_dublin_forecast()
send(lambda_url, fc)
if __name__=='__main__':
main(None)
|
[
"leok90@gmail.com"
] |
leok90@gmail.com
|
20854815fa9eda8c5b3834f842d820c263c2b27d
|
f6e0761d8e95bf274a058437803607c6bc9fe473
|
/algos/train.py
|
1b6be98fb351a6b25ddc4e1f1faeecf5898e06e5
|
[
"Apache-2.0"
] |
permissive
|
HekpoMaH/algorithmic-concepts-reasoning
|
b6c54e4d64e7801b08429941c3949225b1046f13
|
17c87faad2fbe8481455de34a145a4753a2fe4d0
|
refs/heads/master
| 2023-06-29T18:46:39.134524
| 2021-07-15T16:30:44
| 2021-07-15T16:30:44
| 384,510,679
| 25
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,361
|
py
|
"""
Script to train models.
Usage:
train.py [--algos=ALGO]... [options]
Options:
-h --help Show this screen.
--use-TF Use Teacher Forcing or not during training. Not
using it, would add a GRU cell at the GNN update
step. [default: False]
--use-GRU Force the usage of a GRU cell at the GNN update step.
[default: False]
--no-use-concepts Do NOT utilise concepts bottleneck. If you set this
flag, set below flag too. [default: False]
--no-use-concepts-sv Do NOT utilise concepts supervision. [default: False]
--pooling=PL What graph pooling mechanism to use for termination.
One of {attention, predinet, max, mean}. [default: predinet]
--no-next-step-pool Do NOT use next step information for termination.
Use current instead. [default: False]
--algos ALGO Which algorithms to train {BFS, parallel_coloring}.
Repeatable parameter. [default: BFS]
--epochs EP Number of epochs to train. [default: 250]
--no-patience Do not utilise patience, train for max epochs. [default: False]
--model-name MN Name of the model when saving. Defaults to current time
and date if not provided.
--L1-loss Add L1 loss to the concept decoders part. [default: False]
--prune-epoch PE The epoch on which to prune logic layers.
The default of -1 does no pruning at all. [default: -1]
--use-decision-tree Use decision tree for concept->output mapping. [default: False]
--drop-last-concept Drop last concept? (Works only for coloring) [default: False]
--seed S Random seed to set. [default: 47]
"""
# TODO: consider adding a requirements.txt file
import json
import os
import torch
import torch.optim as optim
import random
import numpy as np
import deep_logic
import sympy
import schema
import copy
from deep_logic.utils.layer import prune_logic_layers
from datetime import datetime
from docopt import docopt
from algos.models import AlgorithmProcessor
from algos.hyperparameters import get_hyperparameters
from algos.utils import iterate_over, plot_decision_trees, load_algorithms_and_datasets
from pprint import pprint
from tqdm import tqdm
# torch.autograd.set_detect_anomaly(True)
args = docopt(__doc__)
schema = schema.Schema({'--algos': schema.And(list, [lambda n: n in ['BFS', 'parallel_coloring']]),
'--help': bool,
'--use-TF': bool,
'--L1-loss': bool,
'--use-decision-tree': bool,
'--no-use-concepts-sv': bool,
'--no-use-concepts': bool,
'--pooling': schema.And(str, lambda s: s in ['attention', 'predinet', 'mean', 'max']),
'--no-next-step-pool': bool,
'--use-GRU': bool,
'--no-patience': bool,
'--drop-last-concept': bool,
'--model-name': schema.Or(None, schema.Use(str)),
'--prune-epoch': schema.Use(int),
'--seed': schema.Use(int),
'--epochs': schema.Use(int)})
args = schema.validate(args)
NAME = args["--model-name"] if args["--model-name"] is not None else datetime.now().strftime("%b-%d-%Y-%H-%M")
torch.manual_seed(args['--seed'])
random.seed(args['--seed'])
np.random.seed(args['--seed'])
torch.cuda.manual_seed(0)
print("SEEDED with", args['--seed'])
_DEVICE = get_hyperparameters()['device']
_DIM_LATENT = get_hyperparameters()['dim_latent']
processor = AlgorithmProcessor(
_DIM_LATENT,
bias=get_hyperparameters()['bias'],
prune_logic_epoch=args['--prune-epoch'],
use_gru=not args['--use-TF'] or args['--use-GRU'],
).to(_DEVICE)
_gnrtrs = get_hyperparameters()['generators']
if 'parallel_coloring' in args['--algos']:
_gnrtrs += ['deg5']
print("GENERATORS", _gnrtrs)
load_algorithms_and_datasets(args['--algos'],
processor, {
'split': 'train',
'generators': _gnrtrs,
'num_nodes': 20,
},
use_TF=args['--use-TF'],
use_concepts=not args['--no-use-concepts'],
use_concepts_sv=not args['--no-use-concepts-sv'],
drop_last_concept=args['--drop-last-concept'],
pooling=args['--pooling'],
next_step_pool=not args['--no-next-step-pool'],
L1_loss=args['--L1-loss'],
prune_logic_epoch=args['--prune-epoch'],
use_decision_tree=args['--use-decision-tree'],
new_coloring_dataset=False,
bias=get_hyperparameters()['bias'])
best_model = copy.deepcopy(processor)
best_score = float('inf')
print(processor)
term_params = []
normal_params = []
for name, param in processor.named_parameters():
if '_term' in name or 'termination' in name or 'predinet' in name:
term_params.append(param)
else:
normal_params.append(param)
lr = get_hyperparameters()[f'lr']
optimizer = optim.Adam([
{'params': term_params, 'lr': lr},
{'params': normal_params, 'lr': lr}
],
lr=get_hyperparameters()[f'lr'],
weight_decay=get_hyperparameters()['weight_decay'])
patience = 0
hardcode_outputs = False
hardcoding = torch.zeros(list(processor.algorithms.values())[0].concept_features, dtype=torch.bool).to(_DEVICE)
hardcoding[0] = False
hardcoding[1] = False
hardcoding = None
for epoch in range(args['--epochs']):
processor.load_split('train')
processor.train()
iterate_over(processor, optimizer=optimizer, epoch=epoch, hardcode_concepts=hardcoding, hardcode_outputs=hardcode_outputs)
if epoch == processor.prune_logic_epoch:
best_score = float('inf')
for name, algorithm in processor.algorithms.items():
if algorithm.use_concepts:
algorithm.concept_decoder = prune_logic_layers(
algorithm.concept_decoder,
epoch,
algorithm.prune_logic_epoch,
device=_DEVICE)
serialised_models_dir = './algos/serialised_models/'
if not os.path.isdir(serialised_models_dir):
os.makedirs(serialised_models_dir)
if (epoch + 1) % 10 == 0:
torch.save(processor.state_dict(), './algos/serialised_models/test_'+NAME+'_epoch_'+str(epoch)+'.pt')
processor.eval()
if (epoch+1) % 1 == 0:
processor.eval()
print("EPOCH", epoch)
for spl in ['val']:
print("SPLIT", spl)
processor.load_split(spl)
iterate_over(processor, epoch=epoch, hardcode_concepts=hardcoding, hardcode_outputs=hardcode_outputs)
total_sum = 0
for name, algorithm in processor.algorithms.items():
print("algo", name)
pprint(algorithm.get_losses_dict(validation=True))
pprint(algorithm.get_validation_accuracies())
total_sum += sum(algorithm.get_losses_dict(validation=True).values())
if spl == 'val':
patience += 1
if total_sum < best_score and spl == 'val':
best_score = total_sum
best_model = copy.deepcopy(processor)
torch.save(best_model.state_dict(), './algos/serialised_models/best_'+NAME+'.pt')
patience = 0
total_sum2 = 0
print("PATIENCE", patience, total_sum, total_sum2, best_score)
if patience >= 50 and not args['--no-patience']:
break
torch.save(best_model.state_dict(), './algos/serialised_models/best_'+NAME+'.pt')
if args['--use-decision-tree']:
iterate_over(best_model, fit_decision_tree=True, hardcode_concepts=hardcoding)
plot_decision_trees(best_model.algorithms)
print("TESTING!!!")
if not args['--no-use-concepts'] and not args['--use-decision-tree']:
iterate_over(best_model, extract_formulas=True, epoch=0, hardcode_concepts=hardcoding, hardcode_outputs=hardcode_outputs)
for algorithm in best_model.algorithms.values():
print("EXPLANATIONS", algorithm.explanations)
best_model.eval()
best_model.load_split('test')
iterate_over(best_model, apply_formulas=True and not args['--no-use-concepts'] and not args['--use-decision-tree'], apply_decision_tree=args['--use-decision-tree'], epoch=0, hardcode_concepts=hardcoding, hardcode_outputs=hardcode_outputs)
for algorithm in best_model.algorithms.values():
pprint(algorithm.get_validation_accuracies())
|
[
"dobrikg666@gmail.com"
] |
dobrikg666@gmail.com
|
8eeaa0ca64e1bf2b2d43b5a3ce16af064f666d4a
|
67a442ecabcdca9f54f5920874d0095d57f98ede
|
/gewittergefahr/gg_utils/dilation_test.py
|
ffb5a2725b02b9dd7ffdc08e4b856685a7be3f54
|
[
"MIT"
] |
permissive
|
thunderhoser/GewitterGefahr
|
58ba3446c1cc154f56c12c4354dff05b34c12b13
|
1835a71ababb7ad7e47bfa19e62948d466559d56
|
refs/heads/master
| 2022-07-23T06:47:13.883598
| 2022-07-15T12:43:48
| 2022-07-15T12:43:48
| 104,016,785
| 29
| 13
|
MIT
| 2020-12-18T20:44:33
| 2017-09-19T02:37:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,662
|
py
|
"""Unit tests for dilation.py."""
import unittest
import numpy
from gewittergefahr.gg_utils import dilation
TOLERANCE = 1e-6
SMALL_PERCENTILE = 12.5
LARGE_PERCENTILE = 87.5
DILATION_HALF_WIDTH_IN_PIXELS = 1
INPUT_MATRIX = numpy.array(
[[-20., -15., -10., -5., 0.],
[-10., -5., 0., 5., 10.],
[0., 5., 10., numpy.nan, numpy.nan],
[10., 15., 20., numpy.nan, numpy.nan]])
OUTPUT_MATRIX_SMALL_PERCENTILE = numpy.array(
[[-15., -15., -10., -5., numpy.nan],
[-15., -15., -10., -5., numpy.nan],
[-5., -5., numpy.nan, numpy.nan, numpy.nan],
[numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan]])
OUTPUT_MATRIX_LARGE_PERCENTILE = numpy.array(
[[numpy.nan, numpy.nan, numpy.nan, 5., 5.],
[numpy.nan, 5., 5., 10., 5.],
[10., 15., 15., 10., 5.],
[10., 15., 15., 10., numpy.nan]])
OUTPUT_MATRIX_LARGEST_ABS_VALUE = numpy.array(
[[-15., -15., -10., 5., 5.],
[-15., -15., -10., 10., 5.],
[10., 15., 15., 10., 5.],
[10., 15., 15., 10., numpy.nan]])
class DilationTests(unittest.TestCase):
"""Each method is a unit test for dilation.py."""
def test_dilate_2d_matrix_small_percentile(self):
"""Ensures correct output from dilate_2d_matrix with small prctile."""
this_output_matrix = dilation.dilate_2d_matrix(
INPUT_MATRIX, percentile_level=SMALL_PERCENTILE,
half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS)
self.assertTrue(numpy.allclose(
this_output_matrix, OUTPUT_MATRIX_SMALL_PERCENTILE, atol=TOLERANCE,
equal_nan=True))
def test_dilate_2d_matrix_large_percentile(self):
"""Ensures correct output from dilate_2d_matrix with large prctile."""
this_output_matrix = dilation.dilate_2d_matrix(
INPUT_MATRIX, percentile_level=LARGE_PERCENTILE,
half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS)
self.assertTrue(numpy.allclose(
this_output_matrix, OUTPUT_MATRIX_LARGE_PERCENTILE, atol=TOLERANCE,
equal_nan=True))
def test_dilate_2d_matrix_take_largest_abs_value(self):
"""Ensures correct output from dilate_2d_matrix.
In this case, take_largest_absolute_value = True.
"""
this_output_matrix = dilation.dilate_2d_matrix(
INPUT_MATRIX, percentile_level=LARGE_PERCENTILE,
half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS,
take_largest_absolute_value=True)
self.assertTrue(numpy.allclose(
this_output_matrix, OUTPUT_MATRIX_LARGEST_ABS_VALUE, atol=TOLERANCE,
equal_nan=True))
if __name__ == '__main__':
unittest.main()
|
[
"ryan.lagerquist@ou.edu"
] |
ryan.lagerquist@ou.edu
|
c24903effb648177524e2e48f7b0d39f7732c27d
|
541b606962881272c7899415a9208bca3a0ff901
|
/Project2/detection/yolo3/utils.py
|
4a378f7c12a23d65947f6b210d119dce708111b0
|
[] |
no_license
|
Caramelo18/VCOM-Projects
|
7c11b10c7a380952a01152c28db4211ae85a42d0
|
e024b6e10f725e5ee66b614548738c8eed8bb10e
|
refs/heads/master
| 2020-04-02T13:25:53.238629
| 2019-01-08T17:09:49
| 2019-01-08T17:09:49
| 154,480,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,875
|
py
|
"""Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
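# e.g. compose(f, g)(x) evaluates g(f(x)), i.e. functions are applied left to right.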
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
'''random preprocessing for real-time data augmentation'''
line = annotation_line.split()
path = line[0] + ' ' + line[1]
image = Image.open(path)
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[2:]])
if not random:
# resize image
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
dx = (w-nw)//2
dy = (h-nh)//2
image_data=0
if proc_img:
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image)/255.
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
box[:, [0,2]] = box[:, [0,2]]*scale + dx
box[:, [1,3]] = box[:, [1,3]]*scale + dy
box_data[:len(box)] = box
return image_data, box_data
# resize image
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
# place image
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = rgb_to_hsv(np.array(image)/255.)
x[..., 0] += hue
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x>1] = 1
x[x<0] = 0
image_data = hsv_to_rgb(x) # numpy array, 0 to 1
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
if len(box)>max_boxes: box = box[:max_boxes]
box_data[:len(box)] = box
return image_data, box_data
|
[
"fabiocaramelo18@gmail.com"
] |
fabiocaramelo18@gmail.com
|
0fdc9e6d551890e0e2a34dc9116c567f50767d41
|
adafccfb21985ddd6a88c9459d6391d04e3e5e26
|
/kojima.py
|
3db3571067ac137b5021f5bea0f1122fb8267cde
|
[] |
no_license
|
Hose270/Kojima-Name-Generator
|
a79db503d0a4f9c1b8b21c83e87b16bf7dd8f33e
|
ba2e1be5a8f59bf098254a44909f7d0ee18b48e5
|
refs/heads/master
| 2020-09-08T09:28:33.595272
| 2019-11-12T00:18:22
| 2019-11-12T00:18:22
| 221,094,178
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,863
|
py
|
import random
print("Welcome to the Automated Kojima Name Generator.")
print("As a Kojima character, you may have multiple names.")
print("Determining how many names you have...")
randnum1 = random.randrange(1, 7)  # 1..6 inclusive, so the check below can actually trigger
if randnum1 == 6:
print("You have one name and six alternate names. After completing this generator, complete it again.")
else:
print("You only have one name. Thank goodness, that makes this easier.")
print("First, I need some information from you.")
realname = input("What is your name? ")
longjob = input("What do you do at your occupation? ")
shortjob = input("Condense the verb in your answer to a single -er noun. ")
petbreed = input("What was your first pet's specific species/breed? If you never had a pet, please answer with an animal you wish you owned. ")
longmemory = input("What's your most embarrassing childhood memory? Be specific. ")
shortmemory = input("Now condense that memory into two words! ")
dontstab = input("What is the object you'd least like to be stabbed by? ")
talent = input("What is something you are good at? (verb ending in -ing) ")
carrots = input("How many carrots do you believe you could eat in one sitting, if someone, like, forced you to eat as many carrots as possible? ")
intfear = input("What is your greatest intangible fear? (e.g. death, loneliness, fear itself) ")
tfear = input("What is your greatest tangible fear? (e.g. horses) ")
recent = input("What is the last thing you did before starting this worksheet? ")
condition = input("What condition is your body currently in? (single word answer) ")
state = input("Favorite state of matter? ")
kinda = input("A word your name kind of sounds like? (e.g. Brian -> Brain) ")
zodiac = input("What is your Zodiac sign? ")
mbti = input("If you had to define your personality in one word, what would it be? ")
print("Great! Now some other information. ")
print("See, Kojima character names reflect his own idiosyncrasies. He can't help himself. ")
kurt = input("Who is your favorite film character? (NOTE: must be played by Kurt Russell) ")
stan = input("What is the last word of the title of your favorite Kubrick film? ")
band = input("What is the first word in the title of your favorite Joy Division album? ")
npr = input("What is a scientific term you picked up from listening to NPR once? ")
guns = input("What is a piece of military hardware you think looks cool even though war is bad? ")
longmads = input("What is something you'd enjoy watching Mads Mikkelson do? Be specific. ")
shortmads = input("Now condense that to one word. ")
print("Great! Now some quick things to clear up. ")
print("Sometimes, a character will have a plot-based condition that affects their name. ")
print("You, too, might have a condition that affects your name. ")
print("Determining if you have The-Man Condition...")
randnum2 = random.randrange(1, 5)  # 1..4 inclusive, so the check below can actually trigger
if randnum2 == 4:
print("You have this condition.")
print("Your last name will include the suffix-man.")
print("If your name already has -man at the end of it, I guess you're just going to have -manman at the end of your name.")
theman = "man"
else:
print("You do not have this condition.")
theman = ""
print("Determining if you have the Condition Condition...")
randnum3 = random.randrange(1, 9)  # 1..8 inclusive, so every branch below is reachable
if randnum3 == 5:
print("You have this condition.")
print("You're big.")
print("Your name must have the word Big at the beginning of it.")
stat = "Big "
elif randnum3 == 7:
print("You have this condition.")
print("You're older than you once were.")
print("Your nane must have the word Old at the beginning of it.")
stat = "Old "
elif randnum3 == 8:
print("You have this condition.")
print("You know how you currently are.")
stat = condition+" "
else:
print("You do not have this condition.")
stat = ""
print("Determining if you have the Clone condition...")
randnum4 = random.randrange(1, 13)  # 1..12 inclusive, so the check below can actually trigger
if randnum4 == 12:
print("You are a clone of someone else, or you have been brainwashed into becoming a mental doppelganger of someone else.")
print("Find someone who has completed this worksheet and replace 50% ofyour Kojima name with 50% of their Kojima name.")
else:
print("You do not have this condition.")
print("Determining if you have the Kojima Condition...")
randnum5 = random.randrange(1, 100)
if randnum5 == 69:
print("Oh no.")
print("You are Hideo Kojima.")
print("Hideo Kojima created you and is also you.")
print("You are the man who created himself and there is nothing you can do about it.")
print("You're in Kojima's world—your world—and that's just the breaks, pal.")
print("Stop this worksheet now. You're Hideo Kojima. Go do the things that Hideo Kojima does.")
else:
print("You do not have this condition.")
print("Great! Time to determine your name category.")
randnum6 = random.randrange(1, 21)  # 1..20 inclusive, so the final else branch is reachable
if randnum6 == 1:
print("You have a normal name.")
kojimaname = stat+realname+theman
print("Your Kojima name is.... ",kojimaname, ".")
print("That's your name. Your Kojima name is probably just your actual name. Sorry if you were expecting something wild.")
elif randnum6 < 7:
print("You have an occupational name.")
randnum7 = random.randrange(1,4)
if randnum7 == 1:
fname = mbti
elif randnum7 == 2:
fname = talent
elif randnum7 == 3:
fname = kinda
else:
fname = kurt
kojimaname = stat+fname+" "+shortjob+theman
print("Your Kojima name is.... ",kojimaname,".")
elif randnum6 < 11:
print("You have a horny name.")
print("Kojima's characters and stories are irrevocably horny. Weirdly horny, sure, but horny nonetheless.")
randnum7 = random.randrange(1,4)
if randnum7 == 1:
fname = state
elif randnum7 == 2:
fname = "Naked"
elif randnum7 == 3:
fname = talent
else:
fname = zodiac
middle = input("Do you want your middle name to be Lickable? ")
if middle == "Yes":
mname = "Lickable "
else:
mname = ""
kojimaname = stat+fname+" "+mname+petbreed+theman
print("Your Kojima name is.... ",kojimaname,".")
elif randnum6 < 14:
print("You have a The name.")
randnum7 = random.randrange(1,4)
if randnum7 == 1:
lname = intfear
elif randnum7 == 2:
lname = tfear
elif randnum7 == 3:
lname = shortmemory
else:
lname = guns
kojimaname = "The "+stat+lname+theman
print("Your Kojima name is....",kojimaname,".")
elif randnum6 < 18:
print("You have a cool name.")
print("Kojima loves to be cool. Sometimes, his idea of cool is a bit strange, but it is always cool to Hideo Kojima, and that's what matters.")
randnum7 = random.randrange(1,6)
if randnum7 == 1:
lname = stan
elif randnum7 == 2:
lname = band
elif randnum7 == 3:
lname = npr
elif randnum7 == 4:
lname = talent
elif randnum7 == 5:
lname = intfear
else:
lname = kinda
kojimaname = stat+shortmads+" "+lname+theman
print("Your Kojima name is....",kojimaname,".")
elif randnum6 < 20:
print("You have a violent name.")
randnum7 = random.randrange(1,4)
if randnum7 == 1:
lname = npr
elif randnum7 == 2:
lname = state
elif randnum7 == 3:
lname = guns
else:
lname = tfear
kojimaname = stat+dontstab+" "+lname+theman
print("Your Kojima name is....",kojimaname,".")
else:
print("You have a name that lacks subtext.")
kojimaname = stat+recent+theman
print("Your Kojima name is....",kojimaname,".")
print("Now come up with a great backstory for your name. That's it. Bye.")
|
[
"noreply@github.com"
] |
Hose270.noreply@github.com
|
b940481b0759bcba75f4f7b4bdfb26031e028840
|
8d9cd15eee6013461f02b9aa46f0ac792c0a0acb
|
/GLUECode_Service/ML_Utils/__init__.py
|
089108a38548b446c8ef758fd784b5089a4b4e53
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lanl/GLUE
|
c8f3e942b0be11054bc48cb46f4bcb730920eea0
|
4241b581a4544b6ee50803f8b51ccabfb15d1b0e
|
refs/heads/main
| 2023-04-06T14:56:28.340056
| 2022-12-07T14:55:16
| 2022-12-07T14:55:16
| 476,812,171
| 3
| 6
|
NOASSERTION
| 2022-12-23T17:45:49
| 2022-04-01T17:29:17
|
Python
|
UTF-8
|
Python
| false
| false
| 38
|
py
|
import nn_learner
import rf_learner
|
[
"apachalieva@lanl.gov"
] |
apachalieva@lanl.gov
|
a39a3955c32ebd19a52c15a123063afe5b0c659f
|
9d9897f6a400f13945e6f45114dd2b528e171269
|
/Data Structures and Algorithms/First Index Of Element Using Recursion Using Index.py
|
2fc7c50978aac559d9466f789dfecb97a199ed7f
|
[] |
no_license
|
shahbaz181990/MyPythonCodes
|
4b56d4cc6ce4adf1513f5021a64fc7da474e62f1
|
5650f69e29622b55412d45d5cd4ff36d5d7bef77
|
refs/heads/master
| 2022-12-17T09:25:55.722137
| 2020-07-07T06:56:33
| 2020-07-07T06:56:33
| 277,568,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
def firstIndex(arr, x, si):
l = len(arr)
if si == l:
return -1
if arr[si] == x:
return si
op = firstIndex(arr, x, si + 1)
return op
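# e.g. firstIndex([1, 2, 3, 2], 2, 0) returns 1, the first index holding the value 2.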
# Main
from sys import setrecursionlimit
setrecursionlimit(11000)
n=int(input())
arr=list(int(i) for i in input().strip().split(' '))
si = int(input())
x=int(input())
print(firstIndex(arr, x, si))
|
[
"shahbazshaikh1990@yahoo.com"
] |
shahbazshaikh1990@yahoo.com
|
570d5e82d5c6785c52442478d82a296869969b12
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/authorization/v20180501/policy_set_definition_at_management_group.py
|
319473b88f6f90a3be594135a1f64be8fef76212
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 16,443
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PolicySetDefinitionAtManagementGroupArgs', 'PolicySetDefinitionAtManagementGroup']
@pulumi.input_type
class PolicySetDefinitionAtManagementGroupArgs:
def __init__(__self__, *,
management_group_id: pulumi.Input[str],
policy_definitions: pulumi.Input[Sequence[pulumi.Input['PolicyDefinitionReferenceArgs']]],
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
parameters: Optional[Any] = None,
policy_set_definition_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[Union[str, 'PolicyType']]] = None):
"""
The set of arguments for constructing a PolicySetDefinitionAtManagementGroup resource.
:param pulumi.Input[str] management_group_id: The ID of the management group.
:param pulumi.Input[Sequence[pulumi.Input['PolicyDefinitionReferenceArgs']]] policy_definitions: An array of policy definition references.
:param pulumi.Input[str] description: The policy set definition description.
:param pulumi.Input[str] display_name: The display name of the policy set definition.
:param Any metadata: The policy set definition metadata.
:param Any parameters: The policy set definition parameters that can be used in policy definition references.
:param pulumi.Input[str] policy_set_definition_name: The name of the policy set definition to create.
:param pulumi.Input[Union[str, 'PolicyType']] policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
pulumi.set(__self__, "management_group_id", management_group_id)
pulumi.set(__self__, "policy_definitions", policy_definitions)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if policy_set_definition_name is not None:
pulumi.set(__self__, "policy_set_definition_name", policy_set_definition_name)
if policy_type is not None:
pulumi.set(__self__, "policy_type", policy_type)
@property
@pulumi.getter(name="managementGroupId")
def management_group_id(self) -> pulumi.Input[str]:
"""
The ID of the management group.
"""
return pulumi.get(self, "management_group_id")
@management_group_id.setter
def management_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "management_group_id", value)
@property
@pulumi.getter(name="policyDefinitions")
def policy_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['PolicyDefinitionReferenceArgs']]]:
"""
An array of policy definition references.
"""
return pulumi.get(self, "policy_definitions")
@policy_definitions.setter
def policy_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['PolicyDefinitionReferenceArgs']]]):
pulumi.set(self, "policy_definitions", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The policy set definition description.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name of the policy set definition.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def metadata(self) -> Optional[Any]:
"""
The policy set definition metadata.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[Any]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter
def parameters(self) -> Optional[Any]:
"""
The policy set definition parameters that can be used in policy definition references.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[Any]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="policySetDefinitionName")
def policy_set_definition_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the policy set definition to create.
"""
return pulumi.get(self, "policy_set_definition_name")
@policy_set_definition_name.setter
def policy_set_definition_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_set_definition_name", value)
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> Optional[pulumi.Input[Union[str, 'PolicyType']]]:
"""
The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
return pulumi.get(self, "policy_type")
@policy_type.setter
def policy_type(self, value: Optional[pulumi.Input[Union[str, 'PolicyType']]]):
pulumi.set(self, "policy_type", value)
class PolicySetDefinitionAtManagementGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
management_group_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
parameters: Optional[Any] = None,
policy_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PolicyDefinitionReferenceArgs']]]]] = None,
policy_set_definition_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[Union[str, 'PolicyType']]] = None,
__props__=None):
"""
The policy set definition.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The policy set definition description.
:param pulumi.Input[str] display_name: The display name of the policy set definition.
:param pulumi.Input[str] management_group_id: The ID of the management group.
:param Any metadata: The policy set definition metadata.
:param Any parameters: The policy set definition parameters that can be used in policy definition references.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PolicyDefinitionReferenceArgs']]]] policy_definitions: An array of policy definition references.
:param pulumi.Input[str] policy_set_definition_name: The name of the policy set definition to create.
:param pulumi.Input[Union[str, 'PolicyType']] policy_type: The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PolicySetDefinitionAtManagementGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The policy set definition.
:param str resource_name: The name of the resource.
:param PolicySetDefinitionAtManagementGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PolicySetDefinitionAtManagementGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
management_group_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[Any] = None,
parameters: Optional[Any] = None,
policy_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PolicyDefinitionReferenceArgs']]]]] = None,
policy_set_definition_name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[Union[str, 'PolicyType']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PolicySetDefinitionAtManagementGroupArgs.__new__(PolicySetDefinitionAtManagementGroupArgs)
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
if management_group_id is None and not opts.urn:
raise TypeError("Missing required property 'management_group_id'")
__props__.__dict__["management_group_id"] = management_group_id
__props__.__dict__["metadata"] = metadata
__props__.__dict__["parameters"] = parameters
if policy_definitions is None and not opts.urn:
raise TypeError("Missing required property 'policy_definitions'")
__props__.__dict__["policy_definitions"] = policy_definitions
__props__.__dict__["policy_set_definition_name"] = policy_set_definition_name
__props__.__dict__["policy_type"] = policy_type
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:authorization/v20180501:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20170601preview:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20170601preview:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20180301:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20180301:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20190101:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20190101:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20190601:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20190601:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20190901:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20190901:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20200301:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20200301:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-native:authorization/v20200901:PolicySetDefinitionAtManagementGroup"), pulumi.Alias(type_="azure-nextgen:authorization/v20200901:PolicySetDefinitionAtManagementGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PolicySetDefinitionAtManagementGroup, __self__).__init__(
'azure-native:authorization/v20180501:PolicySetDefinitionAtManagementGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PolicySetDefinitionAtManagementGroup':
"""
Get an existing PolicySetDefinitionAtManagementGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PolicySetDefinitionAtManagementGroupArgs.__new__(PolicySetDefinitionAtManagementGroupArgs)
__props__.__dict__["description"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["name"] = None
__props__.__dict__["parameters"] = None
__props__.__dict__["policy_definitions"] = None
__props__.__dict__["policy_type"] = None
__props__.__dict__["type"] = None
return PolicySetDefinitionAtManagementGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The policy set definition description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
The display name of the policy set definition.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional[Any]]:
"""
The policy set definition metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the policy set definition.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parameters(self) -> pulumi.Output[Optional[Any]]:
"""
The policy set definition parameters that can be used in policy definition references.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="policyDefinitions")
def policy_definitions(self) -> pulumi.Output[Sequence['outputs.PolicyDefinitionReferenceResponse']]:
"""
An array of policy definition references.
"""
return pulumi.get(self, "policy_definitions")
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> pulumi.Output[Optional[str]]:
"""
The type of policy definition. Possible values are NotSpecified, BuiltIn, and Custom.
"""
return pulumi.get(self, "policy_type")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource (Microsoft.Authorization/policySetDefinitions).
"""
return pulumi.get(self, "type")
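# Minimal usage sketch (hypothetical names and ids; field names follow the
# generated *Args classes above and may need adjusting):
#   policy_set = PolicySetDefinitionAtManagementGroup(
#       "examplePolicySet",
#       management_group_id="exampleManagementGroup",
#       policy_definitions=[PolicyDefinitionReferenceArgs(
#           policy_definition_id="/providers/Microsoft.Authorization/policyDefinitions/example")])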
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
02a16f9146f626fe8d119a1d7df6c6e3e0c3e5cf
|
4793cd9fff4d7f2f34e08880e657f886d0089879
|
/obtineDescriptoriExemplePozitive.py
|
02ee47a2261bc23ce9373c6133f2e7c461115160
|
[] |
no_license
|
SurrealEverything/Face-Detection
|
f2379b5d620b93bb89965ee37d0da9371e154516
|
28011ab4976b99142683c360c18c099105ae2a03
|
refs/heads/master
| 2020-04-15T01:55:38.351893
| 2019-01-11T19:27:42
| 2019-01-11T19:27:42
| 164,296,619
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 25 15:17:55 2018
@author: gabriel
"""
import numpy as np
import cv2
import pathlib
def obtineDescriptoriExemplePozitive(parametri):
"""
    descriptoriExemplePozitive = an N x D matrix, where:
    N = the number of positive training examples (human faces)
    D = the number of dimensions of the descriptor
"""
imgPathsUnsorted = pathlib.Path(
parametri.numeDirectorExemplePozitive).glob('*.jpg')
imgPaths = sorted([x for x in imgPathsUnsorted])
numarImagini = len(imgPaths)
dimensiuneDescriptoriImagine = round(
parametri.orientari
* (parametri.dimensiuneBloc[0] /
parametri.dimensiuneCelulaHOG[0]) ** 2
* ((parametri.dimensiuneFereastra[0] - parametri.dimensiuneBloc[0])
/ parametri.pasBloc[0] + 1) ** 2)
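    # e.g., for a hypothetical 64x64 window, 16x16 block, 8x8 cell and 8x8 block
    # stride with 9 orientations: 9 * (16/8)**2 * ((64-16)/8 + 1)**2 = 9 * 4 * 49 = 1764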
descriptoriExemplePozitive = np.zeros((
parametri.numarExemplePozitive,
dimensiuneDescriptoriImagine))
hog = cv2.HOGDescriptor(parametri.dimensiuneFereastra,
parametri.dimensiuneBloc,
parametri.pasBloc,
parametri.dimensiuneCelulaHOG,
parametri.orientari)
print('Exista un numar de exemple pozitive = ' + str(numarImagini))
for idx in range(numarImagini):
print('Procesam exemplul pozitiv numarul ' + str(idx))
img = cv2.imread(str(imgPaths[idx]), 0)
desc = hog.compute(img)
descriptoriExemplePozitive[idx] = np.ravel(desc)
return descriptoriExemplePozitive
|
[
"noreply@github.com"
] |
SurrealEverything.noreply@github.com
|
544770342c2dde735ba79dafcd7ca0d7bf54d5d1
|
5a613faa4b257add5c5d057c3dbabcdd39e66a2c
|
/config.py
|
53afe3ba4e9f3cb05a52c5b4857b0e2404d8fd9b
|
[] |
no_license
|
mmk687/mysql_scanner
|
7e227c37498dc5bf75c50cf7a1cf4d9405257275
|
06c18d09f6e137783aa1c35d0a2d5c1aa3c9a818
|
refs/heads/master
| 2020-06-19T12:25:23.803088
| 2019-06-23T15:54:01
| 2019-06-23T15:54:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
""" 运行设置 """
ipcmd = "3306.txt"
port = 3306
usercmd = "dic_username_mysql.txt"
passcmd = "dic_password_mysql.txt"
THREAD = 20
TIMEOUT = 1.0
PRINTALL = 1
""" 数据库设置 """
# ----- 1 主机连接信息
HOST = "119.3.55.220"
PORT = 6789
USER = "4287e7ae11008807e536c6283f82ea2f"
PASSWORD = "2tU4yyHkwu"
# ----- 2 Database and table
DATABASE = "mysqlscan"  # database to read from
TABLE = "result"  # table to read from
|
[
"juaran@foxmail.com"
] |
juaran@foxmail.com
|
ee390771c7eb19d64e83a1321f36e47b1cc370e0
|
3a3f4477a517c9757432042ceb4939f37762c2a4
|
/PatternMatching/pattern.py
|
fbf0b7c65a6264e02d5feb7ad865bf5fce72d855
|
[] |
no_license
|
marathohoho/leetcode-progress
|
f351ad89905c8e61fd5b5adff62320ce8ba9645d
|
13b298c1074328c130724e328d7c22be642903cb
|
refs/heads/master
| 2020-12-24T01:39:15.117334
| 2020-04-06T20:20:16
| 2020-04-06T20:20:16
| 237,339,061
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,823
|
py
|
"""
You are given two non-empty strings.
The first one is a pattern consisting of only "x"s and / or "y"s; the other one is a normal string of alphanumeric characters.
Write a function that checks whether or not the normal string matches the pattern.
A string S0 is said to match a pattern if replacing all "x"s in the pattern with some string S1 and replacing all "y"s in the pattern with some string S2 yields the same string S0.
If the input string does not match the input pattern, return an empty array; otherwise, return an array holding the representations of "x" and "y" in the normal string, in that order.
If the pattern does not contain any "x"s or "y"s, the respective letter should be represented by an empty string in the final array that you return.
Assume that there will never be more than one pair of strings S1 and S2 that appropriately represent "x" and "y" in the input string.
Example:
sample input : s1 = "xxyxxy", s2 = "gogopowerrangergogopowerranger"
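sample output : ["go", "powerranger"]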
"""
from collections import defaultdict
def patternMatcher(pattern, string) :
if len(pattern) > len(string) :
return []
# get new pattern if we have to swap x and y letters
# take a mark if we did swap or not
new_pattern = getNewPattern(pattern)
did_swap = new_pattern[0] != pattern[0]
# hold number of occurencies for each character of the pattern
occurencies = defaultdict(int)
first_y_pos = getCountsAndFirstYPos(new_pattern, occurencies)
if occurencies['y'] != 0 :
# if we have ys in our pattern
string_length = len(string)
for len_of_x in range(1, string_length) :
len_of_y = (string_length - len_of_x * occurencies['x']) / occurencies['y']
if len_of_y % 1 or len_of_y <= 0 :
continue
len_of_y = int(len_of_y)
y_index = first_y_pos * len_of_x
x = string[:len_of_x] # g
y = string[y_index : y_index + len_of_y] #gopowerranger
potential_match = map(lambda char : x if char == 'x' else y, new_pattern)
if string == "".join(potential_match) :
return [x, y] if not did_swap else [y, x]
else :
len_of_x = len(string) / occurencies['x']
        if len_of_x % 1 == 0 :
            len_of_x = int(len_of_x)
            x = string[:len_of_x]
            potential_match = map(lambda char : x, new_pattern)
            if string == "".join(potential_match) :
                return [x, ""] if not did_swap else ["", x]
    return []
def getNewPattern(pattern) :
patterLetters = list(pattern)
if pattern[0] == 'x' :
return patterLetters
else :
return list(map(lambda char : 'x' if char == 'y' else 'y', patterLetters))
def getCountsAndFirstYPos(pattern, occurencies):
first_occurence = -1
for i, letter in enumerate(pattern) :
occurencies[letter] += 1
if letter == 'y' and first_occurence == -1 :
first_occurence = i
return first_occurence
if __name__ == "__main__" :
print(patternMatcher('xxyxxy', 'gogopowerrangergogopowerranger'))
|
[
"codmarat@gmail.com"
] |
codmarat@gmail.com
|
a982750f34a5889801079883f97dd27aad027d9e
|
813ab64cfaba9b543c7b2df38b7217e4f484b6c8
|
/recip/storage/Storage.py
|
3578c675d02dfa972bd3661e0338f21bbe475033
|
[
"MIT"
] |
permissive
|
anthonybuckle/Reciprocity-Core
|
83089b41e00af532c440b6676b6d901fb9ea1b73
|
3254073f44e8fe2222aed9879885a2e609d4044a
|
refs/heads/main
| 2022-12-25T18:00:19.476519
| 2020-10-03T19:37:35
| 2020-10-03T19:37:35
| 300,916,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,200
|
py
|
from threading import Lock
from recip.util import Config
from recip.util import DataType
from recip.util import Log
import lmdb
class Storage:
def __init__(self, database, subDatabase=None):
self.database = database
self.subDatabase = subDatabase
self.db = None
self.subDb = None
self.lock = Lock()
def open(self, subDatabase=None):
self.lock.acquire()
self.db = lmdb.open(self.database, max_dbs=Config.getIntValue('MAX_DATABASES'))
subDatabaseBytes = DataType.serialize(self.subDatabase if subDatabase == None else subDatabase)
self.subDb = self.db.open_db(subDatabaseBytes)
def count(self, subDatabase=None):
try:
self.open(subDatabase)
with self.db.begin() as db:
return db.stat(db=self.subDb)['entries']
except IOError:
Log.error('Unable to count database entries')
finally:
self.close()
def get(self, key, subDatabase=None):
try:
self.open(subDatabase)
with self.db.begin(db=self.subDb) as db:
return db.get(key)
except IOError:
Log.error('Unable to get record using key: %s' % key)
finally:
self.close()
def set(self, key, value, subDatabase=None):
try:
self.open(subDatabase)
with self.db.begin(write=True) as db:
db.put(key, value, db=self.subDb)
except IOError:
Log.error('Unable to set record using key: %s value: %s' % (key, value))
finally:
self.close()
def remove(self, key, subDatabase=None):
try:
self.open(subDatabase)
with self.db.begin(write=True, db=self.subDb) as db:
db.delete(key)
except IOError:
Log.error('Unable to remove record using key: %s' % key)
finally:
self.close()
def close(self):
try:
if self.db != None:
self.db.close()
self.db = None
self.subDb = None
finally:
self.lock.release()
|
[
"anthonybuckle@Anthonys-MacBook-Air.local"
] |
anthonybuckle@Anthonys-MacBook-Air.local
|
70c71c2ad3631f21a4c22b0233a232f9a6806161
|
9a4e0678a30e8f187663f10c287af4bfa8778f46
|
/pypeline/utils.py
|
692003fef3e4b2ba144e1c7fb5817e2b260a9f7a
|
[] |
no_license
|
t00n/pypeline
|
0dafb571c74250c2441c99441125b06eca5ece51
|
a8e48ff1545b1e622b9f37cd237c94c7a874ffd6
|
refs/heads/master
| 2020-03-15T12:21:46.485907
| 2018-05-29T14:39:44
| 2018-05-29T14:39:44
| 132,142,091
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from datetime import datetime
from dateutil.parser import parse as date_parse
def to_datetime(d):
if isinstance(d, str):
return date_parse(d)
elif isinstance(d, int) or isinstance(d, float):
return datetime.fromtimestamp(d)
elif isinstance(d, datetime):
return d
else:
raise ValueError("{} should be a datetime".format(d))
|
[
"kelkununtel@hotmail.com"
] |
kelkununtel@hotmail.com
|
39d1538ea860dc25dadc47eab812b0f96ef7a6e8
|
8ba5b57e191312601d0324e58dd2c59d1ed9c821
|
/bifrost/__init__.py
|
bda370a45a0f621ffebfdab8c47ebe90707f77a3
|
[] |
no_license
|
gutierrydsn/bifrost-httpserver
|
105b0ee6b778b9cbe0407d8e2c13989928f3a478
|
29513f3fac1b6e920dd2687ed10baa6de83311e2
|
refs/heads/master
| 2021-10-26T00:36:32.591566
| 2019-04-09T00:51:38
| 2019-04-09T00:51:38
| 121,187,929
| 1
| 1
| null | 2018-02-22T14:20:12
| 2018-02-12T01:56:19
|
Python
|
UTF-8
|
Python
| false
| false
| 26
|
py
|
VERSION = (0, 1, 0, None)
|
[
"gutierrydsn@hotmail.com"
] |
gutierrydsn@hotmail.com
|
22dc3c5bbcba175591d6beefbe7927a9a8a90906
|
b632d8cb122e098945e70f608517b2715eaee224
|
/services/facedetection/facedetection_fromwebcam.py
|
3ac452cde7de80eaae2e8fef4517f1d73220bc4f
|
[
"Apache-2.0"
] |
permissive
|
lordlothar99/angus-doc
|
0155cc109487593853e90f4757a1693f06db9427
|
75b9b955ae273f634c8f91016859d9f30c74f0d6
|
refs/heads/master
| 2021-01-14T08:34:49.084929
| 2015-07-22T14:21:10
| 2015-07-22T14:21:10
| 39,728,683
| 0
| 0
| null | 2015-07-26T14:39:05
| 2015-07-26T14:39:04
| null |
UTF-8
|
Python
| false
| false
| 1,364
|
py
|
#!/usr/bin/env python
import cv2
import angus
if __name__ == '__main__':
### Web cam index might be different from 0 on your setup.
stream_index = 0
cap = cv2.VideoCapture(stream_index)
if not cap.isOpened():
print "Cannot open stream of index " + str(stream_index)
exit(1)
print "Video stream is of resolution " + str(cap.get(3)) + " x " + str(cap.get(4))
conn = angus.connect()
service = conn.services.get_service("face_detection", version=1)
service.enable_session()
while(cap.isOpened()):
ret, frame = cap.read()
        if frame is not None:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imwrite("/tmp/image.png", gray)
job = service.process({"image": open("/tmp/image.png")})
res = job.result
if res['nb_faces'] > 0:
for i in range(0,res['nb_faces']):
roi = res['faces'][i]['roi']
cv2.rectangle(frame, (int(roi[0]), int(roi[1])),
(int(roi[0] + roi[2]), int(roi[1] + roi[3])),
(0,255,0))
cv2.imshow('original', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
service.disable_session()
cap.release()
cv2.destroyAllWindows()
|
[
"gwennael.gate@angus.ai"
] |
gwennael.gate@angus.ai
|
11bd179b9acf6d6f2616212cfc4986320e7d7bfd
|
0b060b277b25a96df124657e24839c64df685594
|
/manage.py
|
1481c6e260e0f48afa22beb3ef4fe04094b77a38
|
[] |
no_license
|
xaw-roz/Django-twitter-data-analysis
|
3f2f28eb4fcc9e514e83d5f700be317ff808333e
|
aac5923ef2def79863d9ab06278c10cd08096b4b
|
refs/heads/master
| 2020-03-19T09:10:59.711639
| 2018-06-07T03:29:48
| 2018-06-07T03:29:48
| 136,266,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twitter_data_collector.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"rockstar_saroj@yahoo.com"
] |
rockstar_saroj@yahoo.com
|
ec0d9251a4520617eda6e97e4858c66f73191954
|
66225270641e3b96ef88ce8d940e5a1b36e74460
|
/user_app/api/permissions.py
|
10e224f3921b8645f542f77f715ff36511bdf972
|
[] |
no_license
|
illiasibrahim/backend-via
|
dcf9b6e4350951b1f67bbb15dd0710f03521e3e3
|
a832c52b965542fc9e84fbb0b7ee319521787735
|
refs/heads/master
| 2023-08-10T20:13:14.220933
| 2021-10-13T06:09:03
| 2021-10-13T06:09:03
| 416,613,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from rest_framework import permissions
class IsAddressAuthor(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return obj.user == request.user
|
[
"illiasibrahim6@gmail.com"
] |
illiasibrahim6@gmail.com
|
e8bcb3b3cb93588bc8e232a2e53e655898b06d78
|
21da99050c676819660dbd138f4ecbc64b470c22
|
/demo1.py
|
c73c6ea0e84e760d5535a12917c270d1730ef6bc
|
[] |
no_license
|
ChaconFrancoLuis/Academia-Repo-Disenos
|
b8efe94d4661aa785a8ff366b220bed6b5597133
|
aa1b1f6b2c26443bad7b2118fb163dfe2c92c926
|
refs/heads/master
| 2022-11-10T10:16:39.227824
| 2020-07-10T02:17:13
| 2020-07-10T02:17:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
Ios_device = {"address":"192.168.0.11",
"netconf_port":830,
"retconf_port":443,
"username":"cisco",
"password":"cisco123!"}
|
[
"francofer7775@gmail.com"
] |
francofer7775@gmail.com
|
3362ddcbb92b322290f4639c343a80ac56165d15
|
a94be4b6b68f293f7ec546cae40dc25515352a8c
|
/worthy_service/catalog_service.py
|
c09af1a432fe1b0cce40a44cefaaa85f326ed7bc
|
[] |
no_license
|
denniszhoujian/worthyback
|
bbd7129adaf96b5a16149553ad82d5f70b7afbb2
|
d55ebde09574078a96d44919b79fa6e273c92999
|
refs/heads/master
| 2021-01-21T14:04:25.536192
| 2016-05-31T12:47:30
| 2016-05-31T12:47:30
| 45,192,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,402
|
py
|
# encoding: utf-8
import dbhelper_read
import sku_index_access,sku_service, service_config
from datasys.memcachedHelper import memcachedStatic
from worthy_analytics import common_analytics
from utils import regex_dict_helper, collection_utils
mc = memcachedStatic.getMemCache()
P_VALUE_BLACK_LIST = [
u'其他', u'其它',
u'不支持',u'支持',
u'无',
]
P_VALUE_BLACK_LIST_REGEX = [
r'[\\/]+[\w\W]*-',
r'[a-z]+\d{2,}[ ]?-',
r'\d[.]\d{3,}',
r'2[1234]0[ ]?v',
r'1[01]0[ ]v',
r'\d{5,}',
u'℃',
u'°[ ]?c',
u'\d(摄氏|华氏| )?度',
r'[\w\W]+-[\w\W]*-',
u'\d+[种个]',
r'[a-z0-9]{2,}[ ]?-[ ]?[a-z0-9]{2,}',
u'不含',
u'的所有',
u'年\d月',
r'\d[ ]?hz',
]
P_VALUE_SPLIT_LIST = [
',', ';', u',',u';',u'、',
]
def getCatalogs():
sql = 'select catalog_id, catalog_name as category_name from jd_catalog order by order_weight DESC'
retrows = dbhelper_read.executeSqlRead(sql)
for row in retrows:
row['category_id'] = '%s' %(row['catalog_id'])
row.pop('catalog_id')
return retrows
def get_indicator_given_part_of_query(query):
mckey = memcachedStatic.getKey("GET_INDICATOR2::%s" %query)
mcv = mc.get(mckey)
if mcv is not None:
return mcv
retlist = sku_index_access.getSearchResult(query)
sku_id_list = []
i = 0
for sku_id in retlist:
sku_id_list.append("%s" %sku_id)
i += 1
if i >= 30:
break
in_clause = ','.join(sku_id_list)
sql = '''
select
category_id,
count(1) as count_hits,
catalog_id,
catalog_name,
category_name
from
jd_worthy_latest
where sku_id in (%s) and catalog_name is not NULL
group by category_id
order by count_hits DESC
limit %s
''' %(in_clause,service_config.CATEGORY_INDICATOR_MAX_NUM)
retrows = dbhelper_read.executeSqlRead(sql, is_dirty=True)
retlist = []
for row in retrows:
category_id = row['category_id']
# if category_id in ['670-677-5009','670-677-683','670-677-687','670-671-672']:
# continue
category_name = row['category_name']
catalog_id = row['catalog_id']
catalog_name = row['catalog_name']
sql0 = '''
select
p_value,
count(1) as count_hits,
p_key
from
jd_analytic_property_latest
where sku_id in (%s)
and category_id = "%s"
and p_value like "%%%s%%"
group by p_value
-- having count(1) > 1
order by count_hits DESC
limit %s
''' %(in_clause, category_id, query, service_config.PROPERTY_INDICATOR_MAX_NUM)
sql1 = '''
select
p_value,
count(1) as count_hits,
p_key
from
jd_analytic_property_latest
where sku_id in (%s)
and category_id = "%s"
and p_key = '品牌'
group by p_value
-- having count(1) > 1
order by count_hits DESC
limit %s
''' %(in_clause, category_id, service_config.PROPERTY_INDICATOR_MAX_NUM)
sql2 = '''
select
p_value,
count(1) as count_hits,
p_key
from
jd_analytic_property_latest
where sku_id in (%s)
and category_id = "%s"
and p_key <> '品牌'
group by p_value
-- having count(1) > 1
order by count_hits DESC
limit %s
''' %(in_clause, category_id, service_config.PROPERTY_INDICATOR_MAX_NUM)
retrows0 = dbhelper_read.executeSqlRead(sql0)
retrows1 = dbhelper_read.executeSqlRead(sql1)
retrows2 = dbhelper_read.executeSqlRead(sql2)
plist = common_analytics.dedup_leave_max(_retrows_to_list(retrows0+retrows1+retrows2, 'p_value'))
query2 = common_analytics.dedup_inline(query)
if query2 not in plist:
if query2 not in category_name:
plist = [query2] + plist
plist = common_analytics.remove_string_from_list(category_name,plist)
plist = collection_utils.expand_list(plist, P_VALUE_SPLIT_LIST)
qlist = []
for item in plist:
if item not in P_VALUE_BLACK_LIST:
item = item.lower()
if not regex_dict_helper.is_regex_match_list(item, P_VALUE_BLACK_LIST_REGEX):
qlist.append(item)
retlist.append({
'category': [category_id,category_name],
'property': qlist,
})
mc.set(mckey,retlist)
return retlist
#
# def _check_black_list(item, blacklist):
# for b in blacklist:
# if item == b:
# return True
# return False
def _retrows_to_list(retrows, colname):
rlist = []
for row in retrows:
rlist.append(row[colname])
return rlist
if __name__ == '__main__':
print regex_dict_helper.is_regex_match_list('en-el15',P_VALUE_BLACK_LIST_REGEX)
# exit()
ret = get_indicator_given_part_of_query('硬盘')
for item in ret:
print item['category'][1]
for item2 in item['property']:
print '%s\t%s' %(item2,len(item2))
print '-'*60
|
[
"271393@qq.com"
] |
271393@qq.com
|
24c25ead6a54afb83aaae32793c5fe1b094a1445
|
bb57ff81cd01e6c62e8f1ef47a6e172424cb8675
|
/NLP Specialization/NLP with Probabilistic models/Week 1/Auto_Correct.py
|
ac9f53dac9002a95b6cf3489c36f5c4bb02f1e63
|
[] |
no_license
|
RohitRathore1/Repo-2020
|
b6dd87a25e714e3be3bfd76b1f0d04515be75de5
|
8c36669bcecb74ef6a962f71fd8038c443204c30
|
refs/heads/master
| 2022-11-23T23:12:02.120981
| 2020-07-29T06:36:51
| 2020-07-29T06:36:51
| 251,401,730
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54,574
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Assignment 1: Auto Correct
#
# Welcome to the first assignment of Course 2. This assignment will give you a chance to brush up on your python and probability skills. In doing so, you will implement an auto-correct system that is very effective and useful.
# ## Outline
# - [0. Overview](#0)
# - [0.1 Edit Distance](#0-1)
# - [1. Data Preprocessing](#1)
# - [1.1 Exercise 1](#ex-1)
# - [1.2 Exercise 2](#ex-2)
# - [1.3 Exercise 3](#ex-3)
# - [2. String Manipulation](#2)
# - [2.1 Exercise 4](#ex-4)
# - [2.2 Exercise 5](#ex-5)
# - [2.3 Exercise 6](#ex-6)
# - [2.4 Exercise 7](#ex-7)
# - [3. Combining the edits](#3)
# - [3.1 Exercise 8](#ex-8)
# - [3.2 Exercise 9](#ex-9)
# - [3.3 Exercise 10](#ex-10)
# - [4. Minimum Edit Distance](#4)
# - [4.1 Exercise 11](#ex-11)
# - [5. Backtrace (Optional)](#5)
# <a name='0'></a>
# ## 0. Overview
#
# You use autocorrect every day on your cell phone and computer. In this assignment, you will explore what really goes on behind the scenes. Of course, the model you are about to implement is not identical to the one used in your phone, but it is still quite good.
#
# By completing this assignment you will learn how to:
#
# - Get a word count given a corpus
# - Get a word probability in the corpus
# - Manipulate strings
# - Filter strings
# - Implement Minimum edit distance to compare strings and to help find the optimal path for the edits.
# - Understand how dynamic programming works
#
#
# Similar systems are used everywhere.
# - For example, if you type in the word **"I am lerningg"**, chances are very high that you meant to write **"learning"**, as shown in **Figure 1**.
# <div style="width:image width px; font-size:100%; text-align:center;"><img src='auto-correct.png' alt="alternate text" width="width" height="height" style="width:300px;height:250px;" /> Figure 1 </div>
# <a name='0-1'></a>
# #### 0.1 Edit Distance
#
# In this assignment, you will implement models that correct words that are 1 and 2 edit distances away.
# - We say two words are n edit distance away from each other when we need n edits to change one word into another.
#
# An edit could consist of one of the following options:
#
# - Delete (remove a letter): ‘hat’ => ‘at, ha, ht’
# - Switch (swap 2 adjacent letters): ‘eta’ => ‘eat, tea,...’
# - Replace (change 1 letter to another): ‘jat’ => ‘hat, rat, cat, mat, ...’
# - Insert (add a letter): ‘te’ => ‘the, ten, ate, ...’
#
# You will be using the four methods above to implement an Auto-correct.
# - To do so, you will need to compute probabilities that a certain word is correct given an input.
#
# This auto-correct you are about to implement was first created by [Peter Norvig](https://en.wikipedia.org/wiki/Peter_Norvig) in 2007.
# - His [original article](https://norvig.com/spell-correct.html) may be a useful reference for this assignment.
#
# The goal of our spell check model is to compute the following probability:
#
# $$P(c|w) = \frac{P(w|c)\times P(c)}{P(w)} \tag{Eqn-1}$$
#
# The equation above is [Bayes Rule](https://en.wikipedia.org/wiki/Bayes%27_theorem).
# - Equation 1 says that the probability of a word being correct $P(c|w)$ is equal to the probability of having a certain word $w$, given that it is correct $P(w|c)$, multiplied by the probability of being correct in general $P(c)$, divided by the probability of that word $w$ appearing $P(w)$ in general.
# - To compute equation 1, you will first import a data set and then create all the probabilities that you need using that data set.
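# A quick intuition check on Eqn-1 (illustrative, made-up numbers only -- not drawn from the corpus used below): because $P(w)$ is the same for every candidate correction $c$, candidates can be ranked by $P(w|c)\times P(c)$; with a uniform error model $P(w|c)$, the word prior $P(c)$ alone decides the ranking.
toy_counts = {"thee": 240, "them": 5}  # hypothetical word counts
toy_total = 53000                      # hypothetical corpus size
toy_prior = {w: c / toy_total for w, c in toy_counts.items()}
print(toy_prior)                       # 'thee' gets the larger prior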
# <a name='1'></a>
# # Part 1: Data Preprocessing
# In[1]:
import re
from collections import Counter
import numpy as np
import pandas as pd
# As in any other machine learning task, the first thing you have to do is process your data set.
# - Many courses load in pre-processed data for you.
# - However, in the real world, when you build these NLP systems, you load the datasets and process them.
# - So let's get some real world practice in pre-processing the data!
#
# Your first task is to read in a file called **'shakespeare.txt'** which is found in your file directory. To look at this file you can go to `File ==> Open `.
# <a name='ex-1'></a>
# ### Exercise 1
# Implement the function `process_data` which
#
# 1) Reads in a corpus (text file)
#
# 2) Changes everything to lowercase
#
# 3) Returns a list of words.
# #### Options and Hints
# - If you would like more of a real-life practice, don't open the 'Hints' below (yet) and try searching the web to derive your answer.
# - If you want a little help, click on the green "General Hints" section by clicking on it with your mouse.
# - If you get stuck or are not getting the expected results, click on the green 'Detailed Hints' section to get hints for each step that you'll take to complete this function.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>General Hints</b></font>
# </summary>
# <p>
#
# General Hints to get started
# <ul>
# <li>Python <a href="https://docs.python.org/3/tutorial/inputoutput.html">input and output<a></li>
# <li>Python <a href="https://docs.python.org/3/library/re.html" >'re' documentation </a> </li>
# </ul>
# </p>
#
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Detailed Hints</b></font>
# </summary>
# <p>
# Detailed hints if you're stuck
# <ul>
# <li>Use 'with' syntax to read a file</li>
# <li>Decide whether to use 'read()' or 'readline()'. What's the difference?</li>
# <li>Choose whether to use either str.lower() or str.lowercase(). What is the difference?</li>
# <li>Use re.findall(pattern, string)</li>
# <li>Look for the "Raw String Notation" section in the Python 're' documentation to understand the difference between r'\W', r'\W' and '\\W'. </li>
# <li>For the pattern, decide between using '\s', '\w', '\s+' or '\w+'. What do you think are the differences?</li>
# </ul>
# </p>
#
# In[2]:
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: process_data
def process_data(file_name):
"""
Input:
A file_name which is found in your current directory. You just have to read it in.
Output:
words: a list containing all the words in the corpus (text file you read) in lower case.
"""
words = [] # return this variable correctly
### START CODE HERE ###
with open(file_name) as f:
file_name_data = f.read()
file_name_data=file_name_data.lower()
words = re.findall('\w+',file_name_data)
### END CODE HERE ###
return words
# Note, in the following cell, 'words' is converted to a python `set`. This eliminates any duplicate entries.
# In[3]:
#DO NOT MODIFY THIS CELL
word_l = process_data('shakespeare.txt')
vocab = set(word_l) # this will be your new vocabulary
print(f"The first ten words in the text are: \n{word_l[0:10]}")
print(f"There are {len(vocab)} unique words in the vocabulary.")
# #### Expected Output
# ```Python
# The first ten words in the text are:
# ['o', 'for', 'a', 'muse', 'of', 'fire', 'that', 'would', 'ascend', 'the']
# There are 6116 unique words in the vocabulary.
# ```
# <a name='ex-2'></a>
# ### Exercise 2
#
# Implement a `get_count` function that returns a dictionary
# - The dictionary's keys are words
# - The value for each word is the number of times that word appears in the corpus.
#
# For example, given the following sentence: **"I am happy because I am learning"**, your dictionary should return the following:
# <table style="width:20%">
#
# <tr>
# <td> <b>Key </b> </td>
# <td> <b>Value </b> </td>
#
#
# </tr>
# <tr>
# <td> I </td>
# <td> 2</td>
#
# </tr>
#
# <tr>
# <td>am</td>
# <td>2</td>
# </tr>
#
# <tr>
# <td>happy</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>because</td>
# <td>1</td>
# </tr>
#
# <tr>
# <td>learning</td>
# <td>1</td>
# </tr>
# </table>
#
#
# **Instructions**:
# Implement a `get_count` which returns a dictionary where the key is a word and the value is the number of times the word appears in the list.
#
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>Try implementing this using a for loop and a regular dictionary. This may be good practice for similar coding interview questions</li>
# <li>You can also use defaultdict instead of a regular dictionary, along with the for loop</li>
# <li>Otherwise, to skip using a for loop, you can use Python's <a href="https://docs.python.org/3.7/library/collections.html#collections.Counter" > Counter class</a> </li>
# </ul>
# </p>
# In[4]:
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: get_count
def get_count(word_l):
'''
Input:
word_l: a set of words representing the corpus.
Output:
word_count_dict: The wordcount dictionary where key is the word and value is its frequency.
'''
word_count_dict = {} # fill this with word counts
### START CODE HERE
word_count_dict = Counter(word_l)
### END CODE HERE ###
return word_count_dict
# In[5]:
#DO NOT MODIFY THIS CELL
word_count_dict = get_count(word_l)
print(f"There are {len(word_count_dict)} key values pairs")
print(f"The count for the word 'thee' is {word_count_dict.get('thee',0)}")
#
# #### Expected Output
# ```Python
# There are 6116 key values pairs
# The count for the word 'thee' is 240
# ```
# <a name='ex-3'></a>
# ### Exercise 3
# Given the dictionary of word counts, compute the probability that each word will appear if randomly selected from the corpus of words.
#
# $$P(w_i) = \frac{C(w_i)}{M} \tag{Eqn-2}$$
# where
#
# $C(w_i)$ is the total number of times $w_i$ appears in the corpus.
#
# $M$ is the total number of words in the corpus.
#
# For example, the probability of the word 'am' in the sentence **'I am happy because I am learning'** is:
#
# $$P(am) = \frac{C(w_i)}{M} = \frac {2}{7} \tag{Eqn-3}.$$
#
# **Instructions:** Implement `get_probs` function which gives you the probability
# that a word occurs in a sample. This returns a dictionary where the keys are words, and the value for each word is its probability in the corpus of words.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# General advice
# <ul>
# <li> Use dictionary.values() </li>
# <li> Use sum() </li>
# <li> The cardinality (number of words in the corpus) should be equal to len(word_l). You will calculate this same number, but using the word count dictionary.</li>
# </ul>
#
# If you're using a for loop:
# <ul>
# <li> Use dictionary.keys() </li>
# </ul>
#
# If you're using a dictionary comprehension:
# <ul>
# <li>Use dictionary.items() </li>
# </ul>
# </p>
#
# In[6]:
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: get_probs
def get_probs(word_count_dict):
'''
Input:
word_count_dict: The wordcount dictionary where key is the word and value is its frequency.
Output:
probs: A dictionary where keys are the words and the values are the probability that a word will occur.
'''
probs = {} # return this variable correctly
### START CODE HERE ###
m = sum(word_count_dict.values())
for key in word_count_dict.keys():
probs[key] = word_count_dict[key] / m
### END CODE HERE ###
return probs
# In[7]:
#DO NOT MODIFY THIS CELL
probs = get_probs(word_count_dict)
print(f"Length of probs is {len(probs)}")
print(f"P('thee') is {probs['thee']:.4f}")
# #### Expected Output
#
# ```Python
# Length of probs is 6116
# P('thee') is 0.0045
# ```
# <a name='2'></a>
# # Part 2: String Manipulations
#
# Now, that you have computed $P(w_i)$ for all the words in the corpus, you will write a few functions to manipulate strings so that you can edit the erroneous strings and return the right spellings of the words. In this section, you will implement four functions:
#
# * `delete_letter`: given a word, it returns all the possible strings that have **one character removed**.
# * `switch_letter`: given a word, it returns all the possible strings that have **two adjacent letters switched**.
# * `replace_letter`: given a word, it returns all the possible strings that have **one character replaced by another different letter**.
# * `insert_letter`: given a word, it returns all the possible strings that have an **additional character inserted**.
#
# #### List comprehensions
#
# String and list manipulation in python will often make use of a python feature called [list comprehensions](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions). The routines below will be described as using list comprehensions, but if you would rather implement them in another way, you are free to do so as long as the result is the same. Further, the following section will provide detailed instructions on how to use list comprehensions and how to implement the desired functions. If you are a python expert, feel free to skip the python hints and move to implementing the routines directly.
# Python List Comprehensions embed a looping structure inside of a list declaration, collapsing many lines of code into a single line. If you are not familiar with them, they seem slightly out of order relative to for loops.
# <div style="width:image width px; font-size:100%; text-align:center;"><img src='GenericListComp3.PNG' alt="alternate text" width="width" height="height" style="width:800px;height:400px;"/> Figure 2 </div>
# The diagram above shows that the components of a list comprehension are the same components you would find in a typical for loop that appends to a list, but in a different order. With that in mind, we'll continue the specifics of this assignment. We will be very descriptive for the first function, `deletes()`, and less so in later functions as you become familiar with list comprehensions.
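# Before the exercise, here is a minimal, generic illustration of the loop-vs-comprehension equivalence described above (toy data only, not part of the graded solution):
squares_loop = []
for n in [1, 2, 3, 4]:
    if n % 2 == 0:
        squares_loop.append(n * n)
squares_comp = [n * n for n in [1, 2, 3, 4] if n % 2 == 0]
print(squares_loop == squares_comp)  # True -- same elements, same order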
# <a name='ex-4'></a>
# ### Exercise 4
#
# **Instructions for delete_letter():** Implement a `delete_letter()` function that, given a word, returns a list of strings with one character deleted.
#
# For example, given the word **nice**, it would return the set: {'ice', 'nce', 'nic', 'nie'}.
#
# **Step 1:** Create a list of 'splits'. This is all the ways you can split a word into Left and Right: For example,
# 'nice' is split into: `[('', 'nice'), ('n', 'ice'), ('ni', 'ce'), ('nic', 'e'), ('nice', '')]`
# This is common to all four functions (delete, replace, switch, insert).
#
# <div style="width:image width px; font-size:100%; text-align:center;"><img src='Splits1.PNG' alt="alternate text" width="width" height="height" style="width:650px;height:200px;" /> Figure 3 </div>
# **Step 2:** This is specific to `delete_letter`. Here, we are generating all words that result from deleting one character.
# This can be done in a single line with a list comprehension. You can make use of this type of syntax:
# `[f(a,b) for a, b in splits if condition]`
#
# For our 'nice' example you get:
# ['ice', 'nce', 'nie', 'nic']
# <div style="width:image width px; font-size:100%; text-align:center;"><img src='ListComp2.PNG' alt="alternate text" width="width" height="height" style="width:550px;height:300px;" /> Figure 4 </div>
# #### Levels of assistance
#
# Try this exercise with these levels of assistance.
# - We hope that this will make it a meaningful experience rather than a frustrating one.
# - Start with level 1, then move onto level 2, and 3 as needed.
#
# - Level 1. Try to think this through and implement this yourself.
# - Level 2. Click on the "Level 2 Hints" section for some hints to get started.
# - Level 3. If you would prefer more guidance, please click on the "Level 3 Hints" cell for step by step instructions.
#
# - If you are still stuck, look at the images in the "list comprehensions" section above.
#
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Level 2 Hints</b></font>
# </summary>
# <p>
# <ul>
# <li><a href="" > Use array slicing like my_string[0:2] </a> </li>
# <li><a href="" > Use list comprehensions or for loops </a> </li>
# </ul>
# </p>
#
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Level 3 Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>splits: Use array slicing, like my_str[0:2], to separate a string into two pieces.</li>
# <li>Do this in a loop or list comprehension, so that you have a list of tuples.
# <li> For example, "cake" can get split into "ca" and "ke". They're stored in a tuple ("ca","ke"), and the tuple is appended to a list. We'll refer to these as L and R, so the tuple is (L,R)</li>
# <li>When choosing the range for your loop, if you input the word "cans" and generate the tuple ('cans',''), make sure to include an if statement to check the length of that right-side string (R) in the tuple (L,R) </li>
# <li>deletes: Go through the list of tuples and combine the two strings together. You can use the + operator to combine two strings</li>
# <li>When combining the tuples, make sure that you leave out a middle character.</li>
# <li>Use array slicing to leave out the first character of the right substring.</li>
# </ul>
# </p>
# In[8]:
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: deletes
def delete_letter(word, verbose=False):
'''
Input:
word: the string/word for which you will generate all possible words
in the vocabulary which have 1 missing character
Output:
delete_l: a list of all possible strings obtained by deleting 1 character from word
'''
delete_l = []
split_l = []
### START CODE HERE ###
for c in range(len(word)):
split_l.append((word[:c],word[c:]))
for a,b in split_l:
delete_l.append(a+b[1:])
### END CODE HERE ###
if verbose: print(f"input word {word}, \nsplit_l = {split_l}, \ndelete_l = {delete_l}")
return delete_l
# In[9]:
delete_word_l = delete_letter(word="cans",
verbose=True)
# #### Expected Output
# ```Python
# input word cans,
# split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's')],
# delete_l = ['ans', 'cns', 'cas', 'can']
# ```
# #### Note 1
# You might get a slightly different result with split_l.
# - Notice how it has the extra tuple `('cans', '')`.
# - This will be fine as long as you have checked the size of the right-side substring in tuple (L,R).
# - Can you explain why this will give you the same result for the list of deletion strings (delete_l)?
#
# ```Python
# input word cans,
# split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')],
# delete_l = ['ans', 'cns', 'cas', 'can']
# ```
# #### Note 2
# If you end up getting the same word as your input word, like this:
#
# ```Python
# input word cans,
# split_l = [('', 'cans'), ('c', 'ans'), ('ca', 'ns'), ('can', 's'), ('cans', '')],
# delete_l = ['ans', 'cns', 'cas', 'can', 'cans']
# ```
#
# - Check how you set the `range`.
# - See if you check the length of the string on the right-side of the split.
# In[10]:
# test # 2
print(f"Number of outputs of delete_letter('at') is {len(delete_letter('at'))}")
# #### Expected output
#
# ```CPP
# Number of outputs of delete_letter('at') is 2
# ```
# <a name='ex-5'></a>
# ### Exercise 5
#
# **Instructions for switch_letter()**: Now implement a function that switches two letters in a word. It takes in a word and returns a list of all the possible switches of two letters **that are adjacent to each other**.
# - For example, given the word 'eta', it returns {'eat', 'tea'}, but does not return 'ate'.
#
# **Step 1:** is the same as in delete_letter()
# **Step 2:** A list comprehension or for loop which forms strings by swapping adjacent letters. This is of the form:
# `[f(L,R) for L, R in splits if condition]` where 'condition' will test the length of R in a given iteration. See below.
# <div style="width:image width px; font-size:100%; text-align:center;"><img src='Switches1.PNG' alt="alternate text" width="width" height="height" style="width:600px;height:200px;"/> Figure 5 </div>
# #### Levels of difficulty
#
# Try this exercise with these levels of difficulty.
# - Level 1. Try to think this through and implement this yourself.
# - Level 2. Click on the "Level 2 Hints" section for some hints to get started.
# - Level 3. If you would prefer more guidance, please click on the "Level 3 Hints" cell for step by step instructions.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Level 2 Hints</b></font>
# </summary>
# <p>
# <ul>
# <li><a href="" > Use array slicing like my_string[0:2] </a> </li>
# <li><a href="" > Use list comprehensions or for loops </a> </li>
# <li>To do a switch, think of the whole word as divided into 4 distinct parts. Write out 'cupcakes' on a piece of paper and see how you can split it into ('cupc', 'k', 'a', 'es')</li>
# </ul>
# </p>
#
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Level 3 Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>splits: Use array slicing, like my_str[0:2], to separate a string into two pieces.</li>
# <li>Splitting is the same as for delete_letter</li>
# <li>To perform the switch, go through the list of tuples and combine four strings together. You can use the + operator to combine strings</li>
# <li>The four strings will be the left substring from the split tuple, followed by the first (index 1) character of the right substring, then the zero-th character (index 0) of the right substring, and then the remaining part of the right substring.</li>
# <li>Unlike delete_letter, you will want to check that your right substring is at least a minimum length. To see why, review the previous hint bullet point (directly before this one).</li>
# </ul>
# </p>
# In[11]:
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: switches
def switch_letter(word, verbose=False):
'''
Input:
word: input string
Output:
        switches: a list of all possible strings with one adjacent character switched
'''
def swap(c, i, j):
c = list(c)
c[i], c[j] = c[j], c[i]
return ''.join(c)
switch_l = []
split_l = []
len_word=len(word)
### START CODE HERE ###
for c in range(len_word):
split_l.append((word[:c],word[c:]))
switch_l = [a + b[1] + b[0] + b[2:] for a,b in split_l if len(b) >= 2]
### END CODE HERE ###
if verbose: print(f"Input word = {word} \nsplit_l = {split_l} \nswitch_l = {switch_l}")
return switch_l
# In[12]:
switch_word_l = switch_letter(word="eta",
verbose=True)
# #### Expected output
#
# ```Python
# Input word = eta
# split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a')]
# switch_l = ['tea', 'eat']
# ```
# #### Note 1
#
# You may get this:
# ```Python
# Input word = eta
# split_l = [('', 'eta'), ('e', 'ta'), ('et', 'a'), ('eta', '')]
# switch_l = ['tea', 'eat']
# ```
# - Notice how it has the extra tuple `('eta', '')`.
# - This is also correct.
# - Can you think of why this is the case?
# #### Note 2
#
# If you get an error
# ```Python
# IndexError: string index out of range
# ```
# - Please see if you have checked the length of the strings when switching characters.
# In[13]:
# test # 2
print(f"Number of outputs of switch_letter('at') is {len(switch_letter('at'))}")
# #### Expected output
#
# ```CPP
# Number of outputs of switch_letter('at') is 1
# ```
# <a name='ex-6'></a>
# ### Exercise 6
# **Instructions for replace_letter()**: Now implement a function that takes in a word and returns a list of strings with one **replaced letter** from the original word.
#
# **Step 1:** is the same as in `delete_letter()`
#
# **Step 2:** A list comprehension or for loop which forms strings by replacing letters. This can be of the form:
# `[f(a,b,c) for a, b in splits if condition for c in string]` Note the use of the second for loop.
# It is expected in this routine that one or more of the replacements will include the original word. For example, replacing the first letter of 'ear' with 'e' will return 'ear'.
#
# **Step 3:** Remove the original input letter from the output.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>To remove a word from a list, first store its contents inside a set()</li>
# <li>Use set.discard('the_word') to remove a word in a set (if the word does not exist in the set, then it will not throw a KeyError. Using set.remove('the_word') throws a KeyError if the word does not exist in the set. </li>
# </ul>
# </p>
#
# In[14]:
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: replaces
def replace_letter(word, verbose=False):
'''
Input:
word: the input string/word
Output:
replaces: a list of all possible strings where we replaced one letter from the original word.
'''
letters = 'abcdefghijklmnopqrstuvwxyz'
replace_l = []
split_l = []
### START CODE HERE ###
for c in range(len(word)):
split_l.append((word[0:c],word[c:]))
replace_l = [a + l + (b[1:] if len(b)> 1 else '') for a,b in split_l if b for l in letters]
replace_set=set(replace_l)
replace_set.remove(word)
### END CODE HERE ###
# turn the set back into a list and sort it, for easier viewing
replace_l = sorted(list(replace_set))
if verbose: print(f"Input word = {word} \nsplit_l = {split_l} \nreplace_l {replace_l}")
return replace_l
# In[15]:
replace_l = replace_letter(word='can',
verbose=True)
# #### Expected Output**:
# ```Python
# Input word = can
# split_l = [('', 'can'), ('c', 'an'), ('ca', 'n')]
# replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']
# ```
# - Note how the input word 'can' should not be one of the output words.
# #### Note 1
# If you get something like this:
#
# ```Python
# Input word = can
# split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')]
# replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']
# ```
# - Notice how split_l has an extra tuple `('can', '')`, but the output is still the same, so this is okay.
# #### Note 2
# If you get something like this:
# ```Python
# Input word = can
# split_l = [('', 'can'), ('c', 'an'), ('ca', 'n'), ('can', '')]
# replace_l ['aan', 'ban', 'caa', 'cab', 'cac', 'cad', 'cae', 'caf', 'cag', 'cah', 'cai', 'caj', 'cak', 'cal', 'cam', 'cana', 'canb', 'canc', 'cand', 'cane', 'canf', 'cang', 'canh', 'cani', 'canj', 'cank', 'canl', 'canm', 'cann', 'cano', 'canp', 'canq', 'canr', 'cans', 'cant', 'canu', 'canv', 'canw', 'canx', 'cany', 'canz', 'cao', 'cap', 'caq', 'car', 'cas', 'cat', 'cau', 'cav', 'caw', 'cax', 'cay', 'caz', 'cbn', 'ccn', 'cdn', 'cen', 'cfn', 'cgn', 'chn', 'cin', 'cjn', 'ckn', 'cln', 'cmn', 'cnn', 'con', 'cpn', 'cqn', 'crn', 'csn', 'ctn', 'cun', 'cvn', 'cwn', 'cxn', 'cyn', 'czn', 'dan', 'ean', 'fan', 'gan', 'han', 'ian', 'jan', 'kan', 'lan', 'man', 'nan', 'oan', 'pan', 'qan', 'ran', 'san', 'tan', 'uan', 'van', 'wan', 'xan', 'yan', 'zan']
# ```
# - Notice how there are strings that are 1 letter longer than the original word, such as `cana`.
# - Please check for the case when there is an empty string `''`, and if so, do not use that empty string when setting replace_l.
# In[16]:
# test # 2
print(f"Number of outputs of switch_letter('at') is {len(switch_letter('at'))}")
# #### Expected output
# ```CPP
# Number of outputs of switch_letter('at') is 1
# ```
# <a name='ex-7'></a>
# ### Exercise 7
#
# **Instructions for insert_letter()**: Now implement a function that takes in a word and returns a list with a letter inserted at every offset.
#
# **Step 1:** is the same as in `delete_letter()`
#
# **Step 2:** This can be a list comprehension of the form:
# `[f(a,b,c) for a, b in splits if condition for c in string]`
# In[17]:
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: inserts
def insert_letter(word, verbose=False):
'''
Input:
word: the input string/word
Output:
inserts: a set of all possible strings with one new letter inserted at every offset
'''
letters = 'abcdefghijklmnopqrstuvwxyz'
insert_l = []
split_l = []
### START CODE HERE ###
for c in range(len(word)+1):
split_l.append((word[0:c],word[c:]))
insert_l = [ a + l + b for a,b in split_l for l in letters]
### END CODE HERE ###
if verbose: print(f"Input word {word} \nsplit_l = {split_l} \ninsert_l = {insert_l}")
return insert_l
# In[18]:
insert_l = insert_letter('at', True)
print(f"Number of strings output by insert_letter('at') is {len(insert_l)}")
# #### Expected output
#
# ```Python
# Input word at
# split_l = [('', 'at'), ('a', 't'), ('at', '')]
# insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz']
# Number of strings output by insert_letter('at') is 78
# ```
# #### Note 1
#
# If you get a split_l like this:
# ```Python
# Input word at
# split_l = [('', 'at'), ('a', 't')]
# insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt']
# Number of strings output by insert_letter('at') is 52
# ```
# - Notice that split_l is missing the extra tuple ('at', ''). For insertion, we actually **WANT** this tuple.
# - The function is not creating all the desired output strings.
# - Check the range that you use for the for loop.
# #### Note 2
# If you see this:
# ```Python
# Input word at
# split_l = [('', 'at'), ('a', 't'), ('at', '')]
# insert_l = ['aat', 'bat', 'cat', 'dat', 'eat', 'fat', 'gat', 'hat', 'iat', 'jat', 'kat', 'lat', 'mat', 'nat', 'oat', 'pat', 'qat', 'rat', 'sat', 'tat', 'uat', 'vat', 'wat', 'xat', 'yat', 'zat', 'aat', 'abt', 'act', 'adt', 'aet', 'aft', 'agt', 'aht', 'ait', 'ajt', 'akt', 'alt', 'amt', 'ant', 'aot', 'apt', 'aqt', 'art', 'ast', 'att', 'aut', 'avt', 'awt', 'axt', 'ayt', 'azt']
# Number of strings output by insert_letter('at') is 52
# ```
#
# - Even though you may have fixed the split_l so that it contains the tuple `('at', '')`, notice that you're still missing some output strings.
# - Notice that it's missing strings such as 'ata', 'atb', 'atc' all the way to 'atz'.
# - To fix this, make sure that when you set insert_l, you allow the use of the empty string `''`.
# In[19]:
# test # 2
print(f"Number of outputs of insert_letter('at') is {len(insert_letter('at'))}")
# #### Expected output
#
# ```CPP
# Number of outputs of insert_letter('at') is 78
# ```
# <a name='3'></a>
#
# # Part 3: Combining the edits
#
# Now that you have implemented the string manipulations, you will create two functions that, given a string, will return all the possible single and double edits on that string. These will be `edit_one_letter()` and `edit_two_letters()`.
# <a name='3-1'></a>
# ## 3.1 Edit one letter
#
# <a name='ex-8'></a>
# ### Exercise 8
#
# **Instructions**: Implement the `edit_one_letter` function to get all the possible edits that are one edit away from a word. The edits consist of the replace, insert, delete, and optionally the switch operation. You should use the previous functions you have already implemented to complete this function. The 'switch' function is a less common edit function, so its use will be selected by an "allow_switches" input argument.
#
# Note that those functions return *lists* while this function should return a *python set*. Utilizing a set eliminates any duplicate entries.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li> Each of the functions returns a list. You can combine lists using the `+` operator. </li>
# <li> To get unique strings (avoid duplicates), you can use the set() function. </li>
# </ul>
# </p>
#
# In[22]:
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: edit_one_letter
def edit_one_letter(word, allow_switches = True):
"""
Input:
        word: the string/word for which we will generate all possible words that are one edit away.
    Output:
        edit_one_set: a set of words with one possible edit. Please return a set, not a list.
"""
edit_one_set = set()
### START CODE HERE ###
edit_one_set.update(delete_letter(word))
if allow_switches:
edit_one_set.update(switch_letter(word))
edit_one_set.update(replace_letter(word))
edit_one_set.update(insert_letter(word))
### END CODE HERE ###
return edit_one_set
# In[23]:
tmp_word = "at"
tmp_edit_one_set = edit_one_letter(tmp_word)
# turn this into a list to sort it, in order to view it
tmp_edit_one_l = sorted(list(tmp_edit_one_set))
print(f"input word {tmp_word} \nedit_one_l \n{tmp_edit_one_l}\n")
print(f"The type of the returned object should be a set {type(tmp_edit_one_set)}")
print(f"Number of outputs from edit_one_letter('at') is {len(edit_one_letter('at'))}")
# #### Expected Output
# ```CPP
# input word at
# edit_one_l
# ['a', 'aa', 'aat', 'ab', 'abt', 'ac', 'act', 'ad', 'adt', 'ae', 'aet', 'af', 'aft', 'ag', 'agt', 'ah', 'aht', 'ai', 'ait', 'aj', 'ajt', 'ak', 'akt', 'al', 'alt', 'am', 'amt', 'an', 'ant', 'ao', 'aot', 'ap', 'apt', 'aq', 'aqt', 'ar', 'art', 'as', 'ast', 'ata', 'atb', 'atc', 'atd', 'ate', 'atf', 'atg', 'ath', 'ati', 'atj', 'atk', 'atl', 'atm', 'atn', 'ato', 'atp', 'atq', 'atr', 'ats', 'att', 'atu', 'atv', 'atw', 'atx', 'aty', 'atz', 'au', 'aut', 'av', 'avt', 'aw', 'awt', 'ax', 'axt', 'ay', 'ayt', 'az', 'azt', 'bat', 'bt', 'cat', 'ct', 'dat', 'dt', 'eat', 'et', 'fat', 'ft', 'gat', 'gt', 'hat', 'ht', 'iat', 'it', 'jat', 'jt', 'kat', 'kt', 'lat', 'lt', 'mat', 'mt', 'nat', 'nt', 'oat', 'ot', 'pat', 'pt', 'qat', 'qt', 'rat', 'rt', 'sat', 'st', 't', 'ta', 'tat', 'tt', 'uat', 'ut', 'vat', 'vt', 'wat', 'wt', 'xat', 'xt', 'yat', 'yt', 'zat', 'zt']
#
# The type of the returned object should be a set <class 'set'>
# Number of outputs from edit_one_letter('at') is 129
# ```
# <a name='3-2'></a>
# ## Part 3.2 Edit two letters
#
# <a name='ex-9'></a>
# ### Exercise 9
#
# Now you can generalize this to implement to get two edits on a word. To do so, you would have to get all the possible edits on a single word and then for each modified word, you would have to modify it again.
#
# **Instructions**: Implement the `edit_two_letters` function that returns a set of words that are two edits away. Note that creating additional edits based on the `edit_one_letter` function may 'restore' some one_edits to zero or one edits. That is allowed here; this is accounted for in get_corrections.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>You will likely want to take the union of two sets.</li>
# <li>You can either use set.union() or use the '|' (or operator) to union two sets</li>
# <li>See the documentation <a href="https://docs.python.org/2/library/sets.html" > Python sets </a> for examples of using operators or functions of the Python set.</li>
# </ul>
# </p>
#
# In[24]:
# UNQ_C9 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: edit_two_letters
def edit_two_letters(word, allow_switches = True):
'''
Input:
word: the input string/word
Output:
edit_two_set: a set of strings with all possible two edits
'''
edit_two_set = set()
### START CODE HERE ###
edit_one = edit_one_letter(word,allow_switches=allow_switches)
for w in edit_one:
if w:
edit_two = edit_one_letter(w,allow_switches=allow_switches)
edit_two_set.update(edit_two)
### END CODE HERE ###
return edit_two_set
# In[25]:
tmp_edit_two_set = edit_two_letters("a")
tmp_edit_two_l = sorted(list(tmp_edit_two_set))
print(f"Number of strings with edit distance of two: {len(tmp_edit_two_l)}")
print(f"First 10 strings {tmp_edit_two_l[:10]}")
print(f"Last 10 strings {tmp_edit_two_l[-10:]}")
print(f"The data type of the returned object should be a set {type(tmp_edit_two_set)}")
print(f"Number of strings that are 2 edit distances from 'at' is {len(edit_two_letters('at'))}")
# #### Expected Output
#
# ```CPP
# Number of strings with edit distance of two: 2654
# First 10 strings ['', 'a', 'aa', 'aaa', 'aab', 'aac', 'aad', 'aae', 'aaf', 'aag']
# Last 10 strings ['zv', 'zva', 'zw', 'zwa', 'zx', 'zxa', 'zy', 'zya', 'zz', 'zza']
# The data type of the returned object should be a set <class 'set'>
# Number of strings that are 2 edit distances from 'at' is 7154
# ```
# <a name='3-3'></a>
# ## Part 3-3: suggest spelling suggestions
#
# Now you will use your `edit_two_letters` function to get a set of all the possible 2 edits on your word. You will then use those strings to get the most probable word you meant to type aka your typing suggestion.
#
# <a name='ex-10'></a>
# ### Exercise 10
# **Instructions**: Implement `get_corrections`, which returns a list of zero to n possible suggestion tuples of the form (word, probability_of_word).
#
# **Step 1:** Generate suggestions for a supplied word: You'll use the edit functions you have developed. The 'suggestion algorithm' should follow this logic:
# * If the word is in the vocabulary, suggest the word.
# * Otherwise, if there are suggestions from `edit_one_letter` that are in the vocabulary, use those.
# * Otherwise, if there are suggestions from `edit_two_letters` that are in the vocabulary, use those.
# * Otherwise, suggest the input word.*
# * The idea is that words generated from fewer edits are more likely than words with more edits.
#
#
# Note:
# - Edits of one or two letters may 'restore' strings to either zero or one edit. This algorithm accounts for this by preferentially selecting lower distance edits first.
# #### Short circuit
# In Python, logical operations such as `and` and `or` have two useful properties. They can operate on lists and they have ['short-circuit' behavior](https://docs.python.org/3/library/stdtypes.html). Try these:
# In[26]:
# example of logical operation on lists or sets
print( [] and ["a","b"] )
print( [] or ["a","b"] )
#example of Short circuit behavior
val1 = ["Most","Likely"] or ["Less","so"] or ["least","of","all"] # selects first, does not evalute remainder
print(val1)
val2 = [] or [] or ["least","of","all"] # continues evaluation until there is a non-empty list
print(val2)
# The logical `or` could be used to implement the suggestion algorithm very compactly. Alternately, if/then constructs could be used.
#
# **Step 2**: Create a 'best_words' dictionary where the 'key' is a suggestion and the 'value' is the probability of that word in your vocabulary. If the word is not in the vocabulary, assign it a probability of 0.
#
# **Step 3**: Select the n best suggestions. There may be fewer than n.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>edit_one_letter and edit_two_letters return *python sets*. </li>
# <li> Sets have a handy <a href="https://docs.python.org/2/library/sets.html" > set.intersection </a> feature</li>
# <li>To find the keys that have the highest values in a dictionary, you can use the Counter dictionary to create a Counter object from a regular dictionary. Then you can use Counter.most_common(n) to get the n most common keys.
# </li>
# <li>To find the intersection of two sets, you can use set.intersection or the & operator.</li>
# <li>If you are not as familiar with short circuit syntax (as shown above), feel free to use if else statements instead.</li>
# <li>To use an if statement to check of a set is empty, use 'if not x:' syntax </li>
# </ul>
# </p>
#
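# Generic illustration of the Counter.most_common hint above (toy data only, unrelated to the vocabulary): most_common(n) returns the n keys with the largest values, highest first.
from collections import Counter
toy_scores = Counter({"apple": 0.5, "pear": 0.3, "plum": 0.2})
print(toy_scores.most_common(2))  # [('apple', 0.5), ('pear', 0.3)]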
# In[27]:
# UNQ_C10 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# UNIT TEST COMMENT: Candidate for Table Driven Tests
# GRADED FUNCTION: get_corrections
def get_corrections(word, probs, vocab, n=2, verbose = False):
'''
Input:
word: a user entered string to check for suggestions
probs: a dictionary that maps each word to its probability in the corpus
vocab: a set containing all the vocabulary
n: number of possible word corrections you want returned in the dictionary
Output:
n_best: a list of tuples with the most probable n corrected words and their probabilities.
'''
suggestions = []
n_best = []
### START CODE HERE ###
    suggestions = list((word in vocab and [word]) or edit_one_letter(word).intersection(vocab) or edit_two_letters(word).intersection(vocab) or [word])
    # keep at most n suggestions, ordered from most to least probable
    n_best = sorted([[s, probs.get(s, 0)] for s in suggestions], key=lambda pair: pair[1], reverse=True)[:n]
### END CODE HERE ###
if verbose: print("suggestions = ", suggestions)
return n_best
# In[28]:
# Test your implementation - feel free to try other words in my word
my_word = 'dys'
tmp_corrections = get_corrections(my_word, probs, vocab, 2, verbose=True)
for i, word_prob in enumerate(tmp_corrections):
print(f"word {i}: {word_prob[0]}, probability {word_prob[1]:.6f}")
# CODE REVIEW COMMENT: using "tmp_corrections" instead of "cors". "cors" is not defined
print(f"data type of corrections {type(tmp_corrections)}")
# #### Expected Output
# ```CPP
# word 0: days, probability 0.000410
# word 1: dye, probability 0.000019
# data type of corrections <class 'list'>
# ```
# <a name='4'></a>
# # Part 4: Minimum Edit distance
#
# Now that you have implemented your auto-correct, how do you evaluate the similarity between two strings? For example: 'waht' and 'what'
#
# Also how do you efficiently find the shortest path to go from the word, 'waht' to the word 'what'?
#
# You will implement a dynamic programming system that will tell you the minimum number of edits required to convert a string into another string.
# <a name='4-1'></a>
# ### Part 4.1 Dynamic Programming
#
# Dynamic Programming breaks a problem down into subproblems which can be combined to form the final solution. Here, given a string source[0..i] and a string target[0..j], we will compute all the combinations of substrings[i, j] and calculate their edit distance. To do this efficiently, we will use a table to maintain the previously computed substrings and use those to calculate larger substrings.
#
# You have to create a matrix and update each element in the matrix as follows:
# $$\text{Initialization}$$
#
# \begin{align}
# D[0,0] &= 0 \\
# D[i,0] &= D[i-1,0] + del\_cost(source[i]) \tag{4}\\
# D[0,j] &= D[0,j-1] + ins\_cost(target[j]) \\
# \end{align}
#
# $$\text{Per Cell Operations}$$
# \begin{align}
# \\
# D[i,j] =min
# \begin{cases}
# D[i-1,j] + del\_cost\\
# D[i,j-1] + ins\_cost\\
# D[i-1,j-1] + \left\{\begin{matrix}
# rep\_cost; & if src[i]\neq tar[j]\\
# 0 ; & if src[i]=tar[j]
# \end{matrix}\right.
# \end{cases}
# \tag{5}
# \end{align}
# So converting the source word **play** to the target word **stay**, using an insert cost of 1, a delete cost of 1, and a replace cost of 2, would give you the following table:
# <table style="width:20%">
#
# <tr>
# <td> <b> </b> </td>
# <td> <b># </b> </td>
# <td> <b>s </b> </td>
# <td> <b>t </b> </td>
# <td> <b>a </b> </td>
# <td> <b>y </b> </td>
# </tr>
# <tr>
# <td> <b> # </b></td>
# <td> 0</td>
# <td> 1</td>
# <td> 2</td>
# <td> 3</td>
# <td> 4</td>
#
# </tr>
# <tr>
# <td> <b> p </b></td>
# <td> 1</td>
# <td> 2</td>
# <td> 3</td>
# <td> 4</td>
# <td> 5</td>
# </tr>
#
# <tr>
# <td> <b> l </b></td>
# <td>2</td>
# <td>3</td>
# <td>4</td>
# <td>5</td>
# <td>6</td>
# </tr>
#
# <tr>
# <td> <b> a </b></td>
# <td>3</td>
# <td>4</td>
# <td>5</td>
# <td>4</td>
# <td>5</td>
# </tr>
#
# <tr>
# <td> <b> y </b></td>
# <td>4</td>
# <td>5</td>
# <td>6</td>
# <td>5</td>
# <td>4</td>
# </tr>
#
#
# </table>
#
#
# The operations used in this algorithm are 'insert', 'delete', and 'replace'. These correspond to the functions that you defined earlier: insert_letter(), delete_letter() and replace_letter(). switch_letter() is not used here.
# The diagram below describes how to initialize the table. Each entry in D[i,j] represents the minimum cost of converting string source[0:i] to string target[0:j]. The first column is initialized to represent the cumulative cost of deleting the source characters to convert string "EER" to "". The first row is initialized to represent the cumulative cost of inserting the target characters to convert from "" to "NEAR".
# <div style="width:image width px; font-size:100%; text-align:center;"><img src='EditDistInit4.PNG' alt="alternate text" width="width" height="height" style="width:1000px;height:400px;"/> Figure 6 Initializing Distance Matrix</div>
# Filling in the remainder of the table utilizes the 'Per Cell Operations' in equation (5) above. Note that the diagram below also shows, in light grey, the 3 sub-calculations for some cells; only the 'min' of those values is stored in the table by the `min_edit_distance()` function.
# <div style="width:image width px; font-size:100%; text-align:center;"><img src='EditDistFill2.PNG' alt="alternate text" width="width" height="height" style="width:800px;height:400px;"/> Figure 7 Filling Distance Matrix</div>
# Note that the formula for $D[i,j]$ shown in the image is equivalent to:
#
# \begin{align}
# \\
# D[i,j] =min
# \begin{cases}
# D[i-1,j] + del\_cost\\
# D[i,j-1] + ins\_cost\\
# D[i-1,j-1] + \left\{\begin{matrix}
# rep\_cost; & if src[i]\neq tar[j]\\
# 0 ; & if src[i]=tar[j]
# \end{matrix}\right.
# \end{cases}
# \tag{5}
# \end{align}
#
# The variable `sub_cost` (for substitution cost) is the same as `rep_cost`, the replacement cost. We will stick with the term "replace" whenever possible.
# Below are some examples of cells where replacement is used. This also shows the minimum path from the lower right final position where "EER" has been replaced by "NEAR" back to the start. This provides a starting point for the optional 'backtrace' algorithm below.
# <div style="width:image width px; font-size:100%; text-align:center;"><img src='EditDistExample1.PNG' alt="alternate text" width="width" height="height" style="width:1200px;height:400px;"/> Figure 8 Examples Distance Matrix</div>
# <a name='ex-11'></a>
# ### Exercise 11
#
# Again, the word "substitution" appears in the figure, but think of this as "replacement".
# **Instructions**: Implement the function below to get the minimum amount of edits required given a source string and a target string.
# <details>
# <summary>
# <font size="3" color="darkgreen"><b>Hints</b></font>
# </summary>
# <p>
# <ul>
# <li>The range(start, stop, step) function excludes 'stop' from its output</li>
# </ul>
# </p>
#
# In[29]:
# UNQ_C11 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: min_edit_distance
def min_edit_distance(source, target, ins_cost = 1, del_cost = 1, rep_cost = 2):
'''
Input:
source: a string corresponding to the string you are starting with
target: a string corresponding to the string you want to end with
ins_cost: an integer setting the insert cost
del_cost: an integer setting the delete cost
rep_cost: an integer setting the replace cost
Output:
D: a matrix of len(source)+1 by len(target)+1 containing minimum edit distances
med: the minimum edit distance (med) required to convert the source string to the target
'''
# use deletion and insert cost as 1
m = len(source)
n = len(target)
#initialize cost matrix with zeros and dimensions (m+1,n+1)
D = np.zeros((m+1, n+1), dtype=int)
### START CODE HERE (Replace instances of 'None' with your code) ###
# Fill in column 0, from row 1 to row m
for row in range(1,m+1): # Replace None with the proper range
D[row,0] = D[row-1,0] + del_cost
# Fill in row 0, for all columns from 1 to n
for col in range(1,n+1): # Replace None with the proper range
D[0,col] = D[0,col-1] + ins_cost
# Loop through row 1 to row m
for row in range(1,m+1):
# Loop through column 1 to column n
for col in range(1,n+1):
# Intialize r_cost to the 'replace' cost that is passed into this function
r_cost = rep_cost
# Check to see if source character at the previous row
# matches the target character at the previous column,
if source[row-1] == target[col-1]:
# Update the replacement cost to 0 if source and target are the same
r_cost = 0
# Update the cost at row, col based on previous entries in the cost matrix
# Refer to the equation calculate for D[i,j] (the minimum of three calculated costs)
D[row,col] = min([D[row-1,col]+del_cost, D[row,col-1]+ins_cost, D[row-1,col-1]+r_cost])
# Set the minimum edit distance with the cost found at row m, column n
med = D[m,n]
### END CODE HERE ###
return D, med
# In[30]:
#DO NOT MODIFY THIS CELL
# testing your implementation
source = 'play'
target = 'stay'
matrix, min_edits = min_edit_distance(source, target)
print("minimum edits: ",min_edits, "\n")
idx = list('#' + source)
cols = list('#' + target)
df = pd.DataFrame(matrix, index=idx, columns= cols)
print(df)
# **Expected Results:**
#
# ```CPP
# # s t a y
# # 0 1 2 3 4
# p 1 2 3 4 5
# l 2 3 4 5 6
# a 3 4 5 4 5
# y 4 5 6 5 4
# ```
# In[31]:
#DO NOT MODIFY THIS CELL
# testing your implementation
source = 'eer'
target = 'near'
matrix, min_edits = min_edit_distance(source, target)
print("minimum edits: ",min_edits, "\n")
idx = list(source)
idx.insert(0, '#')
cols = list(target)
cols.insert(0, '#')
df = pd.DataFrame(matrix, index=idx, columns= cols)
print(df)
# **Expected Results**
# ```CPP
# minimum edits: 3
#
# # n e a r
# # 0 1 2 3 4
# e 1 2 1 2 3
# e 2 3 2 3 4
# r 3 4 3 4 3
# ```
# We can now test several of our routines at once:
# In[32]:
source = "eer"
targets = edit_one_letter(source,allow_switches = False) #disable switches since min_edit_distance does not include them
for t in targets:
_, min_edits = min_edit_distance(source, t,1,1,1) # set ins, del, sub costs all to one
if min_edits != 1: print(source, t, min_edits)
# **Expected Results:** (empty)
#
# The 'replace_letter()' routine tries every letter a-z, one of which reproduces the original word.
# In[33]:
source = "eer"
targets = edit_two_letters(source,allow_switches = False) #disable switches since min_edit_distance does not include them
for t in targets:
_, min_edits = min_edit_distance(source, t,1,1,1) # set ins, del, sub costs all to one
if min_edits != 2 and min_edits != 1: print(source, t, min_edits)
# **Expected Results:** eer eer 0
# We have to allow single edits here because some two_edits will restore a single edit.
# # Submission
# Make sure you submit your assignment before you modify anything below
#
# <a name='5'></a>
#
# # Part 5: Optional - Backtrace
#
#
# Once you have computed your matrix using minimum edit distance, how would you find the shortest path from the top left corner to the bottom right corner?
#
# Note that you could use a backtrace algorithm for this. Try to find the shortest path given the matrix that your `min_edit_distance` function returned.
#
# You can use these [lecture slides on minimum edit distance](https://web.stanford.edu/class/cs124/lec/med.pdf) by Dan Jurafsky to learn about the algorithm for backtrace.
# In[34]:
# Experiment with back trace - insert your code here
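# Below is one possible (ungraded) sketch of a backtrace, assuming the `min_edit_distance`
# function defined above and its default costs (ins_cost=1, del_cost=1, rep_cost=2).
# It walks from the bottom-right cell of D back to the origin and records which
# operation (keep/replace/delete/insert) produced each step of the minimum-cost path.
def backtrace(source, target, D, ins_cost=1, del_cost=1, rep_cost=2):
    path = []
    row, col = len(source), len(target)
    while row > 0 or col > 0:
        # keep / replace: only possible when both a source and a target character remain
        if row > 0 and col > 0:
            r_cost = 0 if source[row - 1] == target[col - 1] else rep_cost
            if D[row, col] == D[row - 1, col - 1] + r_cost:
                path.append(('keep' if r_cost == 0 else 'replace', source[row - 1], target[col - 1]))
                row, col = row - 1, col - 1
                continue
        # delete a source character
        if row > 0 and D[row, col] == D[row - 1, col] + del_cost:
            path.append(('delete', source[row - 1], ''))
            row -= 1
            continue
        # otherwise the step must have been an insertion of a target character
        path.append(('insert', '', target[col - 1]))
        col -= 1
    return list(reversed(path))

matrix, _ = min_edit_distance('eer', 'near')
print(backtrace('eer', 'near', matrix))  # expected: insert 'n', keep 'e', replace 'e'->'a', keep 'r'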
# #### References
# - Dan Jurafsky - Speech and Language Processing - Textbook
# - This auto-correct explanation was first done by Peter Norvig in 2007
# In[ ]:
|
[
"rohitrathore.imh55@gmail.com"
] |
rohitrathore.imh55@gmail.com
|
6801916cae044b832c140f900cb07a3e20edc420
|
a239b724547ee54281d797e6cbc39716a1f0b7c5
|
/Day01-15/Day13/code/mytest.py
|
2bb944f5b4e8a715c76763ba9b4c4497c36a2b75
|
[] |
no_license
|
Zhaohl2019/Python-100-Days
|
967545483a36e4abd5c2ce585798c86ed221acb6
|
398819389c0de8e984560f679c5b00f7b854a1d8
|
refs/heads/master
| 2020-05-29T13:29:58.600144
| 2019-07-20T08:02:15
| 2019-07-20T08:02:15
| 189,164,387
| 0
| 0
| null | 2019-05-29T06:31:48
| 2019-05-29T06:31:47
| null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
from multiprocessing import Process, Queue
from random import randint
from time import time
def task_handler(curr_list, result_queue):
total = 0
for number in curr_list:
total += number
result_queue.put(total)
def main():
processes = []
number_list = [x for x in range(1, 100000001)]
result_queue = Queue()
index = 0
    # start 8 processes, each working on one slice of the data
for _ in range(8):
p = Process(target=task_handler,
args=(number_list[index:index + 12500000], result_queue))
index += 12500000
processes.append(p)
p.start()
    # start timing how long it takes for all processes to finish
start = time()
for p in processes:
p.join()
    # merge the partial results
total = 0
while not result_queue.empty():
total += result_queue.get()
print(total)
end = time()
    print('Execution time: ', (end - start), 's', sep='')
if __name__ == '__main__':
main()
|
[
"zhaohl1998@163.com"
] |
zhaohl1998@163.com
|
287bfff4c301f0789b34977c1307afdf42f0f6b5
|
ac2229b4657e69b1502b9fd6760c089410e035e3
|
/33. pascalTree.py
|
1be8d7237f1891126824dfc384b7e3de2ba14dd0
|
[] |
no_license
|
ankitk2109/LeetCode
|
4a192804d69f3f056838a46ede29a79e0d2adc45
|
1ed647982e9b5ee9af0a2e2dd905461a010d2dfb
|
refs/heads/master
| 2021-07-04T11:00:10.497368
| 2021-05-09T11:43:46
| 2021-05-09T11:43:46
| 232,608,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
#PS: https://leetcode.com/problems/pascals-triangle/submissions/
class Solution:
def generate(self, numRows: int) -> List[List[int]]:
res = []
if numRows == 1:
return [[1]]
for i in range(1,numRows+1):
cur_row = [1]*i #initializing all the values with 1
for j in range(1,i-1): #Will exclude 1st and last index of previous resultant row
cur_row[j] = res[i-2][j-1] + res[i-2][j]
res.append(cur_row)
return res
|
[
"ankitk2109@gmail.com"
] |
ankitk2109@gmail.com
|
e1693fa0ce8a01fcb25ae182cd11423517e0f616
|
b12ffed1e2ee4ad8bf74abb412e772aad2a195d8
|
/data_wrangle/src/parse_excel.py
|
1adfe06ab50c3769a13d761c3cb8c2a73ca22385
|
[] |
no_license
|
scuate/MOOC
|
744f4ea2b53f47a4109251f6d82b1f462b025595
|
f72018171ac7565d4251c0f89319e20c3ab0fbc8
|
refs/heads/master
| 2020-06-09T06:20:37.040541
| 2015-12-28T01:42:01
| 2015-12-28T01:42:01
| 37,950,045
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,646
|
py
|
##get the max loads across all stations in the xls datafile
import xlrd
import os
import csv
from zipfile import ZipFile
datafile = "2013_ERCOT_Hourly_Load_Data.xls"
outfile = "2013_Max_Loads.csv"
def open_zip(datafile):
with ZipFile('{0}.zip'.format(datafile), 'r') as myzip:
myzip.extractall()
def parse_file(datafile):
workbook = xlrd.open_workbook(datafile)
sheet = workbook.sheet_by_index(0)
data=[]
stations = ['COAST', 'EAST', 'FAR_WEST', 'NORTH', 'NORTH_C', 'SOUTHERN', 'SOUTH_C', 'WEST']
for i in range(8):
cv = sheet.col_values(i+1,start_rowx=1,end_rowx=None)
maxload = max(cv)
maxpos = cv.index(maxload)+1
maxtime = sheet.cell_value(maxpos,0)
realtime = xlrd.xldate_as_tuple(maxtime,0)[:4]
data.append([stations[i]]+list(realtime)+[round(maxload,1)])
return data
def save_file(data, filename):
with open(filename, 'wb') as csvfile:
datawriter=csv.writer(csvfile,delimiter="|")
datawriter.writerow(["Station","Year","Month","Day","Hour","Max Load"])
for i in range(8):
datawriter.writerow(data[i])
csvfile.close()
def test():
data = parse_file(datafile)
save_file(data, outfile)
number_of_rows = 0
stations = []
ans = {'FAR_WEST': {'Max Load': '2281.2722140000024',
'Year': '2013',
'Month': '6',
'Day': '26',
'Hour': '17'}}
correct_stations = ['COAST', 'EAST', 'FAR_WEST', 'NORTH',
'NORTH_C', 'SOUTHERN', 'SOUTH_C', 'WEST']
fields = ['Year', 'Month', 'Day', 'Hour', 'Max Load']
with open(outfile) as of:
csvfile = csv.DictReader(of, delimiter="|")
for line in csvfile:
station = line['Station']
if station == 'FAR_WEST':
for field in fields:
# Check if 'Max Load' is within .1 of answer
if field == 'Max Load':
max_answer = round(float(ans[station][field]), 1)
max_line = round(float(line[field]), 1)
assert max_answer == max_line
# Otherwise check for equality
else:
assert ans[station][field] == line[field]
number_of_rows += 1
stations.append(station)
# Output should be 8 lines not including header
assert number_of_rows == 8
# Check Station Names
assert set(stations) == set(correct_stations)
if __name__ == "__main__":
test()
|
[
"xuexiaosu@gmail.com"
] |
xuexiaosu@gmail.com
|
2282c152c06546a82f340c485eafb5b3ed595424
|
f124cb2443577778d8708993c984eafbd1ae3ec3
|
/saleor/graphql/checkout/mutations/__init__.py
|
7718967b325810e573ce4fd4fe617dcc87905933
|
[
"BSD-3-Clause"
] |
permissive
|
quangtynu/saleor
|
ac467193a7779fed93c80251828ac85d92d71d83
|
5b0e5206c5fd30d81438b6489d0441df51038a85
|
refs/heads/master
| 2023-03-07T19:41:20.361624
| 2022-10-20T13:19:25
| 2022-10-20T13:19:25
| 245,860,106
| 1
| 0
|
BSD-3-Clause
| 2023-03-06T05:46:25
| 2020-03-08T17:44:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,556
|
py
|
from .checkout_add_promo_code import CheckoutAddPromoCode
from .checkout_billing_address_update import CheckoutBillingAddressUpdate
from .checkout_complete import CheckoutComplete
from .checkout_create import CheckoutCreate
from .checkout_customer_attach import CheckoutCustomerAttach
from .checkout_customer_detach import CheckoutCustomerDetach
from .checkout_delivery_method_update import CheckoutDeliveryMethodUpdate
from .checkout_email_update import CheckoutEmailUpdate
from .checkout_language_code_update import CheckoutLanguageCodeUpdate
from .checkout_line_delete import CheckoutLineDelete
from .checkout_lines_add import CheckoutLinesAdd
from .checkout_lines_delete import CheckoutLinesDelete
from .checkout_lines_update import CheckoutLinesUpdate
from .checkout_remove_promo_code import CheckoutRemovePromoCode
from .checkout_shipping_address_update import CheckoutShippingAddressUpdate
from .checkout_shipping_method_update import CheckoutShippingMethodUpdate
from .order_create_from_checkout import OrderCreateFromCheckout
__all__ = [
"CheckoutAddPromoCode",
"CheckoutBillingAddressUpdate",
"CheckoutComplete",
"CheckoutCreate",
"CheckoutCustomerAttach",
"CheckoutCustomerDetach",
"CheckoutDeliveryMethodUpdate",
"CheckoutEmailUpdate",
"CheckoutLanguageCodeUpdate",
"CheckoutLineDelete",
"CheckoutLinesAdd",
"CheckoutLinesDelete",
"CheckoutLinesUpdate",
"CheckoutRemovePromoCode",
"CheckoutShippingAddressUpdate",
"CheckoutShippingMethodUpdate",
"OrderCreateFromCheckout",
]
|
[
"noreply@github.com"
] |
quangtynu.noreply@github.com
|
838f2f8902ca4fdcf743b209c0a1ff7c7ab3412d
|
229ed0dad61f9e855de604c230d034a0bd9b3882
|
/EdabitPractice/evenOddCounter.py
|
06c675245b08e406cfb9c3b1124f90e1dd4de379
|
[] |
no_license
|
Darrenrodricks/EdabitPythonPractice
|
987d534dd149ddaef6219df381df850eabbe80b2
|
c1be8b10a6fcc1085640a1128f022c05fb2890a9
|
refs/heads/main
| 2023-07-17T00:36:43.772435
| 2021-08-31T16:24:07
| 2021-08-31T16:24:07
| 400,630,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
# Python program to count Even and Odd numbers in a List
# Input: list1 = [2, 7, 5, 64, 14]
# Output: Even = 3, odd = 2
a = 0
b = 0
list1 = [2, 7, 5, 64, 14]
for i in range(0, len(list1)):
    if list1[i] % 2 == 0:  # check the value at index i, not the index itself
a += 1
else:
b += 1
print("There are {} Even, and {} Odd".format(a, b))
|
[
"noreply@github.com"
] |
Darrenrodricks.noreply@github.com
|
22393b19c5fb7be5e9fe08ffa8f211847a997248
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/network/v20201101/get_load_balancer.py
|
fd9af3d72c16c5bbcb5d9fd47aa696d0901401e7
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,378
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetLoadBalancerResult',
'AwaitableGetLoadBalancerResult',
'get_load_balancer',
'get_load_balancer_output',
]
@pulumi.output_type
class GetLoadBalancerResult:
"""
LoadBalancer resource.
"""
def __init__(__self__, backend_address_pools=None, etag=None, extended_location=None, frontend_ip_configurations=None, id=None, inbound_nat_pools=None, inbound_nat_rules=None, load_balancing_rules=None, location=None, name=None, outbound_rules=None, probes=None, provisioning_state=None, resource_guid=None, sku=None, tags=None, type=None):
if backend_address_pools and not isinstance(backend_address_pools, list):
raise TypeError("Expected argument 'backend_address_pools' to be a list")
pulumi.set(__self__, "backend_address_pools", backend_address_pools)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if extended_location and not isinstance(extended_location, dict):
raise TypeError("Expected argument 'extended_location' to be a dict")
pulumi.set(__self__, "extended_location", extended_location)
if frontend_ip_configurations and not isinstance(frontend_ip_configurations, list):
raise TypeError("Expected argument 'frontend_ip_configurations' to be a list")
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if inbound_nat_pools and not isinstance(inbound_nat_pools, list):
raise TypeError("Expected argument 'inbound_nat_pools' to be a list")
pulumi.set(__self__, "inbound_nat_pools", inbound_nat_pools)
if inbound_nat_rules and not isinstance(inbound_nat_rules, list):
raise TypeError("Expected argument 'inbound_nat_rules' to be a list")
pulumi.set(__self__, "inbound_nat_rules", inbound_nat_rules)
if load_balancing_rules and not isinstance(load_balancing_rules, list):
raise TypeError("Expected argument 'load_balancing_rules' to be a list")
pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if outbound_rules and not isinstance(outbound_rules, list):
raise TypeError("Expected argument 'outbound_rules' to be a list")
pulumi.set(__self__, "outbound_rules", outbound_rules)
if probes and not isinstance(probes, list):
raise TypeError("Expected argument 'probes' to be a list")
pulumi.set(__self__, "probes", probes)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> Optional[Sequence['outputs.BackendAddressPoolResponse']]:
"""
Collection of backend address pools used by a load balancer.
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:
"""
The extended location of the load balancer.
"""
return pulumi.get(self, "extended_location")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> Optional[Sequence['outputs.FrontendIPConfigurationResponse']]:
"""
Object representing the frontend IPs to be used for the load balancer.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="inboundNatPools")
def inbound_nat_pools(self) -> Optional[Sequence['outputs.InboundNatPoolResponse']]:
"""
Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_pools")
@property
@pulumi.getter(name="inboundNatRules")
def inbound_nat_rules(self) -> Optional[Sequence['outputs.InboundNatRuleResponse']]:
"""
Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_rules")
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> Optional[Sequence['outputs.LoadBalancingRuleResponse']]:
"""
Object collection representing the load balancing rules Gets the provisioning.
"""
return pulumi.get(self, "load_balancing_rules")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundRules")
def outbound_rules(self) -> Optional[Sequence['outputs.OutboundRuleResponse']]:
"""
The outbound rules.
"""
return pulumi.get(self, "outbound_rules")
@property
@pulumi.getter
def probes(self) -> Optional[Sequence['outputs.ProbeResponse']]:
"""
Collection of probe objects used in the load balancer.
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the load balancer resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the load balancer resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.LoadBalancerSkuResponse']:
"""
The load balancer SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetLoadBalancerResult(GetLoadBalancerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLoadBalancerResult(
backend_address_pools=self.backend_address_pools,
etag=self.etag,
extended_location=self.extended_location,
frontend_ip_configurations=self.frontend_ip_configurations,
id=self.id,
inbound_nat_pools=self.inbound_nat_pools,
inbound_nat_rules=self.inbound_nat_rules,
load_balancing_rules=self.load_balancing_rules,
location=self.location,
name=self.name,
outbound_rules=self.outbound_rules,
probes=self.probes,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_load_balancer(expand: Optional[str] = None,
load_balancer_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLoadBalancerResult:
"""
LoadBalancer resource.
:param str expand: Expands referenced resources.
:param str load_balancer_name: The name of the load balancer.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['loadBalancerName'] = load_balancer_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20201101:getLoadBalancer', __args__, opts=opts, typ=GetLoadBalancerResult).value
return AwaitableGetLoadBalancerResult(
backend_address_pools=__ret__.backend_address_pools,
etag=__ret__.etag,
extended_location=__ret__.extended_location,
frontend_ip_configurations=__ret__.frontend_ip_configurations,
id=__ret__.id,
inbound_nat_pools=__ret__.inbound_nat_pools,
inbound_nat_rules=__ret__.inbound_nat_rules,
load_balancing_rules=__ret__.load_balancing_rules,
location=__ret__.location,
name=__ret__.name,
outbound_rules=__ret__.outbound_rules,
probes=__ret__.probes,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_load_balancer)
def get_load_balancer_output(expand: Optional[pulumi.Input[Optional[str]]] = None,
load_balancer_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLoadBalancerResult]:
"""
LoadBalancer resource.
:param str expand: Expands referenced resources.
:param str load_balancer_name: The name of the load balancer.
:param str resource_group_name: The name of the resource group.
"""
...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
9cca242910678dbdb4fce620cc6f69091f65087c
|
539b031a4edd1aec31af5b6658f25a0de03776a4
|
/strings_and_text/sub_re_groups1.py
|
b91fbc11289d3b5f5a17a2b714d35dde5bec785c
|
[] |
no_license
|
leogtzr/python-cookbook-code-snippets
|
c517e7f14e468e1aa8def71d3389348150d43085
|
a3f189c26ba38bc982dd140b3b4d6326b39671dc
|
refs/heads/main
| 2023-01-23T07:16:30.292456
| 2020-11-28T04:29:42
| 2020-11-28T04:29:42
| 309,217,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
import re
from calendar import month_abbr
def change_date(m):
mon_name = month_abbr[int(m.group(1))]
return '[{}] ({}) |{}|'.format(m.group(2), mon_name, m.group(3))
datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
text = 'Today is 11/27/2012. PyCon starts 3/13/2013.'
# a substitution callback function
print(datepat.sub(change_date, text))
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# print(month_abbr[12])
for month_num in range(1, 13):
print(month_abbr[month_num])
|
[
"leogutierrezramirez@gmail.com"
] |
leogutierrezramirez@gmail.com
|
49f6016496073d31808c5ceda4ff0bb6ac102c09
|
974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184
|
/sdk/ml/azure-ai-ml/tests/compute/unittests/test_compute_operations.py
|
ecfc11bd07cf4aafd9e8a34abaa324d6be10f0af
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
gaoyp830/azure-sdk-for-python
|
4816f04c554dcffb7510a6b7044b0c86a2dd32e1
|
1c66defa502b754abcc9e5afa444ca03c609342f
|
refs/heads/master
| 2022-10-20T21:33:44.281041
| 2022-09-29T17:03:13
| 2022-09-29T17:03:13
| 250,355,505
| 0
| 0
|
MIT
| 2020-03-26T19:42:13
| 2020-03-26T19:42:12
| null |
UTF-8
|
Python
| false
| false
| 4,501
|
py
|
from typing import Callable
from unittest.mock import Mock
import pytest
import vcr
from pytest_mock import MockFixture
from azure.ai.ml import load_compute
from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationScope
from azure.ai.ml.entities import AmlCompute, Compute, ComputeInstance, IdentityConfiguration, UserAssignedIdentity
from azure.ai.ml.operations import ComputeOperations
from azure.identity import DefaultAzureCredential
@pytest.fixture
def mock_compute_operation(
mock_workspace_scope: OperationScope, mock_operation_config: OperationConfig, mock_aml_services_2021_10_01: Mock
) -> ComputeOperations:
yield ComputeOperations(
operation_scope=mock_workspace_scope,
operation_config=mock_operation_config,
service_client=mock_aml_services_2021_10_01,
)
class funny:
def __init__(self):
self.location = "somelocation"
@pytest.mark.unittest
class TestComputeOperation:
def test_list(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.list()
mock_compute_operation._operation.list.assert_called_once()
def test_create_compute_instance(
self, mock_compute_operation: ComputeOperations, mocker: MockFixture
) -> None:
mocker.patch(
"azure.ai.ml._restclient.v2021_10_01.workspaces.get",
return_value=funny(),
)
mocker.patch(
"azure.ai.ml.entities.Compute._from_rest_object",
return_value=ComputeInstance(name="name", resource_id="test_resource_id"),
)
compute = load_compute("./tests/test_configs/compute/compute-ci-unit.yaml")
mock_compute_operation.begin_create_or_update(compute=compute)
mock_compute_operation._operation.begin_create_or_update.assert_called_once()
def test_create_aml_compute(
self, mock_compute_operation: ComputeOperations, mocker: MockFixture
) -> None:
mocker.patch("azure.ai.ml._restclient.v2021_10_01.workspaces.get", return_value=funny())
compute = load_compute("./tests/test_configs/compute/compute-aml.yaml")
mock_compute_operation.begin_create_or_update(compute=compute)
mock_compute_operation._operation.begin_create_or_update.assert_called_once()
def test_delete(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_delete("randstr")
mock_compute_operation._operation.begin_delete.assert_called_once()
def test_show(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.get("randstr")
mock_compute_operation._operation.get.assert_called_once()
def test_start(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_start("randstr")
mock_compute_operation._operation.begin_start.assert_called_once()
def test_stop(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_stop("randstr")
mock_compute_operation._operation.begin_stop.assert_called_once()
def test_restart(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_restart("randstr")
mock_compute_operation._operation.begin_restart.assert_called_once()
def test_update_aml_compute(
self, mock_compute_operation: ComputeOperations, mocker: MockFixture
) -> None:
compute = AmlCompute(
name="name",
tags={"key1": "value1", "key2": "value2"},
min_instances=0,
max_instances=10,
idle_time_before_scale_down=100,
identity=IdentityConfiguration(
type="UserAssigned",
user_assigned_identities=[
UserAssignedIdentity(
resource_id="/subscriptions/b17253fa-f327-42d6-9686-f3e553e24763/resourcegroups/MC_banibatch_bani-aks_eastus/providers/Microsoft.ManagedIdentity/userAssignedIdentities/omsagent-bani-aks"
)
],
),
)
mock_compute_operation.begin_update(compute)
mock_compute_operation._operation.begin_create_or_update.assert_called_once()
def test_detach(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_delete(
name="randstr",
action="Detach",
)
mock_compute_operation._operation.begin_delete.assert_called_once()
|
[
"noreply@github.com"
] |
gaoyp830.noreply@github.com
|
0998ba915d80be9aaf99b9cef30acdb467528d1c
|
2e145222a18d4509d937951f5cec4df0e26ee86f
|
/vas/gemfire/CacheServerInstances.py
|
9405a719dd10335d0ccd1c552ba07c8a6ef1c57d
|
[
"Apache-2.0"
] |
permissive
|
vdreamakitex/vas-python-api
|
7627b7e3fcf76c16b1ea8b9fb670fdb708eff083
|
ce7148a2044863e078e78b47abbaafc426f732ee
|
refs/heads/master
| 2021-01-18T05:13:25.459916
| 2012-11-05T09:58:45
| 2012-11-05T09:58:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,867
|
py
|
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Instance import Instance
from vas.shared.MutableCollection import MutableCollection
from vas.util.LinkUtils import LinkUtils
class CacheServerInstances(MutableCollection):
"""Used to enumerate, create, and delete cache server instances
:ivar `vas.shared.Security.Security` security: The resource's security
"""
def __init__(self, client, location):
super(CacheServerInstances, self).__init__(client, location, 'cache-server-group-instances',
CacheServerInstance)
def create(self, installation, name):
"""Creates a new cache server instance
:param `vas.gemfire.Installations.Installation` installation: The installation to be used by the instance
:param str name: The name of the instance
:rtype: :class:`vas.gemfire.CacheServerInstances.CacheServerInstance`
:return: The new cache server instance
"""
payload = {'installation': installation._location, 'name': name}
return self._create(payload, 'cache-server-group-instance')
class CacheServerInstance(Instance):
"""A cache server instance
:ivar `vas.gemfire.Groups.Group` group: The group that contains this instance
:ivar `vas.gemfire.Installations.Installation` installation: The installation that this instance is using
:ivar `vas.gemfire.LiveApplicationCodes.LiveApplicationCodes` live_application_code: The instance's live
application code
:ivar `vas.gemfire.CacheServerLiveConfigurations.CacheServerLiveConfigurations` live_configurations: The instance's live
configurations
:ivar str name: The instance's name
:ivar list node_instances: The instance's individual node instances
:ivar `vas.gemfire.PendingApplicationCodes.PendingApplicationCodes` pending_application_code: The instance's
pending application
code
:ivar `vas.gemfire.CacheServerPendingConfigurations.CacheServerPendingConfigurations` pending_configurations: The instance's
pending configurations
:ivar `vas.shared.Security.Security` security: The resource's security
:ivar str state: Retrieves the state of the resource from the server.
Will be one of:
* ``STARTING``
* ``STARTED``
* ``STOPPING``
* ``STOPPED``
"""
__live_application_code = None
__pending_application_code = None
@property
def live_application_code(self):
self.__live_application_code = self.__live_application_code or LiveApplicationCodes(self._client,
self.__live_application_code_location)
return self.__live_application_code
@property
def pending_application_code(self):
self.__pending_application_code = self.__pending_application_code or PendingApplicationCodes(self._client,
self.__pending_application_code_location)
return self.__pending_application_code
def __init__(self, client, location):
super(CacheServerInstance, self).__init__(client, location, Group, Installation, CacheServerLiveConfigurations,
CacheServerPendingConfigurations, CacheServerNodeInstance, 'cache-server-node-instance')
self.__live_application_code_location = LinkUtils.get_link_href(self._details, 'live-application-code')
self.__pending_application_code_location = LinkUtils.get_link_href(self._details, 'pending-application-code')
def update(self, installation):
"""Updates the instance to use a different installation
:param `vas.gemfire.Installations.Installation` installation: The installation that the instance should use
"""
self._client.post(self._location, {'installation': installation._location})
self.reload()
from vas.gemfire.CacheServerLiveConfigurations import CacheServerLiveConfigurations
from vas.gemfire.CacheServerNodeInstances import CacheServerNodeInstance
from vas.gemfire.CacheServerPendingConfigurations import CacheServerPendingConfigurations
from vas.gemfire.Groups import Group
from vas.gemfire.Installations import Installation
from vas.gemfire.LiveApplicationCodes import LiveApplicationCodes
from vas.gemfire.PendingApplicationCodes import PendingApplicationCodes
|
[
"bhale@vmware.com"
] |
bhale@vmware.com
|
f89f1ded358f684cfea5a04737b4a6369bd9f99b
|
b2f7d3c8b8e5eee108a600b611183bdb5c0399da
|
/GIT/Data Structures/Chess Board/knight_tour.py
|
e6ced2bee0f84ab08c5534243fc53565c3122703
|
[] |
no_license
|
yellowRanger1111/Algorithm-and-Data-Structures
|
ca39566d8d4236bcaffe1a12fff18a1e24eaa760
|
0f3a2ebb6309e46117de66e5b4c799f70f6ec9cb
|
refs/heads/main
| 2023-09-03T09:22:27.248617
| 2021-10-16T07:09:14
| 2021-10-16T07:09:14
| 417,740,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,840
|
py
|
#getting the tour class
from tour import Tour
#creating the knight object
knight = Tour()
#initialize the loop
quitted = False
# knight.move_knight(2,1)
# knight.move_knight(2,5)
# knight.show_tour()
# knight.delete_pos((2,5))
# knight.show_tour()
while not quitted:
#what the user want to do?
user_input = input("1 : Move Knight \n2 : Undo\n3 : Reset Board\n4 : Save List\n5 : Restore List\n6 : Quit\n--> ")
if (user_input == "1"):
#printing next move
possible_moves = knight.next_move()
print("possibe moves", possible_moves)
quit_inner_loop = False
#if no more possible moves, player automatically quit
if (len(possible_moves) == 0):
print ("LOL U LOSE")
quitted = True
            # the player has lost, so skip asking for another move (the outer loop will then exit)
            continue
while not quit_inner_loop:
#user input
row_new = input("Move knight to which row ? ")
col_new = input("Move knight to which col ? ")
#making sure is the input is a number
try:
row_new, col_new = int(row_new), int(col_new)
#if err
except ValueError as err:
print ("err : ", err)
#if true, do this code
else:
#if valid do it
if (knight.valid_move(possible_moves, row_new, col_new)):
quit_inner_loop= True
#not valid, cannot move
else:
print("not possible! Knight can only move in L\n")
#move the knight to designated space
knight.move_knight(row_new, col_new)
#show the tour
knight.show_tour()
elif (user_input == "2"):
#delete the last move
knight.undo_moves()
#show the tour
knight.show_tour()
elif (user_input == "3"):
#ask for the new starting position
row_new = int(input("Move knight to which row ? "))
col_new = int(input("Move knight to which col ? "))
try:
row_new, col_new = int(row_new), int(col_new)
except ValueError as err:
print ("err : ", err)
else:
#reset
knight.reset_board(row_new, col_new)
#show tour
knight.show_tour()
elif user_input == "4":
#ask to save
knight.copy_list()
print("List have been saved")
elif user_input == "5":
#restore list
knight.set_list()
print("List have been Restored")
knight.show_tour()
elif (user_input == "6"):
#means user want to quit
quitted = True
print ("")
|
[
"noreply@github.com"
] |
yellowRanger1111.noreply@github.com
|
9f6fbd9a7403ab1554b410967fdf106ad6d85420
|
1a3f53438a1597b4aa930c91740cd8db72c66fcf
|
/payments/models.py
|
42a4c7e5491d02b81fe035af576d7ac0ccf6cfc8
|
[] |
no_license
|
milad-ghr/strips_project
|
de898afef145cccb149f4f68794dc61831ee03f8
|
ddc985b36fc704aa803ee82dfbc271b8fd5794ec
|
refs/heads/master
| 2023-07-15T09:10:52.572753
| 2021-08-26T12:21:33
| 2021-08-26T12:21:33
| 399,731,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,111
|
py
|
from django.db import models
from django_cryptography.fields import encrypt
from products.models import Product
from users.models import Subscription, StripsCustomer
class PaymentsManager(models.Manager):
pass
class PaymentsManagerWithNoDeleted(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(deleted=False)
class AbstractPaymentModel(models.Model):
creation_date = models.DateTimeField(auto_now_add=True)
modification_date = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(default=False)
deleted_at = models.DateTimeField(null=True, blank=True)
objects = PaymentsManagerWithNoDeleted()
allobjects = PaymentsManager()
class Meta:
abstract = True
class Card(models.Model):
number = encrypt(models.CharField(max_length=16, unique=True))
expiration_month = encrypt(models.CharField(max_length=2))
expiration_year = encrypt(models.CharField(max_length=4))
cvc = encrypt(models.CharField(max_length=4))
brand = models.CharField(max_length=15, null=True, blank=True)
country = models.CharField(max_length=10, null=True, blank=True)
fingerprint = encrypt(models.CharField(max_length=30, null=True, blank=True))
funding = models.CharField(max_length=10, null=True, blank=True)
class Meta:
ordering = ['id']
class Payment(AbstractPaymentModel):
class PaymentMethodsChoices(models.IntegerChoices):
CARD = 1
payment_method = models.IntegerField(choices=PaymentMethodsChoices.choices,
default=PaymentMethodsChoices.CARD.value
)
payment_id = models.CharField(max_length=50, null=True, blank=True)
strip_customer = models.ForeignKey(StripsCustomer,
on_delete=models.SET_NULL,
null=True,
related_name='payments',
related_query_name='payment'
)
card = models.ForeignKey(Card,
on_delete=models.SET_NULL,
null=True,
related_name='payments',
related_query_name='payment'
)
subscription = models.OneToOneField(Subscription,
on_delete=models.SET_NULL,
null=True,
related_name='payment',
related_query_name='payment'
)
product = models.ForeignKey(Product,
on_delete=models.SET_NULL,
null=True,
related_name='payments',
related_query_name='payment'
)
billing_detail = models.JSONField(null=True, blank=True)
class Meta:
ordering = ['id']
|
[
"gholamrezaei.milad@gmail.com"
] |
gholamrezaei.milad@gmail.com
|
53bc8edebb6fabc73a2cacad23ca6d8b08fa9b0a
|
16450d59c820298f8803fd40a1ffa2dd5887e103
|
/baekjoon/2667.py
|
b85ee7659586696418e866ced977042046429337
|
[] |
no_license
|
egyeasy/TIL_public
|
f78c11f81d159eedb420f5fa177c05d310c4a039
|
e2f40eda09cb0a65cc064d9ba9b0e2fa7cbbcb38
|
refs/heads/master
| 2021-06-21T01:22:16.516777
| 2021-02-02T13:16:21
| 2021-02-02T13:16:21
| 167,803,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,257
|
py
|
"""
A square map is given, as in <Figure 1>. A 1 marks a cell with a house and a 0 a cell without one. Cheolsu wants to group connected houses into "complexes" and number each complex. Two houses are connected when one lies directly to the left, right, above, or below the other; diagonal neighbours do not count. <Figure 2> shows <Figure 1> with the complexes numbered. Given the map, write a program that prints the number of complexes and then the number of houses in each complex, sorted in ascending order.
> input
The first line contains the size of the map N (the map is square, so width and height are equal; 5 ≤ N ≤ 25). Each of the next N lines contains N digits (0 or 1).
7
0110100
0110101
1110101
0000111
0100000
0111110
0111000
> output
Print the total number of complexes on the first line. Then print the number of houses in each complex in ascending order, one per line.
3
7
8
9
"""
import sys
sys.stdin = open('2667.txt', 'r')
each_cnt = 0
def DFS(s):
global each_cnt
visited[s[0]][s[1]] = 1
each_cnt += 1
go_list = [[-1, 0], [0, 1], [1, 0], [0, -1]]
for go in go_list:
if matrix[s[0] + go[0]][s[1] + go[1]] == 1 and not visited[s[0] + go[0]][s[1] + go[1]]:
DFS([s[0] + go[0], s[1] + go[1]])
m = int(input())
matrix = [[0] * (m + 2) for i in range(m + 2)]
visited = [[0] * (m + 2) for i in range(m + 2)]
for i in range(m):
aline = list(map(int, input()))
for j in range(m):
matrix[i + 1][j + 1] = aline[j]
# for i in matrix:
# print(i)
total_cnt = 0
each_cnt = 0
cnts = [0] * (m**2)
idx = 0
for i in range(1, m + 2):
for j in range(1, m + 2):
if matrix[i][j] == 1 and not visited[i][j]:
each_cnt = 0
total_cnt += 1
DFS([i, j])
# print(each_cnt)
cnts[idx] = each_cnt
idx += 1
print(total_cnt)
for i in sorted(cnts[:total_cnt]):
print(i)
# idea
# 1. Some bookkeeping details (a padded border, per-complex counters) are added on top of a plain DFS.
# 2. Most important: recognizing that this is a DFS / connected-components problem.
|
[
"dz1120@gmail.com"
] |
dz1120@gmail.com
|
e8c2786ad69cfccec2ad37b66382443519baed1a
|
59fb17c240b261040026d713a6ac9c97d6a9f265
|
/gym/gym/envs/registration.py
|
18519749167fe193d8d2cb3b3348653ae837fd17
|
[
"MIT"
] |
permissive
|
dmeger/TeachingImitation
|
3fb97499e76929959913266f127154f6ae5a8e99
|
5f4dba7e49987924c3d55cd27579cad4c71ef7a4
|
refs/heads/master
| 2023-03-28T13:25:01.307382
| 2021-04-06T15:07:08
| 2021-04-06T15:07:08
| 355,223,500
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,883
|
py
|
import re
import copy
import importlib
import warnings
from gym import error, logger
# This format is true today, but it's *not* an official spec.
# [username/](env-name)-v(version) env-name is group 1, version is group 2
#
# 2016-10-31: We're experimentally expanding the environment ID format
# to include an optional username.
env_id_re = re.compile(r'^(?:[\w:-]+\/)?([\w:.-]+)-v(\d+)$')
def load(name):
mod_name, attr_name = name.split(":")
mod = importlib.import_module(mod_name)
fn = getattr(mod, attr_name)
return fn
class EnvSpec(object):
"""A specification for a particular instance of the environment. Used
to register the parameters for official evaluations.
Args:
id (str): The official environment ID
entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
reward_threshold (Optional[int]): The reward threshold before the task is considered solved
nondeterministic (bool): Whether this environment is non-deterministic even after seeding
max_episode_steps (Optional[int]): The maximum number of steps that an episode can consist of
kwargs (dict): The kwargs to pass to the environment class
"""
def __init__(self, id, entry_point=None, reward_threshold=None, nondeterministic=False, max_episode_steps=None, kwargs=None):
self.id = id
self.entry_point = entry_point
self.reward_threshold = reward_threshold
self.nondeterministic = nondeterministic
self.max_episode_steps = max_episode_steps
self._kwargs = {} if kwargs is None else kwargs
match = env_id_re.search(id)
if not match:
raise error.Error('Attempted to register malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id, env_id_re.pattern))
self._env_name = match.group(1)
def make(self, **kwargs):
"""Instantiates an instance of the environment with appropriate kwargs"""
if self.entry_point is None:
raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))
_kwargs = self._kwargs.copy()
_kwargs.update(kwargs)
if callable(self.entry_point):
env = self.entry_point(**_kwargs)
else:
cls = load(self.entry_point)
env = cls(**_kwargs)
# Make the environment aware of which spec it came from.
spec = copy.deepcopy(self)
spec._kwargs = _kwargs
env.unwrapped.spec = spec
return env
def __repr__(self):
return "EnvSpec({})".format(self.id)
class EnvRegistry(object):
"""Register an env by ID. IDs remain stable over time and are
guaranteed to resolve to the same environment dynamics (or be
desupported). The goal is that results on a particular environment
should always be comparable, and not depend on the version of the
code that was running.
"""
def __init__(self):
self.env_specs = {}
def make(self, path, **kwargs):
if len(kwargs) > 0:
logger.info('Making new env: %s (%s)', path, kwargs)
else:
logger.info('Making new env: %s', path)
spec = self.spec(path)
env = spec.make(**kwargs)
# We used to have people override _reset/_step rather than
# reset/step. Set _gym_disable_underscore_compat = True on
# your environment if you use these methods and don't want
# compatibility code to be invoked.
if hasattr(env, "_reset") and hasattr(env, "_step") and not getattr(env, "_gym_disable_underscore_compat", False):
patch_deprecated_methods(env)
if env.spec.max_episode_steps is not None:
from gym.wrappers.time_limit import TimeLimit
env = TimeLimit(env, max_episode_steps=env.spec.max_episode_steps)
return env
def all(self):
return self.env_specs.values()
def spec(self, path):
if ':' in path:
mod_name, _sep, id = path.partition(':')
try:
importlib.import_module(mod_name)
# catch ImportError for python2.7 compatibility
except ImportError:
raise error.Error('A module ({}) was specified for the environment but was not found, make sure the package is installed with `pip install` before calling `gym.make()`'.format(mod_name))
else:
id = path
match = env_id_re.search(id)
if not match:
raise error.Error('Attempted to look up malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id.encode('utf-8'), env_id_re.pattern))
try:
return self.env_specs[id]
except KeyError:
# Parse the env name and check to see if it matches the non-version
# part of a valid env (could also check the exact number here)
env_name = match.group(1)
matching_envs = [valid_env_name for valid_env_name, valid_env_spec in self.env_specs.items()
if env_name == valid_env_spec._env_name]
if matching_envs:
raise error.DeprecatedEnv('Env {} not found (valid versions include {})'.format(id, matching_envs))
else:
raise error.UnregisteredEnv('No registered env with id: {}'.format(id))
def register(self, id, **kwargs):
if id in self.env_specs:
raise error.Error('Cannot re-register id: {}'.format(id))
self.env_specs[id] = EnvSpec(id, **kwargs)
# Have a global registry
registry = EnvRegistry()
def register(id, **kwargs):
return registry.register(id, **kwargs)
def make(id, **kwargs):
return registry.make(id, **kwargs)
def spec(id):
return registry.spec(id)
warn_once = True
def patch_deprecated_methods(env):
"""
Methods renamed from '_method' to 'method', render() no longer has 'close' parameter, close is a separate method.
For backward compatibility, this makes it possible to work with unmodified environments.
"""
global warn_once
if warn_once:
logger.warn("Environment '%s' has deprecated methods '_step' and '_reset' rather than 'step' and 'reset'. Compatibility code invoked. Set _gym_disable_underscore_compat = True to disable this behavior." % str(type(env)))
warn_once = False
env.reset = env._reset
env.step = env._step
env.seed = env._seed
def render(mode):
return env._render(mode, close=False)
def close():
env._render("human", close=True)
env.render = render
env.close = close
|
[
"david.meger@gmail.com"
] |
david.meger@gmail.com
|
c5b2feb974eeccc764b107a02ec1e4787afc5347
|
25724c6023af28e720e0be24c265ac30b5552eb8
|
/base/convertor.py
|
889585323ed605ebefb17cc6109405832c4d911f
|
[] |
no_license
|
imyelo/sudoager
|
81e59170d64da9614901dcbb997d5a4a89da33df
|
b10cc197bb6a4cfd302aa80f811b5e106a02e2c1
|
refs/heads/master
| 2016-09-05T18:16:11.209845
| 2012-12-26T07:49:48
| 2012-12-26T07:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
#coding=utf-8
# Filename: convertor.py
import math
# --Convertor--
# Decimal to Ternary
def getTer3(i, base=3):
return int(math.floor(i / (base ** 3)) % base)
def getTer2(i, base=3):
return int(math.floor(i / (base ** 2)) % base)
def getTer1(i, base=3):
return int(math.floor(i / base) % base)
def getTer0(i, base=3):
return int(i % base)
# id to R,C,B,A
def id2Row(id, base=3):
return int(getTer3(id, base) * base + getTer1(id, base))
def id2Column(id, base=3):
return int(getTer2(id, base) * base + getTer0(id, base))
def id2Box(id, base=3):
return int(getTer3(id, base) * base + getTer2(id, base))
def id2Atom(id, base=3):
return int(getTer1(id, base) * base + getTer0(id, base))
# B&A to id,R,C
def BA2id(box, atom, base=3):
return int(getTer1(box, base) * (base ** 3) + getTer0(box, base) * (base ** 2) + getTer1(atom, base) * base + getTer0(atom, base))
def BA2Row(box, atom, base=3):
return int(getTer1(box, base) * base + getTer1(atom, base))
def BA2Column(box, atom, base=3):
return int(getTer0(box, base) * base + getTer0(atom, base))
# R&C to id,B,A
def RC2id(row, column, base=3):
return int(getTer1(row, base) * (base ** 3) + getTer1(column, base) * (base ** 2) + getTer0(row, base) * base + getTer0(column, base))
def RC2Box(row, column, base=3):
return int(getTer1(row, base) * base + getTer1(column, base))
def RC2Atom(row, column, base=3):
return int(getTer0(row, base) * base + getTer0(column, base))
# --/Convertor--
|
[
"zhihuzeye@gmail.com"
] |
zhihuzeye@gmail.com
|
926a6e5c2f8e14ca41571537b899f12932268bbd
|
5258903bb9cdeedf13a7101aa98e82c915914974
|
/curriculum/migrations/0001_initial.py
|
d9ad40787e5dfc4ba0c58df54e9178a4392de19e
|
[
"BSD-2-Clause"
] |
permissive
|
ZuluPro/django-cv
|
7328e4f3e30ecfef7c5e6e598d5b95986ed7dbe9
|
64a7fda155d7052642484ebc9a7e7822d73ea1b0
|
refs/heads/master
| 2020-05-29T18:12:29.262859
| 2016-06-12T23:13:11
| 2016-06-12T23:13:11
| 45,745,590
| 10
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,493
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import curriculum.models.utils
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Certification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50, verbose_name='title')),
('authority', models.CharField(max_length=200, verbose_name='authority')),
('url', models.URLField(max_length=300, verbose_name='URL', blank=True)),
('description', models.TextField(max_length=2000, verbose_name='description', blank=True)),
],
),
migrations.CreateModel(
name='CertificationItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_year', models.IntegerField(verbose_name='start year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('start_month', models.IntegerField(verbose_name='start month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('expires', models.BooleanField(default=False, verbose_name='expires')),
('end_year', models.IntegerField(blank=True, null=True, verbose_name='end year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('end_month', models.IntegerField(blank=True, null=True, verbose_name='end month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('certification', models.ForeignKey(related_name='items', to='curriculum.Certification')),
],
),
migrations.CreateModel(
name='Experience',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, verbose_name='title')),
('entreprise', models.CharField(max_length=200, verbose_name='entreprise')),
('context', models.TextField(max_length=1000, verbose_name='context', blank=True)),
('description', models.TextField(max_length=3000, verbose_name='description', blank=True)),
('results', models.TextField(max_length=3000, verbose_name='results', blank=True)),
('type', models.CharField(max_length=5, null=True, verbose_name='type', choices=[(None, 'unknown'), (b'SALAR', 'salaried'), (b'CHIEF', 'founder/chief'), (b'FREEL', 'freelance/chief'), (b'OTHER', 'other')])),
('environment', models.CharField(max_length=400, verbose_name='environment', blank=True)),
('start_year', models.IntegerField(default=curriculum.models.utils.current_year, verbose_name='start year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('start_month', models.IntegerField(default=curriculum.models.utils.current_month, verbose_name='start month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('still', models.BooleanField(default=True, verbose_name='still in office')),
('end_year', models.IntegerField(blank=True, null=True, verbose_name='end year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('end_month', models.IntegerField(blank=True, null=True, verbose_name='end month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('weight', models.IntegerField(default=1, verbose_name='weight', choices=[(0, 'Minor'), (1, 'Medium'), (2, 'Major')])),
],
),
migrations.CreateModel(
name='Language',
fields=[
('name', models.CharField(max_length=50, unique=True, serialize=False, verbose_name='name', primary_key=True)),
('description', models.TextField(max_length=2000, verbose_name='description', blank=True)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='LanguageItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(default=b'NOT', max_length=5, verbose_name='level', choices=[(b'NOT', 'Notion'), (b'BAS', 'basic'), (b'ADV', 'advanced'), (b'PRO', 'professional'), (b'BIL', 'bilingual')])),
('language', models.ForeignKey(related_name='items', to='curriculum.Language')),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(unique=True, max_length=200, verbose_name='title')),
('description', models.TextField(max_length=3000, verbose_name='description', blank=True)),
('url', models.URLField(max_length=300, verbose_name='URL', blank=True)),
],
),
migrations.CreateModel(
name='ProjectItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('contribution', models.TextField(max_length=3000, verbose_name='contribution', blank=True)),
('start_year', models.IntegerField(default=curriculum.models.utils.current_year, verbose_name='start year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('start_month', models.IntegerField(default=curriculum.models.utils.current_month, verbose_name='start month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('still', models.BooleanField(default=True, verbose_name='still contributor')),
('end_year', models.IntegerField(blank=True, null=True, verbose_name='end year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('end_month', models.IntegerField(blank=True, null=True, verbose_name='end month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('weight', models.IntegerField(default=1, verbose_name='weight', choices=[(0, 'Minor'), (1, 'Medium'), (2, 'Major')])),
('project', models.ForeignKey(related_name='items', to='curriculum.Project')),
],
),
migrations.CreateModel(
name='Resume',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('firstname', models.CharField(max_length=150, verbose_name='First name')),
('lastname', models.CharField(max_length=150, verbose_name='Last name')),
('title', models.CharField(max_length=200, null=True, verbose_name='Title', blank=True)),
('resume', models.TextField(help_text="Short profile's description", max_length=3000, null=True, verbose_name='resume', blank=True)),
('image', models.ImageField(upload_to=b'', null=True, verbose_name='image', blank=True)),
('phone', models.CharField(max_length=100, null=True, verbose_name='phone', blank=True)),
('website', models.URLField(max_length=300, null=True, verbose_name='website', blank=True)),
('email', models.CharField(max_length=100, null=True, verbose_name='email', blank=True)),
('city', models.CharField(max_length=100, null=True, verbose_name='city', blank=True)),
('country', models.CharField(max_length=100, null=True, verbose_name='country', blank=True)),
('address', models.CharField(max_length=300, null=True, verbose_name='address', blank=True)),
('skill_summary', models.TextField(max_length=1000, null=True, verbose_name='summary of skills', blank=True)),
('experience_summary', models.TextField(max_length=1000, null=True, verbose_name='summary of experience', blank=True)),
('training_summary', models.TextField(max_length=1000, null=True, verbose_name='summary of trainings', blank=True)),
('project_summary', models.TextField(max_length=1000, null=True, verbose_name='summary of projects', blank=True)),
('driving_license', models.CharField(max_length=100, null=True, verbose_name='driving license', blank=True)),
('hobbies', models.TextField(max_length=1000, null=True, verbose_name='hobbies', blank=True)),
('tags', models.CharField(max_length=500, null=True, verbose_name='tags', blank=True)),
('skype', models.CharField(max_length=100, null=True, verbose_name='Skype ID', blank=True)),
('twitter', models.CharField(max_length=100, null=True, verbose_name='Twitter', blank=True)),
('linkedin', models.CharField(max_length=100, null=True, verbose_name='LinkedIn ID', blank=True)),
('google', models.CharField(max_length=100, null=True, verbose_name='Google+ ID', blank=True)),
('stackoverflow', models.IntegerField(null=True, verbose_name='StackOverflow ID', blank=True)),
('github', models.CharField(max_length=300, null=True, verbose_name='GitHub ID', blank=True)),
],
options={
'verbose_name': 'resume',
},
),
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=200, verbose_name='name')),
('description', models.TextField(max_length=2000, verbose_name='description', blank=True)),
('url', models.URLField(max_length=300, verbose_name='URL', blank=True)),
('tags', models.CharField(max_length=500, verbose_name='tags', blank=True)),
('color', models.CharField(max_length=50, verbose_name='color', blank=True)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='SkillItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(max_length=1, verbose_name='level', choices=[(None, 'unknown'), (b'B', 'beginner'), (b'S', 'skilled'), (b'A', 'advanced'), (b'E', 'expert')])),
('category', models.CharField(max_length=50, verbose_name='category', blank=True)),
('start_year', models.IntegerField(default=curriculum.models.utils.current_year, null=True, verbose_name='start year', blank=True, choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('start_month', models.IntegerField(default=curriculum.models.utils.current_month, null=True, verbose_name='start month', blank=True, choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('weight', models.IntegerField(default=1, verbose_name='weight', choices=[(0, 'Minor'), (1, 'Medium'), (2, 'Major')])),
('resume', models.ForeignKey(related_name='skills', to='curriculum.Resume')),
('skill', models.ForeignKey(related_name='items', to='curriculum.Skill')),
],
),
migrations.CreateModel(
name='Training',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('school', models.CharField(max_length=150, verbose_name='school')),
('degree', models.CharField(max_length=150, verbose_name='degree')),
('topic', models.CharField(max_length=150, verbose_name='topic', blank=True)),
('result', models.CharField(max_length=150, verbose_name='result', blank=True)),
('description', models.TextField(max_length=3000, verbose_name='description', blank=True)),
('year', models.IntegerField(verbose_name='year', choices=[(2029, 2029), (2028, 2028), (2027, 2027), (2026, 2026), (2025, 2025), (2024, 2024), (2023, 2023), (2022, 2022), (2021, 2021), (2020, 2020), (2019, 2019), (2018, 2018), (2017, 2017), (2016, 2016), (2015, 2015), (2014, 2014), (2013, 2013), (2012, 2012), (2011, 2011), (2010, 2010), (2009, 2009), (2008, 2008), (2007, 2007), (2006, 2006), (2005, 2005), (2004, 2004), (2003, 2003), (2002, 2002), (2001, 2001), (2000, 2000), (1999, 1999), (1998, 1998), (1997, 1997), (1996, 1996), (1995, 1995), (1994, 1994), (1993, 1993), (1992, 1992), (1991, 1991), (1990, 1990), (1989, 1989), (1988, 1988), (1987, 1987), (1986, 1986), (1985, 1985), (1984, 1984), (1983, 1983), (1982, 1982), (1981, 1981), (1980, 1980), (1979, 1979), (1978, 1978), (1977, 1977), (1976, 1976), (1975, 1975), (1974, 1974), (1973, 1973), (1972, 1972), (1971, 1971), (1970, 1970), (1969, 1969), (1968, 1968), (1967, 1967), (1966, 1966), (1965, 1965), (1964, 1964), (1963, 1963), (1962, 1962), (1961, 1961), (1960, 1960), (1959, 1959), (1958, 1958), (1957, 1957), (1956, 1956), (1955, 1955), (1954, 1954), (1953, 1953), (1952, 1952), (1951, 1951), (1950, 1950), (1949, 1949), (1948, 1948), (1947, 1947), (1946, 1946), (1945, 1945), (1944, 1944), (1943, 1943), (1942, 1942), (1941, 1941), (1940, 1940), (1939, 1939), (1938, 1938), (1937, 1937), (1936, 1936), (1935, 1935), (1934, 1934), (1933, 1933), (1932, 1932), (1931, 1931), (1930, 1930), (1929, 1929), (1928, 1928), (1927, 1927), (1926, 1926), (1925, 1925), (1924, 1924), (1923, 1923), (1922, 1922), (1921, 1921), (1920, 1920), (1919, 1919), (1918, 1918), (1917, 1917), (1916, 1916), (1915, 1915), (1914, 1914), (1913, 1913), (1912, 1912), (1911, 1911), (1910, 1910), (1909, 1909), (1908, 1908), (1907, 1907), (1906, 1906), (1905, 1905), (1904, 1904), (1903, 1903), (1902, 1902), (1901, 1901), (1900, 1900)])),
('month', models.IntegerField(verbose_name='month', choices=[(1, 'january'), (2, 'febuary'), (3, 'march'), (4, 'april'), (5, 'may'), (6, 'june'), (7, 'july'), (8, 'august'), (9, 'september'), (10, 'october'), (11, 'november'), (12, 'december')])),
('resume', models.ForeignKey(related_name='trainings', to='curriculum.Resume')),
],
),
migrations.AddField(
model_name='projectitem',
name='resume',
field=models.ForeignKey(related_name='projects', to='curriculum.Resume'),
),
migrations.AddField(
model_name='languageitem',
name='resume',
field=models.ForeignKey(related_name='languages', to='curriculum.Resume'),
),
migrations.AddField(
model_name='experience',
name='resume',
field=models.ForeignKey(related_name='experiences', to='curriculum.Resume'),
),
migrations.AddField(
model_name='certificationitem',
name='resume',
field=models.ForeignKey(related_name='certifications', to='curriculum.Resume'),
),
migrations.AlterUniqueTogether(
name='certification',
unique_together=set([('title', 'authority')]),
),
migrations.AlterUniqueTogether(
name='skillitem',
unique_together=set([('skill', 'resume')]),
),
migrations.AlterUniqueTogether(
name='projectitem',
unique_together=set([('resume', 'project')]),
),
migrations.AlterUniqueTogether(
name='languageitem',
unique_together=set([('language', 'resume')]),
),
migrations.AlterUniqueTogether(
name='certificationitem',
unique_together=set([('certification', 'resume')]),
),
]
|
[
"montheanthony@hotmail.com"
] |
montheanthony@hotmail.com
|
99efb5dfc673fb87e757158b477d4a8646110efb
|
3ba314ca88e89dded85a3448e730e215c47f3ceb
|
/allSkyImagingModule/other/plotBaselines.py
|
2c3c09dff5104d0bbd0786c6549548e32e0cdff4
|
[] |
no_license
|
David-McKenna/allSkyImaging
|
9f3dc5984541a6d39617dd5fd583654d0901d685
|
e20a4bb48cca7814c32326177c121e149664764c
|
refs/heads/master
| 2021-07-06T11:28:58.345222
| 2019-02-06T13:21:06
| 2019-02-06T13:21:06
| 166,459,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,067
|
py
|
"""Summary
Attributes:
custom_debug (list): Debug list of activated station, increasing in order from 0 to 15 to ensure correct selections
Effelsberg_elements_20091110 (list): Effelsberg HBA tile activations
Generic_International_Station_20091110 (list): Generic HBA tile activations
"""
import matplotlib.pyplot as plt
import astropy.units as u
import numpy as np
import copy
import csv
import os
from astropy.coordinates import SkyCoord
randWalk = [ 1, 14, 15, 15, 12, 11, 10, 9, 2, 15, 1, 4, 8, 4, 7, 0, 7,
0, 14, 0, 11, 3, 2, 9, 5, 11, 3, 3, 11, 12, 3, 10, 12, 7,
15, 13, 10, 9, 8, 2, 2, 13, 13, 0, 5, 14, 0, 9, 2, 0, 5,
8, 6, 9, 8, 13, 13, 15, 2, 10, 13, 9, 13, 11, 7, 9, 11, 14,
13, 13, 12, 0, 0, 2, 8, 7, 6, 2, 10, 0, 4, 9, 2, 4, 3,
4, 11, 5, 6, 1, 15, 15, 14, 6, 14, 2]
Effelsberg_elements_20091110 = [1,4,13,15,11,9,14,1,15,0,8,2,11,3,14,0,2,4,3,0,0,2,12,12,12,12,15,11,14,15,7,5,1,0,3,10,1,11,0,12,12,1,6,7,0,10,9,6,15,14,11,7,2,0,7,12,15,8,13,3,7,6,3,15,11,1,4,11,8,1,8,15,4,0,5,6,12,0,12,15,3,7,14,8,3,12,12,2,9,8,14,2,5,6,12,0]
Generic_International_Station_20091110 = [15,0,15,3,9,15,14,2,0,3,4,14,10,8,5,15,12,0,2,11,3,12,12,1,5,4,4,8,6,3,0,5,3,11,3,2,8,15,13,8,3,2,9,1,14,8,8,0,12,13,0,11,15,3,12,3,13,3,10,5,0,10,1,6,4,10,3,15,3,14,0,12,0,7,0,12,7,3,13,0,7,3,15,4,14,4,3,8,4,9,12,0,14,9,3,11]
custom_debug = (range(16) * 8)[:len(Effelsberg_elements_20091110)]
def getAntOrder():
"""Get a list of arrays of increasing order, with lengths corresponding to a row of HBAs
Returns:
list: list of 97 elements, 96 tiles and the dummy tile denoted as None
"""
lengths = np.array([5,7,9,11,11,10,11,11,9,7,5])
initVar = 0
antennaeSets = []
for lenRow in lengths:
antennaeSets.append(np.arange(initVar, initVar + lenRow))
initVar += lenRow
antennaeSets[5] = np.concatenate([antennaeSets[5][:5], [None], antennaeSets[5][5:]])
return antennaeSets
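# Added note (illustrative, not in the original source): the returned structure has
# 11 rows of lengths [5, 7, 9, 11, 11, 11, 11, 11, 9, 7, 5] once the dummy tile
# (None) has been spliced into the middle row, i.e. 96 numbered tiles plus one
# placeholder, matching the docstring above.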
def plotTiles(lengths, titleVar = "HBA Activations", separationLines = False):
"""Quick HBA layout plotter, noting activations from input dataset
Args:
        lengths (list): List of rows of activated HBA tile numbers.
titleVar (string): Plot title
separationLines (bool, optional): Toggle lines between each tile on the graph
Returns:
antLoc (np.array): Locations of activated tiles in the arbitrary coordinate system centered about 0
"""
antLoc = []
yOffset = halfEven(len(lengths)) * 5.15
plt.figure(figsize = (12,12))
plt.xlim([-36, 36])
plt.ylim([-36, 36])
internalOffset = (np.linspace(-0.5, 0.25, 4) + 0.125) * 5
internalOffset = np.array(np.meshgrid(internalOffset, internalOffset)).reshape(1, 2, -1)[0]
for idx, row in enumerate(lengths):
xOffset = -5.15 * halfEven(len(row))
if separationLines:
plt.axhline(yOffset + 2.5, lw = 5., c = '#2a5289', alpha = 0.3)
for element in row:
if idx == 3 and separationLines:
plt.axvline(xOffset - 2.5, lw = 5., c = '#2a5289', alpha = 0.3)
if element is not None:
colEle = element % 4
rowEle = ((element - colEle) % 16)
xOffsetEle = xOffset + internalOffset[0][colEle]
yOffsetEle = yOffset - internalOffset[1][rowEle]
plt.scatter(xOffset + internalOffset[0], yOffset + internalOffset[1], s = 95, marker = 's', edgecolors = 'k', c = '#327e91', alpha = 0.3)
plt.scatter([xOffsetEle], [yOffsetEle], s = 100, marker = 's', c = '#00f756', edgecolors = 'k')
antLoc.append([xOffsetEle, yOffsetEle])
xOffset += 5.15
yOffset -= 5.15
plt.title(titleVar)
plt.savefig('./{0}.png'.format(titleVar))
#plt.savefig("{0}.png".format(titleVar.strip(" ")))
return np.array(antLoc)
def getBaselines(antLoc):
"""Generate an array of baselines for the given input locations
Args:
antLoc (list): List of antenna locations
Returns:
baselines (list): List of baselines
"""
baselines = []
for idx, ant in enumerate(antLoc[:-1]):
for ant2 in antLoc[idx + 1:]:
diff = ant - ant2
baselines.append(diff)
baselines.append(-1. * diff)
return np.array(baselines)
def getUVWPlane(baselines, astropyCoord):
"""Convert baselines to a UVW sampling for a given point on the sky
Args:
baselines (np.array): List of baselines between antenna
astropyCoord (Skycoord): Astropy coordinate on the sky.
Returns:
        uvw (np.array): Baselines projected into the UVW frame for the given sky coordinate
"""
ra = astropyCoord.icrs.ra.to(u.rad)
dec = astropyCoord.icrs.dec.to(u.rad)
transformMaxtrix = np.array([
[np.sin(ra), np.cos(ra), 0],
[-1. * np.sin(dec) * np.cos(ra), np.sin(dec) * np.sin(ra), np.cos(dec)],
[np.cos(dec) * np.cos(ra), -1. * np.cos(dec) * np.sin(ra), np.sin(dec)]
])
if baselines.shape[1] != 3:
transformMaxtrix = transformMaxtrix[:2, :2]
uvw = np.dot(transformMaxtrix, baselines.T)
return uvw.T
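# Illustrative usage sketch (added; the coordinate and baseline values are made up):
#   exampleBaselines = np.array([[5.15, 0.], [0., 5.15]])
#   exampleCoord = SkyCoord(ra = 123.4 * u.deg, dec = 45.6 * u.deg, frame = 'icrs')
#   exampleUVW = getUVWPlane(exampleBaselines, exampleCoord)
# With 2-column baselines the transform matrix is truncated to 2x2 above, so
# exampleUVW has shape (2, 2); with (x, y, z) baselines a full (n, 3) array is returned.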
def populateSelections(lengths, selections):
"""Given a structure of tiles and a list of activations per-tile, return an array in the
same structure as the tile layout but with the selected antenna in place of their ID number
Args:
lengths (list): HBA tile structure to repopulate
selections (list): List of each antenna activated in each HBA tile (0-15)
Returns:
newLengths (list): Repopulated list with the structure of lengths but contents of selections
"""
idx = 0
newLengths = copy.deepcopy(lengths)
for lenIdx, row in enumerate(lengths):
for rowIdx, element in enumerate(row):
if element is not None:
newLengths[lenIdx][rowIdx] = selections[idx]
idx += 1
return newLengths
def halfEven(inputVar):
"""Helper function: return the rounded, half of the input value
Args:
inputVar (float-like): Input variable
Returns:
float-like: Input /2 rounded down.
"""
return (inputVar - (inputVar % 2)) / 2
###
def getAntMap(station = 'IE613'):
"""Generate a list of array elements for a given station in the ETRS coordinate system (results in meters)
Args:
station (str, optional): Name of the station of interest
Returns:
fullArr (list): All elements relating to the station
hbaLoc (np.array): All HBA coordinates in the given station (x,y,z)
lbaLoc (np.array): All LBA coordinates in the given station (x,y,z)
"""
fullArr = []
hbaLoc = []
lbaLoc = []
if not os.path.exists("./etrs-antenna-positions.csv"):
print("Unable to find antenna database! Downloading from Github.")
import urllib
urllib.urlretrieve("https://raw.githubusercontent.com/lofar-astron/lofar-antenna-positions/master/share/lofarantpos/etrs-antenna-positions.csv", "./etrs-antenna-positions.csv")
print("Download Complete.")
with open("./etrs-antenna-positions.csv", 'rb') as fileRef:
elementReader = csv.reader(fileRef, delimiter = ',')
for row in elementReader:
if row[0] == station:
fullArr.append(row)
antType = row[1]
if antType == 'HBA':
hbaLoc.append(row[3:6])
elif antType == 'LBA':
lbaLoc.append(row[3:6])
else:
print('Unknown antenna type {0}'.format(antType))
hbaLoc = np.array(hbaLoc, dtype = float)
lbaLoc = np.array(lbaLoc, dtype = float)
return fullArr, hbaLoc, lbaLoc
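# Illustrative usage (added comment):
#   allRows, hbaXYZ, lbaXYZ = getAntMap('IE613')
#   hbaXYZ.shape   # -> (number of HBA elements, 3): ETRS x, y, z in metres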
def plotTitleSave(dataX, dataY, title, scatterSize = None):
"""Plot data, give it a title, save it to disk.
Args:
dataX (list-like): X data
dataY (list-like): Y data
title (string): Plot title, name of saved file (without .png suffix)
scatterSize (float-like, optional): Size of scatter points
"""
plt.figure(figsize = (20, 20))
plt.title(title)
if scatterSize:
plt.scatter(dataX, dataY, s = scatterSize)
else:
plt.scatter(dataX, dataY)
plt.savefig(title + ".png")
if __name__ == '__main__':
listOfAnt = getAntOrder()
effelsPlotVar = populateSelections(listOfAnt, Effelsberg_elements_20091110)
genericPlotVar = populateSelections(listOfAnt, Generic_International_Station_20091110)
debugPlotVar = populateSelections(listOfAnt, custom_debug)
debugWalkPlotVar = populateSelections(listOfAnt, randWalk)
antLocEffels = plotTiles(effelsPlotVar, "HBA Activations using the Effelsberg Scheme", True)
antLocGeneric = plotTiles(genericPlotVar, "HBA Activations using the Generic Scheme", True)
antLocDebug = plotTiles(debugPlotVar, "HBA Activations for the Debug Scheme", True)
antLocDebugWalk = plotTiles(debugWalkPlotVar, "HBA Activations for the Debug Random Walk Scheme", True)
baselinesEffels = getBaselines(antLocEffels)
baselinesGeneric = getBaselines(antLocGeneric)
baselinesWalk = getBaselines(antLocDebugWalk)
#__, __, lbaLoc = getAntMap('DE601')
#effelsLBABaselines = getBaselines(lbaLoc)
plotTitleSave(baselinesEffels[:, 0], baselinesEffels[:, 1], "Snapshot Baselines for Effelsberg Scheme", 5)
plotTitleSave(baselinesGeneric[:, 0], baselinesGeneric[:, 1], "Snapshot Baselines for Generic Scheme", 5)
plotTitleSave(baselinesWalk[:, 0], baselinesWalk[:, 1], "Snapshot Baselines for Random Walk Scheme", 5)
#plotTitleSave(lbaLoc[:, 0], lbaLoc[:, 1], "Effelsberg LBA Station")
#plotTitleSave(effelsLBABaselines[:, 0], effelsLBABaselines[:, 1], "Effelsberg LBA Baselines", 2.)
plt.show()
|
[
"mckennd2@tcd.ie"
] |
mckennd2@tcd.ie
|
e6a1d1e2537da737dd5945f13b9c80f2178437ad
|
0c6bacd871337375c0c67dae003ebebd3bd58da3
|
/ReadnPlot_CourtCaseData_Boxplt.py
|
1f454b3e8f220a01451f23fd792f33043d1990cc
|
[] |
no_license
|
7676444908/Python_Plots
|
3ac3354bae72c07ab9afcf1896755ed3398684b4
|
d8627634995a2705991bee99bda3cca952027a73
|
refs/heads/main
| 2023-08-31T13:22:59.394302
| 2021-10-02T07:59:19
| 2021-10-02T07:59:19
| 412,438,017
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
#import library
import pandas as pd
import matplotlib.pyplot as plt
#add csv file to dataframe
df = pd.read_csv('LIMBS_Ministry_Wise_Court_Case_Data_2021.csv')
#create boxplot
boxplot = df.boxplot(figsize = (5,5))
#save the figure before displaying it (calling savefig after plt.show() writes an empty image)
plt.savefig('MinistryVsTotalNumberOfcases_Boxplt.png')
plt.show()
|
[
"noreply@github.com"
] |
7676444908.noreply@github.com
|
84f1a5a4a81545c54e619e1ab0d7d893f0d61d84
|
4fbf4a6583f5df66d2b0e7b0c4b2016fc83dff78
|
/neural_network/network_module.py
|
22dff58d2149ecb253d4b994843414840f93c13d
|
[] |
no_license
|
jkwrobel/soft-glowing-potato
|
39819c795b26a2dc6e4ff460a171debebeba14e1
|
ed927826cc0a99a2ffb5c45c35db15c61ecfd36b
|
refs/heads/master
| 2023-09-02T06:17:32.456944
| 2021-10-23T10:49:51
| 2021-10-23T10:49:51
| 414,573,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
import numpy as np
class Network:
def __init__(self):
self.layers = []
def add_layer(self, layer):
self.layers.append(layer)
def use_trained_network(self, input_data):
samples = len(input_data)
result = []
for i in range(samples):
output = input_data[i]
for layer in self.layers:
output = layer.forward_propagation(output)
result.append(output)
return result
def train_network(self, given_inputs, expected_outputs, epochs, learning_rate):
samples = len(given_inputs)
for i in range(epochs):
error_for_one_epoch = 0
for j in range(samples):
output = given_inputs[j]
for layer in self.layers:
output = layer.forward_propagation(output)
                # accumulate the squared error so the per-epoch loss is actually tracked
                error_for_one_epoch += mean_square_error(expected_outputs[j], output)
error = mean_square_error_der(expected_outputs[j], output)
for layer in reversed(self.layers):
error = layer.backward_propagation(error, learning_rate)
def mean_square_error(expected_values, gotten_values):
return np.mean(np.power(expected_values - gotten_values, 2))
def mean_square_error_der(expected_values, gotten_values):
return 2 * (gotten_values - expected_values) / expected_values.size
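# Minimal sanity check (added for illustration; not part of the original module):
#   expected = np.array([1.0, 0.0])
#   got = np.array([0.75, 0.25])
#   mean_square_error(expected, got)       # -> 0.0625
#   mean_square_error_der(expected, got)   # -> array([-0.25,  0.25])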
|
[
"anita.tereszczuk@gmail.com"
] |
anita.tereszczuk@gmail.com
|
6229d71ac4298b44124dd4b8e60fbc94f362f721
|
22f57701df31b3182f3bcb83da729ecc584f8fb6
|
/December-12/py_anuppriya_revsinglylinkedlist.py
|
eca33e40110bce5169894193a53714e455c02d79
|
[] |
no_license
|
Prashant-Bharaj/A-December-of-Algorithms
|
e88640c711abbe2e6cac71cb4652dac243984484
|
7bbd56572f4ddc9648e90615ee810765544c56e4
|
refs/heads/master
| 2023-08-05T15:37:20.362561
| 2021-09-19T05:51:53
| 2021-09-19T05:51:53
| 287,055,360
| 0
| 0
| null | 2020-08-12T15:53:05
| 2020-08-12T15:53:04
| null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def reverse(self):
prev = None
current = self.head
while(current is not None):
next = current.next
current.next = prev
prev = current
current = next
self.head = prev
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def printList(self):
temp = self.head
while(temp):
print (temp.data)
temp = temp.next
llist = LinkedList()
llist.push(13)
llist.push(18)
llist.push(22)
llist.push(48)
print( "Given Linked List")
llist.printList()
llist.reverse()
print ("\nReversed Linked List")
llist.printList()
|
[
"noreply@github.com"
] |
Prashant-Bharaj.noreply@github.com
|
df9ace102e90996a035fceb143ada46c73bb9bc5
|
ac0ccedfbe6270030347341a6befc850c2093af2
|
/astropy/version_helpers.py
|
35e569c6c793de91a81a2140d0f73088012aa424
|
[] |
no_license
|
pkaminski/sample
|
99fa7bd638001890f38d261495dc8f858feb09a4
|
c8d4fa1e951f73e1553fe3604bb5e77b62a6e52b
|
refs/heads/master
| 2023-06-22T01:50:43.473811
| 2023-04-04T23:17:54
| 2023-04-04T23:17:54
| 21,379,740
| 1
| 5
| null | 2023-08-31T00:07:37
| 2014-07-01T08:02:32
|
C
|
UTF-8
|
Python
| false
| false
| 8,461
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
##############################################################################
# Note: this file exists only for backward-compatibility purposes - the #
# contents have been moved to the separate astropy-helpers package, #
# located at https://github.com/astropy/astropy-helpers. Any new #
# development or bug fixes should be done there. #
##############################################################################
"""
Utilities for generating the version string for Astropy (or an affiliated
package) and the version.py module, which contains version info for the
package.
Within the generated astropy.version module, the `major`, `minor`, and `bugfix`
variables hold the respective parts of the version number (bugfix is '0' if
absent). The `release` variable is True if this is a release, and False if this
is a development version of astropy. For the actual version string, use::
from astropy.version import version
or::
from astropy import __version__
"""
from __future__ import division
import datetime
import imp
import os
import subprocess
import sys
from distutils import log
from warnings import warn
def _version_split(version):
"""
Split a version string into major, minor, and bugfix numbers (with bugfix
optional, defaulting to 0).
"""
for prerel in ('.dev', 'a', 'b', 'rc'):
if prerel in version:
version = version.split(prerel)[0]
versplit = version.split('.')
major = int(versplit[0])
minor = int(versplit[1])
bugfix = 0 if len(versplit) < 3 else int(versplit[2])
return major, minor, bugfix
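# Illustrative behaviour (added comment; not in the original file):
#   _version_split('1.2.3')        -> (1, 2, 3)
#   _version_split('0.3.dev1234')  -> (0, 3, 0)
#   _version_split('1.0rc2')       -> (1, 0, 0)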
def update_git_devstr(version, path=None):
"""
Updates the git revision string if and only if the path is being imported
directly from a git working copy. This ensures that the revision number in
the version string is accurate.
"""
try:
# Quick way to determine if we're in git or not - returns '' if not
devstr = get_git_devstr(sha=True, show_warning=False, path=path)
except OSError:
return version
if not devstr:
# Probably not in git so just pass silently
return version
if 'dev' in version: # update to the current git revision
version_base = version.split('.dev', 1)[0]
devstr = get_git_devstr(sha=False, show_warning=False, path=path)
return version_base + '.dev' + devstr
else:
#otherwise it's already the true/release version
return version
def get_git_devstr(sha=False, show_warning=True, path=None):
"""
Determines the number of revisions in this repository.
Parameters
----------
sha : bool
If True, the full SHA1 hash will be returned. Otherwise, the total
count of commits in the repository will be used as a "revision
number".
show_warning : bool
If True, issue a warning if git returns an error code, otherwise errors
pass silently.
path : str or None
If a string, specifies the directory to look in to find the git
repository. If None, the location of the file this function is in
is used to infer the git repository location. If given a filename it
uses the directory containing that file.
Returns
-------
devversion : str
        Either a string with the revision number (if `sha` is False), the
SHA1 hash of the current commit (if `sha` is True), or an empty string
if git version info could not be identified.
"""
from .utils import find_current_module
if path is None:
try:
mod = find_current_module(1, finddiff=True)
path = os.path.abspath(mod.__file__)
except (ValueError, AttributeError):
path = __file__
if not os.path.isdir(path):
path = os.path.abspath(os.path.split(path)[0])
if sha:
cmd = 'rev-parse' # Faster for getting just the hash of HEAD
else:
cmd = 'rev-list'
try:
p = subprocess.Popen(['git', cmd, 'HEAD'], cwd=path,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
except OSError as e:
if show_warning:
warn('Error running git: ' + str(e))
return ''
if p.returncode == 128:
if show_warning:
warn('No git repository present! Using default dev version.')
return ''
elif p.returncode != 0:
if show_warning:
warn('Git failed while determining revision count: ' + stderr)
return ''
if sha:
return stdout.decode('utf-8')[:40]
else:
nrev = stdout.decode('utf-8').count('\n')
return str(nrev)
# This is used by setup.py to create a new version.py - see that file for
# details. Note that the imports have to be absolute, since this is also used
# by affiliated packages.
_FROZEN_VERSION_PY_TEMPLATE = """
# Autogenerated by {packagename}'s setup.py on {timestamp}
from astropy.version_helpers import update_git_devstr, get_git_devstr
_last_generated_version = {verstr!r}
version = update_git_devstr(_last_generated_version)
githash = get_git_devstr(sha=True, show_warning=False)
major = {major}
minor = {minor}
bugfix = {bugfix}
release = {rel}
debug = {debug}
try:
from .utils._compiler import compiler
except ImportError:
compiler = "unknown"
try:
from .cython_version import cython_version
except ImportError:
cython_version = "unknown"
"""[1:]
def _get_version_py_str(packagename, version, release, debug):
timestamp = str(datetime.datetime.now())
major, minor, bugfix = _version_split(version)
if packagename.lower() == 'astropy':
packagename = 'Astropy'
else:
packagename = 'Astropy-affiliated package ' + packagename
return _FROZEN_VERSION_PY_TEMPLATE.format(packagename=packagename,
timestamp=timestamp,
verstr=version,
major=major,
minor=minor,
bugfix=bugfix,
rel=release, debug=debug)
def generate_version_py(packagename, version, release=None, debug=None):
"""Regenerate the version.py module if necessary."""
from .setup_helpers import is_distutils_display_option
from .utils.compat.misc import invalidate_caches
try:
version_module = __import__(packagename + '.version',
fromlist=['_last_generated_version',
'version', 'release', 'debug'])
try:
last_generated_version = version_module._last_generated_version
except AttributeError:
# Older version.py with no _last_generated_version; this will
# ensure a new version.py is written
last_generated_version = None
current_release = version_module.release
current_debug = version_module.debug
except ImportError:
version_module = None
last_generated_version = None
current_release = None
current_debug = None
if release is None:
# Keep whatever the current value is, if it exists
release = bool(current_release)
if debug is None:
# Likewise, keep whatever the current value is, if it exists
debug = bool(current_debug)
version_py = os.path.join(packagename, 'version.py')
if (last_generated_version != version or current_release != release or
current_debug != debug):
if '-q' not in sys.argv and '--quiet' not in sys.argv:
log.set_threshold(log.INFO)
if is_distutils_display_option():
# Always silence unnecessary log messages when display options are
# being used
log.set_threshold(log.WARN)
log.info('Freezing version number to {0}'.format(version_py))
with open(version_py, 'w') as f:
# This overwrites the actual version.py
f.write(_get_version_py_str(packagename, version, release, debug))
invalidate_caches()
if version_module:
imp.reload(version_module)
|
[
"piotr@ideanest.com"
] |
piotr@ideanest.com
|
b744b470d15f349edecd402d6a31f8f606303fe8
|
999e60669f7ad086e452a21e40827af418220d9a
|
/scraper.py
|
12b570cda4755c426777af632ae0f25cfa81c164
|
[] |
no_license
|
gouravrana7/Scrapping
|
5fb88b77cb7b008fccd3eb812b308686b53ea921
|
b7d3e68b725e592476e6601f25a7596b27590de2
|
refs/heads/main
| 2023-04-03T20:14:09.696318
| 2021-04-23T21:03:52
| 2021-04-23T21:03:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,792
|
py
|
import requests
from bs4 import BeautifulSoup
import pandas
import argparse
import connect
parser = argparse.ArgumentParser()
parser.add_argument("--page_num_max",help="Enter the number of pages to parse",type=int)
parser.add_argument("--dbname",help="Enter the name of db",type=str)
args= parser.parse_args()
oyo_url = "https://www.oyorooms.com/hotels-in-bangalore/?page="
page_num_MAX = args.page_num_max
scraped_info_list = []
connect.connect(args.dbname)
for page_num in range(1, page_num_MAX):
url = oyo_url + str(page_num)
print("GET Request for:" + url)
    req = requests.get(url)
content= req.content
soup = BeautifulSoup(content,"html.parser")
all_hotels = soup.find_all("div",{"class": "hotelCardListing"})
for hotel in all_hotels:
hotel_dict ={}
hotel_dict["name"]= hotel.find("h3",{"class":"ListingHotelDescription__hotelName"}).text
hotel_dict["adress"]= hotel.find("span",{"itemprop":"streetAdress"}).text
hotel_dict["name"]= hotel.find("span",{"class":"ListingPrice__finalPrice"}).text
try:
hotel_dict["price"]= hotel.find("span",{"class":"hotelRating__ratingSummary"}).text
except AttributeError:
pass
parent_amenities_element = hotel.find("div",{"class":"amenityWrapper"})
amenities_list=[]
for amenity in parent_amenities_element.find_all("div",{"class":"amenityWrapper__amenity"}):
amenities_list.append(amenity.find("span",{"class": "d-body-sm"}).text.strip())
hotel_dict["amenities"]=', '.join(amenities_list[:-1])
scraped_info_list.append(hotel_dict)
connect.insert_into_table(args.dbname, tuple(hotel_dict.values()))
|
[
"noreply@github.com"
] |
gouravrana7.noreply@github.com
|
60f07cd2e6c576a0f3a3b297e0f7597fded8febc
|
7d431b02b5c3de04e72f1a6ba5059531232147dc
|
/rlockEjemplo1Fix.py
|
6f3742b133b90a4b4a5bc3d4ba8b81ae11294e0b
|
[] |
no_license
|
msunh/Concurrente_deadLocksClase3_2021
|
7c9aba8d5254665a302df6a97ce314a7d7861c36
|
c4e7a7211cdbe09a352e2d46efe6efce7b79313f
|
refs/heads/master
| 2023-05-30T20:10:40.252297
| 2021-04-24T00:00:24
| 2021-04-24T00:00:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
import threading
import logging
logging.basicConfig(format='%(asctime)s.%(msecs)03d [%(threadName)s] - %(message)s', datefmt='%H:%M:%S', level=logging.INFO)
lock = threading.RLock()
def funcA():
lock.acquire()
try:
logging.info(f'funcion A')
funcB()
finally:
lock.release()
def funcB():
lock.acquire()
try:
logging.info(f'funcion B')
finally:
lock.release()
t1 = threading.Thread(target=funcA)
t2 = threading.Thread(target=funcB)
t1.start()
t2.start()
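# Added note: threading.RLock is reentrant, so the thread running funcA can acquire
# the same lock again inside funcB without blocking on itself; with a plain
# threading.Lock the nested acquire() in funcB would deadlock, which is presumably
# why this file is the "Fix" variant of the deadlock example.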
|
[
"dbuaon@gmail.com"
] |
dbuaon@gmail.com
|
632adddee0bf6558d604dbfdf73b4728e404debc
|
e55531da9f55cd93b619431e4c162859e5e7be2e
|
/printing_file_args.py
|
319873804564f86e2e13ac533a14654ae7b09fd0
|
[] |
no_license
|
rawatsandeep1989/git-branches
|
69d77478d3b5de805268bc914e93a49d3e6a3af3
|
b589e1b4bd3f372d58f5b738d821394ae99b00da
|
refs/heads/master
| 2020-09-30T18:52:07.125462
| 2019-12-11T16:52:54
| 2019-12-11T16:52:54
| 227,351,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
#!/usr/bin/env python3
import sys
print(sys.argv)
for i in range(len(sys.argv)):
if i==0:
print("Function name is %s :" % sys.argv[0])
else:
print("%d.argument %s:" % (i,sys.argv[i]))
print(sys.platform)
print(sys.version)
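# Example invocation (illustrative; the arguments are arbitrary):
#   $ ./printing_file_args.py foo bar
#   ['./printing_file_args.py', 'foo', 'bar']
#   Function name is ./printing_file_args.py :
#   1.argument foo:
#   2.argument bar:
# followed by the values of sys.platform and sys.version.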
|
[
"sandeepr@one.com"
] |
sandeepr@one.com
|
fe4a6346bcb9bbbbbfa99d1e0c34646eaaeeb80d
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/03_model_fitting/mlrRecon/607-tideGauge.py
|
5d3a5857f4fc004b496e5aaa6fe495475221f2de
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,255
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
----------------------------------------------------
This program is designed to reconstruct merra daily
maximum surge using MLR
----------------------------------------------------
@author: Michael Tadesse
"""
def reconstruct():
"""
run KFOLD method for regression
"""
#import packages
import os
import pandas as pd
import statsmodels.api as sm
from datetime import datetime
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
#defining directories
dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
dir_out = "/lustre/fs0/home/mtadesse/mlrReconstruction"
surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
#cd to the lagged predictors directory
os.chdir(dir_in)
x = 607
y = 608
#looping through
for tg in range(x,y):
os.chdir(dir_in)
tg_name = os.listdir()[tg]
print(tg, tg_name)
#load predictor
pred = pd.read_csv(tg_name)
pred.drop('Unnamed: 0', axis = 1, inplace = True)
#add squared and cubed wind terms (as in WPI model)
pickTerms = lambda x: x.startswith('wnd')
wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
wnd_sqr = pred[wndTerms]**2
wnd_cbd = pred[wndTerms]**3
pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
#standardize predictor data
dat = pred.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
#load surge data
os.chdir(surge_path)
surge = pd.read_csv(tg_name)
surge.drop('Unnamed: 0', axis = 1, inplace = True)
#remove duplicated surge rows
surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
surge.reset_index(inplace = True)
surge.drop('index', axis = 1, inplace = True)
#adjust surge time format to match that of pred
time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
#merge predictors and surge to find common time frame
pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
pred_surge.sort_values(by = 'date', inplace = True)
#find rows that have nans and remove them
row_nan = pred_surge[pred_surge.isna().any(axis =1)]
pred_surge.drop(row_nan.index, axis = 0, inplace = True)
pred_surge.reset_index(inplace = True)
pred_surge.drop('index', axis = 1, inplace = True)
#in case pred and surge don't overlap
if pred_surge.shape[0] == 0:
print('-'*80)
            print("Predictors and Surge don't overlap")
print('-'*80)
continue
pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
pred_surge['date'])), \
columns = ['date'])
#prepare data for training/testing
X = pred_surge.iloc[:,1:-1]
y = pd.DataFrame(pred_surge['surge'])
y = y.reset_index()
y.drop(['index'], axis = 1, inplace = True)
#apply PCA
pca = PCA(.95)
pca.fit(X)
X_pca = pca.transform(X)
{
# #apply 10 fold cross validation
# kf = KFold(n_splits=10, random_state=29)
# metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
# for train_index, test_index in kf.split(X):
# X_train, X_test = X_pca[train_index], X_pca[test_index]
# y_train, y_test = y['surge'][train_index], y['surge'][test_index]
# #train regression model
# lm = LinearRegression()
# lm.fit(X_train, y_train)
# #predictions
# predictions = lm.predict(X_test)
# # pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
# # pd.DataFrame(np.array(y_test))], \
# # axis = 1)
# # pred_obs.columns = ['pred', 'obs']
# # combo = pd.concat([combo, pred_obs], axis = 0)
# #evaluation matrix - check p value
# if stats.pearsonr(y_test, predictions)[1] >= 0.05:
# print("insignificant correlation!")
# continue
# else:
# #print(stats.pearsonr(y_test, predictions))
# metric_corr.append(stats.pearsonr(y_test, predictions)[0])
# #print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
# metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
# # #number of years used to train/test model
# num_years = np.ceil((pred_surge['date'][pred_surge.shape[0]-1] -\
# pred_surge['date'][0]).days/365)
# longitude = surge['lon'][0]
# latitude = surge['lat'][0]
# num_pc = X_pca.shape[1] #number of principal components
# corr = np.mean(metric_corr)
# rmse = np.mean(metric_rmse)
# print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',\
# np.mean(metric_corr), ' - avg_rmse (m) = ', \
# np.mean(metric_rmse), '\n')
}
num_pc = X_pca.shape[1] #number of principal components
longitude = surge['lon'][0]
latitude = surge['lat'][0]
#surge reconstruction
pred_for_recon = pred[~pred.isna().any(axis = 1)]
pred_for_recon = pred_for_recon.reset_index().drop('index', axis = 1)
#standardize predictor data
dat = pred_for_recon.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred_for_recon['date'], dat_standardized], axis = 1)
X_recon = pred_standardized.iloc[:, 1:]
#apply PCA
pca = PCA(num_pc) #use the same number of PCs used for training
pca.fit(X_recon)
X_pca_recon = pca.transform(X_recon)
#model preparation
#first train model using observed surge and corresponding predictors
X_pca = sm.add_constant(X_pca)
est = sm.OLS(y['surge'], X_pca).fit()
#predict with X_recon and get 95% prediction interval
X_pca_recon = sm.add_constant(X_pca_recon)
predictions = est.get_prediction(X_pca_recon).summary_frame(alpha = 0.05)
#drop confidence interval and mean_se columns
predictions.drop(['mean_se', 'mean_ci_lower','mean_ci_upper'], \
axis = 1, inplace = True)
#final dataframe
final_dat = pd.concat([pred_standardized['date'], predictions], axis = 1)
final_dat['lon'] = longitude
final_dat['lat'] = latitude
final_dat.columns = ['date', 'surge_reconsturcted', 'pred_int_lower',\
'pred_int_upper', 'lon', 'lat']
{
# plot - optional
# time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
# final_dat['date'] = pd.DataFrame(list(map(time_stamp, final_dat['date'])), columns = ['date'])
# surge['date'] = pd.DataFrame(list(map(time_stamp, surge['date'])), columns = ['date'])
# sns.set_context('notebook', font_scale = 2)
# plt.figure()
# plt.plot(final_dat['date'], final_dat['mean'], color = 'green')
# plt.scatter(surge['date'], surge['surge'], color = 'blue')
# prediction intervals
# plt.plot(final_dat['date'], final_dat['obs_ci_lower'], color = 'red', linestyle = "--", lw = 0.8)
# plt.plot(final_dat['date'], final_dat['obs_ci_upper'], color = 'red', linestyle = "--", lw = 0.8)
# confidence intervals
# plt.plot(final_dat['date'], final_dat['mean_ci_upper'], color = 'black', linestyle = "--", lw = 0.8)
# plt.plot(final_dat['date'], final_dat['mean_ci_lower'], color = 'black', linestyle = "--", lw = 0.8)
}
#save df as cs - in case of interruption
os.chdir(dir_out)
final_dat.to_csv(tg_name)
#cd to dir_in
# os.chdir(dir_in)
reconstruct()
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
e1e14c735947532850f9450d2694f3e8f7bca58d
|
9bd755b248c0534b004eae999dbab7ee36ba219b
|
/oneDriveLinkGeneratorFromYAML
|
a3e8a8b1174011bf0d9fc7d18e5f139ad7743f0e
|
[] |
no_license
|
ceberous/osxSettings
|
2507c39bb7b2e2ac4ed9b8adb234f7d47095593c
|
add1e1cc64aa403f9d6922f5080b13b371cd9a4c
|
refs/heads/master
| 2023-06-11T00:38:52.335759
| 2023-06-04T21:42:05
| 2023-06-04T21:42:05
| 186,904,141
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
#!/usr/bin/env python3
# pip install pyyaml
import yaml
import sys
from pathlib import Path
def read_yaml( file_path ):
with open( file_path ) as f:
return yaml.safe_load( f )
def write_text( file_path , text_lines_list ):
with open( file_path , 'w', encoding='utf-8' ) as f:
f.writelines( text_lines_list )
input_yaml_file_path = sys.argv[ 1 ]
links = read_yaml( input_yaml_file_path )
for title , url in links.items():
write_text( str( Path.cwd().joinpath( f"{title}.url" ) ) , [ "[InternetShortcut]\n" , f"URL={url[0]}\n" ] )
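# Illustrative input (added comment; a hypothetical links.yaml, values must be lists):
#   GitHub:
#     - https://github.com
#   PythonDocs:
#     - https://docs.python.org/3/
# Running `python3 oneDriveLinkGeneratorFromYAML links.yaml` would then write
# GitHub.url and PythonDocs.url (InternetShortcut files) into the current directory.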
|
[
"cerbus.collin@gmail.com"
] |
cerbus.collin@gmail.com
|
|
0c952f5626c7a7187c2ce0175469a5ae5d62cbc9
|
26a0941b02286518e382fe86daa0dd5c0f596a9a
|
/stage_scenes.py
|
26bf98175d74488c0e99843bcaa5d0d4709e9ced
|
[
"MIT"
] |
permissive
|
Gargaran/videos
|
729c3c7e91cb20e5377b5e397b3b90ea91e3f8a1
|
26458da42fc665eb4ae844168c16ebb0526cc231
|
refs/heads/master
| 2023-08-22T16:36:33.235479
| 2021-10-06T22:48:08
| 2021-10-06T22:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,104
|
py
|
#!/usr/bin/env python
import inspect
import os
import sys
import importlib
from manimlib.config import get_module
from manimlib.extract_scene import is_child_scene
def get_sorted_scene_classes(module_name):
module = get_module(module_name)
if hasattr(module, "SCENES_IN_ORDER"):
return module.SCENES_IN_ORDER
# Otherwise, deduce from the order in which
# they're defined in a file
importlib.import_module(module.__name__)
line_to_scene = {}
name_scene_list = inspect.getmembers(
module,
lambda obj: is_child_scene(obj, module)
)
for name, scene_class in name_scene_list:
if inspect.getmodule(scene_class).__name__ != module.__name__:
continue
lines, line_no = inspect.getsourcelines(scene_class)
line_to_scene[line_no] = scene_class
return [
line_to_scene[index]
for index in sorted(line_to_scene.keys())
]
def stage_scenes(module_name):
scene_classes = get_sorted_scene_classes(module_name)
if len(scene_classes) == 0:
print("There are no rendered animations from this module")
return
# TODO, fix this
animation_dir = os.path.join(
os.path.expanduser('~'),
"Dropbox/3Blue1Brown/videos/2021/poly_fractal/videos"
)
#
files = os.listdir(animation_dir)
sorted_files = []
for scene_class in scene_classes:
scene_name = scene_class.__name__
clips = [f for f in files if f.startswith(scene_name + ".")]
for clip in clips:
sorted_files.append(os.path.join(animation_dir, clip))
# Partial movie file directory
# movie_dir = get_movie_output_directory(
# scene_class, **output_directory_kwargs
# )
# if os.path.exists(movie_dir):
# for extension in [".mov", ".mp4"]:
# int_files = get_sorted_integer_files(
# pmf_dir, extension=extension
# )
# for file in int_files:
# sorted_files.append(os.path.join(pmf_dir, file))
# else:
# animation_subdir = os.path.dirname(animation_dir)
count = 0
while True:
staged_scenes_dir = os.path.join(
animation_dir,
os.pardir,
"staged_scenes_{}".format(count)
)
if not os.path.exists(staged_scenes_dir):
os.makedirs(staged_scenes_dir)
break
# Otherwise, keep trying new names until
# there is a free one
count += 1
for count, f in reversed(list(enumerate(sorted_files))):
# Going in reversed order means that when finder
# sorts by date modified, it shows up in the
# correct order
symlink_name = os.path.join(
staged_scenes_dir,
"Scene_{:03}_{}".format(
count, f.split(os.sep)[-1]
)
)
os.symlink(f, symlink_name)
if __name__ == "__main__":
if len(sys.argv) < 2:
raise Exception("No module given.")
module_name = sys.argv[1]
stage_scenes(module_name)
|
[
"grant@3blue1brown.com"
] |
grant@3blue1brown.com
|
c0df0c354149cb0ce8785e02e9e03415ce691339
|
159b858a5a3816ed1f42c4d28d24d6a12c48c7dd
|
/captioning/models/AttModel.py
|
0ae51374aace5e35c899bf74ac4e5ee2eef8780e
|
[] |
no_license
|
gnehz/Image-Captioning-Attack
|
23c95bfa2128355ce2668f67b93a6dcf790bc9a5
|
25f28b7aa9b7aa2c7367a00fb1ccc6392c8a9f05
|
refs/heads/main
| 2023-03-20T21:46:11.781330
| 2021-02-26T16:46:40
| 2021-02-26T16:46:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 45,761
|
py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding is the same as what in adaatt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import reduce  # required below when opt.logit_layers > 1
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
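# pack_wrapper applies `module` only to the valid (unpadded) attention features by
# packing them with the per-sample lengths taken from att_masks; without a mask it
# simply applies the module to the whole tensor.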
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential( *(nn.Linear(self.fc_feat_size, self.rnn_size) ,nn.ReLU(),nn.Dropout(self.drop_prob_lm)))
self.att_embed =nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
return att_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks)
        logits = self.logit(output)
        if output_logsoftmax:
            logprobs = F.log_softmax(logits, dim=1)
        else:
            logprobs = logits
        #----------------
        logsoft = F.softmax(logits, dim=1)  # kept from the original; not used further
        #----------------
return logprobs, state
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, att_masks=None,opt={},found_sub_label = False, index=None, value=None):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
if not found_sub_label:
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
else:
self.done_beams = self.insertind_beam_search(state, logprobs, index,value,p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
print(sample_n)
print(beam_size)
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
print(seq_len)
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, att_masks=None, found_sub_label = False, index=None, value=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, att_masks,opt,found_sub_label, index, value)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, output_logsoftmax=output_logsoftmax)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to generate bad endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size #512
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size#512
self.num_layers = opt.num_layers#1
self.drop_prob_lm = opt.drop_prob_lm#0.5
self.fc_feat_size = opt.fc_feat_size #2048
self.att_feat_size = opt.att_feat_size #2048
self.att_hid_size = opt.att_hid_size#512
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume a one at the first position (the fake region)
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate the att2all model in the self-critical paper.
However, it is not actually a correct replication. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
[
"noreply@github.com"
] |
gnehz.noreply@github.com
|
2c5a2962fb4007cb8be4e05528c8e254ece4755b
|
c8fe9b0294c5a48f44fc051eb83bb8e13698035c
|
/loader.py
|
c5fd919b6ee9d7392288f728bb2173d7cfdcf693
|
[] |
no_license
|
danilasimanok/CommunicatorBot
|
8a388b9e0a3bb84652ef100159367a26a4bde67c
|
3a96239f4f910c5eb7a1b3fbcf330cfc23a2f586
|
refs/heads/master
| 2020-07-30T10:14:23.951061
| 2019-10-02T18:47:32
| 2019-10-02T18:47:32
| 210,188,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
import json
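# Loads and saves the bot's settings (new managers, managers, DB connection info) as JSON.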
class Loader:
    '''Manages the program settings.'''
def __init__(self, settings_file_name):
self.settings = None
self.dump_file_name = None
        if settings_file_name is None:
            return
        with open(settings_file_name, 'r') as settings:
            self.settings = json.load(settings)
def create_settings(self, new_managers, managers, connection_settings):
        if self.settings is not None:
return
self.settings = {'new_managers' : new_managers, 'managers' : managers, 'connection_settings' : connection_settings}
def save(self):
        if self.dump_file_name is None or self.settings is None:
            return
        with open(self.dump_file_name, 'w') as dump_file:
            json.dump(self.settings, dump_file)
if __name__ == '__main__':
loader = Loader('settings.json')
nmanager1 = {'name' : 'Петя',}
nmanager2 = {'name' : 'Борис',}
nmanager3 = {'name' : 'Вова',}
new_managers = {'pw1' : nmanager1, 'pw2' : nmanager2, 'pw3' : nmanager3,}
manager = {'name' : 'Василий',}
managers = {1488228 : manager,}
connection_settings = {'host' : 'somechars', 'username' : 'somechars', 'passwd' : 'somechars', 'database' : 'somechars'}
loader.create_settings(new_managers, managers, connection_settings)
loader.dump_file_name = 'settings.json'
print(loader.settings)
|
[
"dsimanok@bk.ru"
] |
dsimanok@bk.ru
|
eef2e035f96020685aaf6b0ac1d0d111b44c3a97
|
94b589fe80563baba803a4853a29fb8b8bee2b12
|
/venv/TestEngine/blog/views.py
|
29f83506c6c16638c83aed67ebe8ffd9c8168ec5
|
[] |
no_license
|
Python1337/CCM
|
ca9c81c73af8b3b0fbe765e9e63f05f7ca02a82f
|
08bf0106746de694020e60a080e457f69923b866
|
refs/heads/master
| 2023-03-11T04:57:41.247065
| 2021-02-18T11:56:13
| 2021-02-18T11:56:13
| 340,032,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,340
|
py
|
from django.shortcuts import render
from django.urls import reverse
from django.http import HttpResponse
from django.views.generic import View
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Post, Tag
from .utils import *
from .forms import TagForm, PostForm
from django.core.paginator import Paginator
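# posts_list: optional ?search= filter on the post title, paginated two posts per page.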
def posts_list(request):
search_query = request.GET.get('search', '')
if search_query:
posts = Post.objects.filter(title__icontains=search_query)
else:
posts = Post.objects.all()
    paginator = Paginator(posts, 2)
page_number = request.GET.get('page', 1)
page = paginator.get_page(page_number)
return render(request, 'blog/index.html', context={'page_object': page})
class PostAbout(View):
template = 'blog/about.html'
def get(self, request):
return render(request, self.template, context={})
class PostDetail(ObjectDetailMixin, View):
model = Post
template = 'blog/post_detail.html'
class PostCreate(LoginRequiredMixin, ObjectCreateMixin, View):
model_form = PostForm
template = 'blog/post_create_form.html'
raise_exception = True
class PostUpdate(LoginRequiredMixin, ObjectUpdateMixin, View):
model = Post
model_form = PostForm
template = 'blog/post_update_form.html'
raise_exception = True
class PostDelete(LoginRequiredMixin, ObjectDeleteMixin, View):
model = Post
template = 'blog/post_delete_form.html'
redirect_url = 'posts_list_url'
raise_exception = True
class TagCreate(LoginRequiredMixin, ObjectCreateMixin, View):
model_form = TagForm
template = 'blog/tag_create.html'
raise_exception = True
def tags_list(request):
tags = Tag.objects.all()
return render(request,'blog/tags_list.html', context={'tags': tags})
class TagUpdate(LoginRequiredMixin, ObjectUpdateMixin, View):
model = Tag
model_form = TagForm
template = 'blog/tag_update_form.html'
raise_exception = True
class TagDetail(ObjectDetailMixin, View):
model = Tag
template = 'blog/tag_detail.html'
class TagDelete(LoginRequiredMixin, ObjectDeleteMixin, View):
model = Tag
template = 'blog/tag_delete_form.html'
redirect_url = 'tags_list_url'
raise_exception = True
|
[
"wowartyom12a@inbox.ru"
] |
wowartyom12a@inbox.ru
|
21e0c6271798aca723cc58befb65b2e755533138
|
3f95904666cbecc5a65605e86f8b4dfe4797f8c5
|
/seven/timeseries.py
|
e08993380af5ac04c418f257f7f805c290abffe5
|
[] |
no_license
|
rlowrance/test7
|
60b2778e19d91c357304637d3e73d74c9bcd3b79
|
3535bd46bff602fc3ba35c080d38b30e75a97fe7
|
refs/heads/master
| 2021-07-18T11:42:20.784873
| 2017-10-24T14:27:52
| 2017-10-24T14:27:52
| 97,166,588
| 2
| 5
| null | 2017-08-16T15:06:16
| 2017-07-13T21:32:41
|
Python
|
UTF-8
|
Python
| false
| false
| 11,582
|
py
|
'''time series prediction
Copyright 2017 Roy E. Lowrance
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from abc import ABCMeta, abstractmethod
import collections
import pdb
import numbers
import numpy as np
import unittest
class CreateFeatures(object):
'create features file from a master file and possibly many associated information files'
def __init__(self):
self.n_input_records = None
self.n_output_records = None
self.skipped = None # collections.Counter for reasons input records were skipped
pass
def create(
self,
feature_makers=None,
master_file_records=None,
selected=None, # lambda index, master_record -> (use_record: Bool, maybe_error_message)
report_skipped_master_record=None, # lambda index, master_record, [None|feature_maker], msg -> None
# start optional arguments
verbose=False,
):
'yield sequence (features:Dict, index, master_record) of output records with features derived from the master file'
def error(msg):
print('error in feature maker %s feature_name %s feature_value %s' % (
feature_maker.name,
feature_name,
feature_value,
))
print(msg)
print('entering pdb')
pdb.set_trace()
assert feature_makers is not None
assert master_file_records is not None
assert selected is not None
assert report_skipped_master_record is not None
self.n_input_records = 0
self.n_output_records = 0
self.skipped = collections.Counter()
for index, master_record in master_file_records:
self.n_input_records += 1
if self.n_input_records % 10000 == 1:
print('creating features from master record %d index %s' % (self.n_input_records, index))
(use_record, msg) = selected(index, master_record)
if not use_record:
report_skipped_master_record(index, master_record, None, msg)
continue
# create features from the master_record
# the feature makers may incorporate data from other records
features_made = {}
stopped_early = False
# accumulate all the features from the feature makers
# check for errors on the way
for feature_maker in feature_makers:
maybe_features = feature_maker.make_features(index, master_record)
if isinstance(maybe_features, str):
report_skipped_master_record(index, master_record, feature_maker, maybe_features)
stopped_early = True
break
elif isinstance(maybe_features, dict):
for feature_name, feature_value in maybe_features.items():
if feature_name in features_made:
error('duplicate feature name')
elif not feature_name.startswith('id_') and not isinstance(feature_value, numbers.Number):
error('feature value is not numeric')
elif feature_name.endswith('_size') and feature_value < 0.0:
error('size feature is negative')
else:
features_made[feature_name] = feature_value
else:
print(feature_maker.name)
print(feature_maker)
print(maybe_features)
print(type(maybe_features))
error('unexpected return type from a feature_maker')
if stopped_early:
continue
self.n_output_records += 1
yield features_made, index, master_record
class FeatureMaker(object, metaclass=ABCMeta):
def __init__(self, name=None):
self.name = name # used in error message; informal name of the feature maker
@abstractmethod
    def make_features(self, ticker_index, tickercusip, ticker_record):
        'return errors:str or Dict[feature_name:str, feature_value:number]'
pass
FitPredictResult = collections.namedtuple(
'FitPredictResult',
(
'query_index',
'query_features',
'predicted_feature_name',
'predicted_feature_value',
'model_spec',
'prediction',
'fitted_model',
'n_training_samples',
)
)
class ExceptionFit(Exception):
def __init__(self, parameter):
self.parameter = parameter
def __str__(self):
return 'ExceptionFit(%s)' % str(self.parameter)
class FitPredict(object):
'fit models and predict targets'
def fit_predict(
self,
df_features=None,
df_targets=None,
make_model=None,
model_specs=None,
timestamp_feature_name=None,
already_seen_lambda=None, # lambda query_index, model_spec, predicted_feature_name: Bool
):
'yield either (True, result:FitPredictResult) or (False, error_msg:str)'
# df_targets: a sorted sequence of targets, sorted in timestamp order (y values)
sorted_targets = df_targets.sort_values(by=timestamp_feature_name)
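        # walk-forward evaluation: each query is predicted using only targets whose
        # timestamp is strictly earlier than the query's own timestamp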
for query_index in sorted_targets.index:
if query_index not in df_features.index:
yield False, 'no query feature for query index %s' % query_index
continue
query_features = df_features.loc[[query_index]] # must be a DataFrame
assert len(query_features) == 1
timestamp = query_features.iloc[0][timestamp_feature_name]
mask = sorted_targets[timestamp_feature_name] < timestamp
training_targets = sorted_targets.loc[mask]
if len(training_targets) == 0:
yield False, 'no training_targets for query index %s timestamp %s' % (query_index, timestamp)
continue
training_features = df_features.loc[training_targets.index]
if len(training_features) == 0:
yield False, 'no training_features for query index %s timestamp %s' % (query_index, timestamp)
continue
for predicted_feature_name, predicted_feature_value in sorted_targets.loc[query_index].items():
if predicted_feature_name.startswith('id_'):
continue # skip identifiers, as these are not features
if predicted_feature_name.endswith('_decreased') or predicted_feature_name.endswith('_increased'):
yield False, 'classification not yet implemented; target feature name %s' % predicted_feature_name
continue
for model_spec in model_specs:
if already_seen_lambda(query_index, model_spec, predicted_feature_name):
yield False, 'already seen: %s %s %s' % (query_index, model_spec, predicted_feature_name)
continue
m = make_model(model_spec, predicted_feature_name)
try:
# TODO: turn into keywords
m.fit(training_features, training_targets)
except ExceptionFit as e:
yield False, 'exception raised during fitting: %s' % str(e)
continue
predictions = m.predict(query_features)
assert len(predictions) == 1
prediction = predictions[0]
if np.isnan(prediction):
print('prediction is NaN', prediction)
print(model_spec)
pdb.set_trace()
if prediction is None:
yield False, 'predicted value was None: %s %s %s' % (query_index, model_spec, predicted_feature_name)
else:
yield (
True,
FitPredictResult(
query_index=query_index,
query_features=query_features,
predicted_feature_name=predicted_feature_name,
predicted_feature_value=predicted_feature_value,
model_spec=model_spec,
prediction=predictions[0],
fitted_model=m,
n_training_samples=len(training_features),
)
)
class FitPredictOutput(object, metaclass=ABCMeta):
'content of output file for program fit_predict.py'
@abstractmethod
def as_dict(self):
'return a dict with all the fields'
pass
class HpChoices(object, metaclass=ABCMeta):
    'iterates over HpSpec instances'
@abstractmethod
def __iter__(self):
'yield sequence of HpSpec objects'
pass
class Model(object, metaclass=ABCMeta):
@abstractmethod
def fit(self, df_training_samples_features, df_training_samples_targets):
'mutate self; set attribute importances: Dict[feature_name:str, feature_importance:Number]'
pass
@abstractmethod
def predict(self, df_query_samples_features):
'return predictions'
pass
class ModelSpec(object, metaclass=ABCMeta):
    'specification of a model name and its associated hyperparameters'
@abstractmethod
def __str__(self):
'return parsable string representation'
# Hint: Use method self._to_str(value) to convert individual values to strings
# That will make all the string representations use the same encoding of values to strings
pass
@staticmethod
@abstractmethod
def make_from_str(s):
'parse the representation returned by str(self) to create an instance'
pass
@abstractmethod
def iteritems(self):
        'yield each (hyperparameter name:str, hyperparameter value)'
pass
@abstractmethod
def __eq__(self, other):
pass
@abstractmethod
def __hash__(self):
pass
@abstractmethod
def __lt__(self, other):
pass
def _to_str(self, value):
'internal method. Convert value to a string. Use me in your __str__ method'
def remove_trailing_zeroes(s):
return (
s if s[-1] != '0' else
remove_trailing_zeroes(s[:-1])
)
if value is None:
return ''
elif isinstance(value, float):
return remove_trailing_zeroes(('%f' % value).replace('.', '_'))
elif isinstance(value, int):
return '%d' % value
else:
return str(value)
class TestHpChoices(unittest.TestCase):
def test_construction(self):
self.assertRaises(Exception, HpChoices, None)
class TestHpSpec(unittest.TestCase):
def test_construction(self):
self.assertRaises(Exception, HpSpec, None)
if __name__ == '__main__':
unittest.main()
|
[
"roy.lowrance@gmail.com"
] |
roy.lowrance@gmail.com
|
d20bc6650a0bfcd14b6dc9e13f603aec12c00025
|
ab6f9e3505b9d51c9a604866572c547982aa88e3
|
/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/ws/build/config.gypi
|
18f2146182d182064c12ed05f088d6ad171e01af
|
[
"MIT"
] |
permissive
|
adamtheschmidt/BoardGameTracking
|
890a5d556a916c1990f11d51466d10843d9384c1
|
13b6c1722a946f98fd2cee4866817ffb84e93ec1
|
refs/heads/master
| 2021-01-21T00:16:54.038790
| 2015-04-23T04:22:32
| 2015-04-23T04:22:32
| 31,579,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,651
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 44,
"host_arch": "x64",
"icu_data_file": "icudt54l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt54l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "54",
"node_install_npm": "true",
"node_prefix": "/",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/data/opt/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/home/andrei/.node-gyp/0.12.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/2.5.1 node/v0.12.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"init_version": "1.0.0",
"user": "1000",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/andrei/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"save_dev": "true",
"depth": "Infinity",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/usr/bin/zsh",
"prefix": "/usr/local",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/home/andrei/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "",
"node_version": "0.12.0",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/andrei/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": "",
"spin": "true"
}
}
|
[
"andreirtaylor@gmail.com"
] |
andreirtaylor@gmail.com
|
ed2d674bed27936b1176c7767143f026c35ea16c
|
9ed5f45abf25c5dfe70bc97ed76b726f22b4903b
|
/read_shiyo.py
|
fd3f488aae4bcb25840b46e5411e4851f781182b
|
[] |
no_license
|
aki4429/locdata
|
b2f95cf82d5b02183bafffd1565e6126daa1afd0
|
1af95670fd60209621dc25b3f81638468ae6fafb
|
refs/heads/master
| 2022-12-04T19:48:11.092611
| 2020-08-18T16:35:18
| 2020-08-18T16:35:18
| 288,508,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,296
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#code:
#:8=model, 8:11=spec, 11:17=piece, 20:24=legcolor, 24:31=fab1, 31:39=fab2
#39:40=toku
import glob
import csv
import datetime as dt
from dbread import Loc, session
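#Reads the first CSV in ./shiyo (CP932-encoded), keeps the non-domestic CH232/CH271
#rows that are not made to order, then groups them by product code and by delivery date.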
shiyoname = glob.glob('./shiyo/*.csv')[0]
data = []
#The delivery date comes as 'yyyy/mm/dd', so split on '/' and build a list of ints,
#then convert it to a datetime object
def s2d(hiduke):
hlist = hiduke.split('/')
hlist = [int(x) for x in hlist]
return dt.datetime(*hlist)
#convert to the sewn-product cover code
def repl(code):
model = code[:8].strip()
piece = code[11:17].strip()
fab1 = code[24:31].strip()
    #CH232 and the piece is not 35 or 37
if 'CH232' in model and piece != '35' and piece != '37' :
model = "013" + model + 'WI'
elif 'CH232' in model and (piece == '35' or piece == '37') :
model = "013" + model + 'W'
elif 'CH271' in model and (piece != '35' or piece != '37') :
model = "013" + model + 'I'
else:
model = "013" + model
return model + "-" + piece + "C " + fab1
#shiyo columns: product code [1], serial number [4], delivery date [5], ordered quantity [6]
#Keep CH232/CH271 rows whose spec is not domestic (N/NN/NE) and that are not by-order (Z)
with open( shiyoname, encoding='CP932') as f:
reader = csv.reader(f)
for row in reader:
if (row[1][8:11].strip() != "N" and row[1][8:11].strip() != "NN" and row[1][8:11].strip() != "NE" ) and ('CH232' in row[1][:8].strip() or 'CH271' in row[1][:8].strip()) and row[1][39:40] != "Z":
data.append([repl(row[1]), row[4], s2d(row[5]), row[6]])
#Group the rows by model
models = {}
for row in data:
    if row[0] in models:
        models[row[0]].append([row[1], row[2], row[3]])
    else:
        models[row[0]] = []
        models[row[0]].append([row[1], row[2], row[3]])
#Group the rows by delivery date
#shiyo columns: product code [1], serial number [4], delivery date [5], ordered quantity [6]
dates = {}
for row in data:
if row[2] not in dates:
dates[row[2]] = []
dates[row[2]].append([row[0], row[1], row[3]])
else:
dates[row[2]].append([row[0], row[1], row[3]])
#Sorted list of delivery dates
days = sorted(dates)
for d in days:
print(d.strftime('%m/%d') +":")
print(dates[d])
class Shiji:
def __init__(self, seiban, code, ):
|
[
"akiyoshi.oda@gmail.com"
] |
akiyoshi.oda@gmail.com
|
5f15528be224f35fb06cbfeb40c9415dba122387
|
00c71263dd3984599082534687a6eecff6b63278
|
/phrases.py
|
fa6da7c36dd1cb3d5848993055aa183f64336611
|
[] |
no_license
|
TheBlackParrot/discord-bot
|
1af35d00224c33d9167b784907c734e88b99b1ef
|
e83ab7db5f10732d1c588f1ad4ff7ca88ee9d9b3
|
refs/heads/master
| 2021-01-10T15:15:20.248729
| 2016-04-16T08:39:55
| 2016-04-16T08:39:55
| 54,535,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
import urllib.request;
import settings as setting;
def getAPhrase():
req = urllib.request.Request(
setting.PHRASES_URL,
data=None,
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.108 Safari/537.36'
}
);
page = urllib.request.urlopen(req);
return page.read().decode('utf-8');
|
[
"theblackparrot0@gmail.com"
] |
theblackparrot0@gmail.com
|
5a34b1c2505774cc28123bf7867e9d5b84e9422c
|
ea5de3d347ef4e1dcac9ee37da2d9850888d9ecc
|
/pawn_brotherhood.py
|
57460c2b269a4526698cd78561b3aa401f2e81a2
|
[] |
no_license
|
skoriy88/Chekio
|
4d50c18c54741c425d468a80a24ceb526a13dabe
|
fcbc291ca624cb9d5415128e605ea27d5e50983e
|
refs/heads/master
| 2020-03-18T11:26:09.966384
| 2018-05-25T13:52:54
| 2018-05-25T13:52:54
| 134,671,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
lst = {"b4", "d4", "f4", "c3", "e3", "g5", "d2"}
def safe_pawns(inp):
    # convert algebraic squares such as "b4" into (file, rank) number pairs
    new = {(ord(i[0]), int(i[1])) for i in inp}
    # a pawn is safe when another pawn sits on either square diagonally behind it
    safe = sum(1 for pawn in new if (pawn[0]-1, pawn[1]-1) in new or (pawn[0]+1, pawn[1]-1) in new)
    #print(safe)
    return safe

safe_pawns(lst)  # 6 for the sample set above (only d2 is undefended)
'''
print(ord('a'))
print(ord('b'))
print(ord('c'))
print(ord('d'))
print(ord('e'))
print(ord('f'))
print(ord('g'))
print(ord('h'))
'''
|
[
"skoriy88@gmail.com"
] |
skoriy88@gmail.com
|
852411151db8afff623d48a858ba720238508dd7
|
faaf12ab18978082233c09628b815a69e73868e4
|
/codechef/practice/easy/lebombs.py
|
3c70653a1ff04cd448e8e83575cc876a870c045a
|
[
"WTFPL"
] |
permissive
|
ferhatelmas/algo
|
6826bcf0be782cb102c1ee20dce8d4345e1fd6d2
|
7b867f6d2c8a9fb896f464168b50dfc115617e56
|
refs/heads/master
| 2023-08-18T19:59:58.435696
| 2023-08-14T10:16:00
| 2023-08-14T10:16:00
| 3,813,734
| 27
| 16
|
WTFPL
| 2020-10-25T23:00:16
| 2012-03-23T23:43:31
|
Java
|
UTF-8
|
Python
| false
| false
| 229
|
py
|
from sys import stdin
from itertools import groupby

# CodeChef LEBOMBS: a '1' (bomb) destroys its own building and both neighbours,
# so inside a maximal run of k '0' buildings (after padding both ends with '0')
# exactly max(k - 2, 0) buildings survive. The input alternates a count line and
# a building string, so the even-indexed lines after the first hold the strings.
for i, ln in enumerate(stdin):
    if i > 0 and i % 2 == 0:
        s = "0" + ln.rstrip() + "0"
        print(sum(max(len(list(g)) - 2, 0) for k, g in groupby(s) if k == "0"))
|
[
"elmas.ferhat@gmail.com"
] |
elmas.ferhat@gmail.com
|
26e6a160f86d55b8f88188dec7e5c82c434f707a
|
462385714beddead618fe0193a68685365793e5c
|
/Python/Python_Problems/Rosalind-master/scripts/Data_Structures.py
|
d8b1c2c42ff09d8ec41fa4000eeb26bab2e3b2f1
|
[] |
no_license
|
0n1udra/Learning
|
7e3f83a3a1ed850cc476ef738c76759616d9e12e
|
69e656327c782465318db85abdee519e2f79dab0
|
refs/heads/master
| 2022-12-04T06:29:29.894406
| 2020-08-17T20:47:47
| 2020-08-17T20:47:47
| 73,001,058
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,811
|
py
|
#!/usr/bin/env python
'''A ROSALIND bioinformatics script containing useful data structures.'''
class SuffixTree(object):
'''Creates a suffix tree for the provided word.'''
def __init__(self, word):
'''Initializes the suffix tree.'''
self.nodes = [self.Node(None, 0)]
self.edges = dict()
self.descendants_dict = dict()
if type(word) == str:
self.add_word(word)
class Node(object):
'''Suffix tree node class.'''
def __init__(self, parent, number):
self.parent = parent
self.number = number
self.children = []
def add_child(self, child):
self.children.append(child)
def remove_child(self, child):
self.children.remove(child)
def update_parent(self, parent):
self.parent = parent
def add_word(self, word):
'''Add a word to the suffix tree.'''
# Check to make sure word ends in '$'.
if word[-1] != '$':
word += '$'
self.word = word
self.n = len(self.word)
        for i in range(self.n):
parent_node, edge_start, overlap = self.insert_position(i, self.nodes[0])
if overlap:
p_edge_start, p_edge_end = self.edges[(parent_node.parent.number, parent_node.number)]
# Get the edge to insert
insert_len = 0
while word[edge_start:edge_start + insert_len] == word[p_edge_start:p_edge_start + insert_len]:
insert_len += 1
# Create a new node for insertion
new_node = self.Node(parent_node.parent, len(self.nodes))
new_node.add_child(parent_node)
self.add_node(parent_node.parent, p_edge_start, p_edge_start + insert_len - 1, new_node)
# Update the parent node since a new node is inserted above it
del self.edges[(parent_node.parent.number, parent_node.number)]
parent_node.parent.remove_child(parent_node)
parent_node.update_parent(new_node)
self.edges[(parent_node.parent.number, parent_node.number)] = [p_edge_start + insert_len - 1, p_edge_end]
# Add new child node
self.add_node(new_node, edge_start + insert_len - 1, self.n)
else:
# No insertion necessary, just append the new node.
self.add_node(parent_node, edge_start, self.n)
def insert_position(self, start_index, parent_node):
'''Determine the location and method to insert a suffix into the suffix tree.'''
for child_node in parent_node.children:
edge_start, edge_end = self.edges[(parent_node.number, child_node.number)]
if self.word[start_index:start_index + edge_end - edge_start] == self.word[edge_start:edge_end]:
return self.insert_position(start_index + edge_end - edge_start, child_node)
elif self.word[edge_start] == self.word[start_index]:
return child_node, start_index, True
return parent_node, start_index, False
def add_node(self, parent_node, edge_start, edge_end, child_node=None):
'''Adds a node and the associated edge to the suffix tree.'''
# Create child node, if necessary
if child_node is None:
child_node = self.Node(parent_node, len(self.nodes))
# Add node to node list
self.nodes.append(child_node)
# Add child to parent
parent_node.add_child(child_node)
# Add edge to edge dict
self.edges[(parent_node.number, child_node.number)] = [
edge_start, edge_end]
def print_edges(self):
'''Returns the string representations of the edges.'''
return [self.word[i:j] for i, j in self.edges.values()]
def total_descendants(self, base_node):
'''Returns the total number of descendants of a given node.'''
if base_node not in self.descendants_dict:
self.descendants_dict[base_node] = len(base_node.children) + sum([self.total_descendants(c) for c in base_node.children])
return self.descendants_dict[base_node]
def node_word(self, end_node):
'''Returns the prefix of the suffix tree word up to a given node.'''
current_word = ''
while end_node.number != 0:
temp_indices = self.edges[(end_node.parent.number, end_node.number)]
current_word = self.word[temp_indices[0]:temp_indices[1]] + current_word
end_node = end_node.parent
return current_word.strip('$')
class Trie(object):
'''Constructs a trie.'''
def __init__(self, word=None):
self.nodes = [[self.Node('', 1)]]
self.edges = []
if word is not None:
self.add_word(word)
class Node(object):
'''Trie node class.'''
def __init__(self, prefix, number):
self.prefix = prefix
self.number = number
self.depth = len(prefix)
class Edge(object):
'''Trie edge class.'''
def __init__(self, letter, par_node, chi_node):
self.letter = letter
self.parent_node = par_node
self.child_node = chi_node
def get_info(self):
'''Return the edge information compactly.'''
return ' '.join(map(str, [self.parent_node, self.child_node, self.letter]))
def add_word(self, word):
'''Adds a word to the trie.'''
if type(word) == list:
for w in word:
self.add_word(w)
else:
parent = self.find_parent(word)
for i in range(len(parent.prefix), len(word)):
new_node = self.Node(word[:i + 1], self.node_count() + 1)
self.edges.append(self.Edge(word[i], parent.number, self.node_count() + 1))
self.insert_node(new_node)
parent = new_node
def insert_node(self, node):
'''Determine the location to insert the current node.'''
if node.depth > self.depth():
self.nodes.append([node])
else:
self.nodes[node.depth].append(node)
def depth(self):
'''Returns the depth of the trie.'''
return len(self.nodes) - 1
def node_count(self):
'''Returns the total number of nodes.'''
count = 0
for trie_depth in self.nodes:
count += len(trie_depth)
return count
def find_parent(self, word):
'''Return the parent node of the word to be inserted.'''
for i in range(min(len(word), self.depth()), 0, -1):
for node in self.nodes[i]:
if word[:i] == node.prefix:
return node
return self.nodes[0][0]
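
# --- Usage sketch (illustrative, not part of the original module) ---
# Builds the suffix tree of 'banana' and a small trie, then prints their edges
# using only the classes defined above.
if __name__ == '__main__':
    tree = SuffixTree('banana')
    print(tree.print_edges())
    trie = Trie(['ATAGA', 'ATC', 'GAT'])
    print([edge.get_info() for edge in trie.edges])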
|
[
"dxzt99@gmail.com"
] |
dxzt99@gmail.com
|
52256cac147a6460ee037ec680d08897777c207a
|
0a5a5f6837789b75dc611783bec25bee7af7d6ae
|
/slacktime.py
|
6457471b8a0a914eb569b7b73b1e8e1bd3f4d8ce
|
[] |
no_license
|
afcarl/slacktime
|
9bca69b975f73b067ecb0e97311a6b242747429a
|
803958f50e7b7ef2fb557ebf5bac169aac05ee54
|
refs/heads/master
| 2020-03-22T10:29:25.552740
| 2018-03-06T16:49:40
| 2018-03-06T16:49:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,174
|
py
|
"""The core slacktime model class and some functions for evaluating and optimizing the model."""
import numpy as np
from optunity_modified import minimize_structured, par
from input_texts import InputTexts
class Model(object):
def __init__(self, switch_cost=0):
self.textmatrix = np.array([])
self.ticks = 0
self.subprocesses = dict()
self.subprocess_names = []
self.locked_by = None
self.switch_ticks = 0
self.switch_cost = switch_cost
self.length = 0
def append_subprocess(self, name, duration, active_word, buffer_len, input_processes):
self.subprocesses[name] = {
'duration': duration,
'active_word': active_word,
'buffer_len': buffer_len,
'input_processes': input_processes
}
self.subprocess_names.append(name)
def load_text(self, textarray):
self.textmatrix = textarray
self.textmatrix['onset'] -= self.textmatrix['onset'][0]
self.length = self.textmatrix['onset'][-1]
def update(self):
for subprocess_name in self.subprocess_names:
if subprocess_name == 'input':
# update active word
where = np.where(self.textmatrix['onset'] <= self.ticks)
if len(where[0]) != 0:
active_word = int(np.max(where))
if len(self.subprocesses[subprocess_name]['active_word']) == 0:
self.subprocesses[subprocess_name]['active_word'].append(active_word)
elif active_word > self.subprocesses[subprocess_name]['active_word'][-1]:
self.subprocesses[subprocess_name]['active_word'].append(active_word)
else:
# move to the latest word where accumulated evidence exceeds the duration threshold
if type(self.subprocesses[subprocess_name]['input_processes']) is list:
input_process = self.subprocesses[subprocess_name]['input_processes'][0][0]
else:
input_process = self.subprocesses[subprocess_name]['input_processes'][0]
where = np.where(self.textmatrix[input_process] >= self.subprocesses[input_process]['duration'])
if len(where[0]) != 0:
active_word = int(np.max(where))
if len(self.subprocesses[subprocess_name]['active_word']) == 0:
self.subprocesses[subprocess_name]['active_word'].append(active_word)
elif active_word > self.subprocesses[subprocess_name]['active_word'][-1]:
self.subprocesses[subprocess_name]['active_word'].append(active_word)
# if evidence has passed threshold for active word in head of queue, pop it
if len(self.subprocesses[subprocess_name]['active_word']) > 0:
active_word = self.subprocesses[subprocess_name]['active_word'][0]
duration = self.subprocesses[subprocess_name]['duration']
if self.textmatrix[subprocess_name][active_word] > duration:
self.subprocesses[subprocess_name]['active_word'].pop(0)
# if queue is too long, pop head of queue
while (len(self.subprocesses[subprocess_name]['active_word']) >
self.subprocesses[subprocess_name]['buffer_len']):
self.subprocesses[subprocess_name]['active_word'].pop(0)
locked = False
# lock/unlock
if subprocess_name == 'lemma_sel':
if self.locked_by == 'lemma_sel':
if len(self.subprocesses['lemma_sel']['active_word']) == 0:
# count down switch cost and then unlock
if self.switch_ticks > 0:
self.switch_ticks -= 1
else:
self.locked_by = None
elif self.locked_by == 'concept_prep':
locked = True
else:
if len(self.subprocesses['lemma_sel']['active_word']) > 0:
self.locked_by = 'lemma_sel'
self.switch_ticks = self.switch_cost
elif subprocess_name == 'concept_prep':
if self.locked_by == 'concept_prep':
if len(self.subprocesses['concept_prep']['active_word']) == 0:
if self.switch_ticks > 0:
self.switch_ticks -= 1
else:
self.locked_by = None
elif self.locked_by == 'lemma_sel':
locked = True
else:
if len(self.subprocesses['concept_prep']['active_word']) > 0:
self.locked_by = 'concept_prep'
self.switch_ticks = self.switch_cost
# let evidence accumulate
if (len(self.subprocesses[subprocess_name]['active_word']) > 0) and (locked is False):
active_word = self.subprocesses[subprocess_name]['active_word'][0]
flow_rate = self.textmatrix['flow_rate'][active_word]
if type(self.subprocesses[subprocess_name]['input_processes']) is list:
for input_process in self.subprocesses[subprocess_name]['input_processes']:
input_process_name = input_process[0]
input_process_flow = input_process[1]
if self.textmatrix[input_process_name][active_word] >= self.subprocesses[input_process_name]['duration']:
self.textmatrix[subprocess_name][active_word] += (input_process_flow * flow_rate)
else:
input_process_flow = self.subprocesses[subprocess_name]['input_processes'][1]
self.textmatrix[subprocess_name][active_word] += (input_process_flow * flow_rate)
# advance clock
self.ticks += 1
def advance_clock(self, ticks):
for tick in range(ticks):
self.update()
def build_model(task, duration_factor=1.0, buffer_len=1, shortcut_flow=0.0, switch_cost=0):
model_spec = [
{'name': 'input',
'duration': 1,
'active_word': [0],
'buffer_len': 1,
'input_processes': (None, 1)},
{'name': 'phon_feat_ex',
'duration': 75,
'active_word': [],
'buffer_len': 1,
'input_processes': ('input', 1)},
{'name': 'segment',
'duration': 125,
'active_word': [],
'buffer_len': 1,
'input_processes': ('phon_feat_ex', 1)},
{'name': 'phon_code_sel',
'duration': 90,
'active_word': [],
'buffer_len': 1,
'input_processes': ('segment', 1)},
{'name': 'lemma_sel',
'duration': 150,
'active_word': [],
'buffer_len': 1,
'input_processes': ('phon_code_sel', 1)},
{'name': 'concept_prep',
'duration': 175,
'active_word': [],
'buffer_len': 1,
'input_processes': ('lemma_sel', 1)},
{'name': 'lemma_ret',
'duration': 75,
'active_word': [],
'buffer_len': 1,
'input_processes': ('concept_prep', 1)},
{'name': 'phon_code_ret',
'duration': 80,
'active_word': [],
'buffer_len': 1,
'input_processes': ('lemma_ret', 1)},
{'name': 'syllab',
'duration': 125,
'active_word': [],
'buffer_len': 1,
'input_processes': ('phon_code_ret', 1)},
{'name': 'phonetic_enc',
'duration': 145,
'active_word': [],
'buffer_len': 1,
'input_processes': ('syllab', 1)},
{'name': 'output',
'duration': 1,
'active_word': [],
'buffer_len': 1,
'input_processes': ('phonetic_enc', 1)},
]
if task == 'shadowing':
for i in range(len(model_spec)):
if model_spec[i]['name'] == 'syllab':
model_spec[i]['input_processes'] = [('segment', shortcut_flow), ('phon_code_sel', 1), ('phon_code_ret', 1)]
elif task == 'interpreting':
for i in range(len(model_spec)):
if model_spec[i]['name'] == 'concept_prep':
model_spec[i]['buffer_len'] = int(buffer_len)
for i in range(len(model_spec)):
model_spec[i]['duration'] = duration_factor * model_spec[i]['duration']
model = Model(switch_cost=switch_cost)
for process in model_spec:
model.append_subprocess(**process)
return model
def default_inputs(folder='input_data', function_word_rate=2.0):
input_object = InputTexts(folder=folder, function_word_rate=function_word_rate)
return input_object.input_dict
def build_text(words):
num = len(words)
text = np.zeros(num, dtype=[('onset', 'i4'),
('duration', 'i4'),
('flow_rate', 'f4'),
('input', 'f4'),
('phon_feat_ex', 'f4'),
('segment', 'f4'),
('phon_code_sel', 'f4'),
('lemma_sel', 'f4'),
('concept_prep', 'f4'),
('lemma_ret', 'f4'),
('phon_code_ret', 'f4'),
('syllab', 'f4'),
('phonetic_enc', 'f4'),
('output', 'f4')])
text['onset'] = words[:, 0]
text['duration'] = words[:, 1]
text['flow_rate'] = words[:, 2]
return text
def evaluate(task='shadowing', input_dict=None, function_word_rate=2.0, **params):
if input_dict is None:
input_dict = default_inputs(function_word_rate=function_word_rate)
scores_dict = {
'100wpm': dict(),
'125wpm': dict(),
'150wpm': dict(),
'175wpm': dict(),
'200wpm': dict(),
}
mean_scores = {
'100wpm': [],
'125wpm': [],
'150wpm': [],
'175wpm': [],
'200wpm': [],
}
for wpm, titles in input_dict.items():
for title, words in titles.items():
model = build_model(task, **params)
text = build_text(words)
model.load_text(text)
model.advance_clock(model.length + 3000)
unique, counts = np.unique(model.textmatrix['output'], return_counts=True)
score = (len(words) - float(counts[0])) / len(words)
scores_dict[wpm][title] = score
for wpm, titles in scores_dict.items():
for title, score in titles.items():
mean_scores[wpm].append(score)
for wpm, scores in mean_scores.items():
mean_scores[wpm] = np.mean(scores)
return mean_scores, scores_dict
def loss_function(**params):
errors = []
pp_scores = {
'shadowing': {
'100wpm': .95,
'125wpm': .95,
'150wpm': .92,
'175wpm': .86,
'200wpm': .81
},
'interpreting': {
'100wpm': .87,
'125wpm': .81,
'150wpm': .74,
'175wpm': .69,
'200wpm': .59
}
}
for task in ['shadowing', 'interpreting']:
model_scores, _ = evaluate(task, **params)
errors += [model_scores[wpm] - pp_score for wpm, pp_score in pp_scores[task].items()]
return np.sqrt(np.mean(np.square(errors)))
def minimize_loss(params,
iterations=1,
cores=1,
method='random search'):
pmap = par.create_pmap(cores)
minimal_params, details, solver = minimize_structured(loss_function,
params,
method,
num_evals=iterations,
pmap=pmap)
return minimal_params, details, solver
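
# --- Usage sketch (illustrative; the onsets, durations and flow rates below are made up) ---
# Runs the shadowing model over a three-word toy text and prints the evidence
# accumulated in the output stage for each word.
if __name__ == '__main__':
    toy_words = np.array([[0, 300, 1.0],
                          [400, 300, 1.0],
                          [800, 300, 1.0]])
    model = build_model('shadowing')
    model.load_text(build_text(toy_words))
    model.advance_clock(model.length + 3000)
    print(model.textmatrix['output'])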
|
[
"jvparidon@gmail.com"
] |
jvparidon@gmail.com
|
acc3729c60b5a5df550c04e809b00cbb02b4549b
|
de8081ddbfb02b6a3a0a79456e334a0704fea4f2
|
/OHTServer.py
|
b43a9da353347d8143c874132de2f73d21236760
|
[] |
no_license
|
Cking616/NCDStudio
|
01fb5d99577dac8a06ff5003f45b83374ddf3189
|
f09b306f37554032192f22ae675f8a9b0348b7a3
|
refs/heads/master
| 2021-09-14T07:00:01.709782
| 2018-05-09T06:15:41
| 2018-05-09T06:15:41
| 113,954,526
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,611
|
py
|
import time
import asyncore
import socket
import threading
import ctypes
import inspect
recCMD = ""
writeCMDBuffer = []
revCondition = threading.Condition()
isConnected = False
zEncoder = 0
wheelEncoder = 0
motorStatus = False
cmdError = False
gFlag = 0
isEndTimer = False
rampState = 0
def parser_receive(receive):
global gFlag
global cmdError
if receive[:9] == 'ERR Flags':
gFlag = 0
return True
elif receive[:6] == 'Flags:':
cmd = receive.split()
dn = cmd[2]
gFlag = int(dn[-2])
return True
elif receive[:6] == 'ERROR-':
cmdError = True
return True
elif receive[-1] == '.':
tmp_state = receive.split(',')[1]
global rampState
rampState = int(tmp_state)
return True
else:
return False
class OhtHandler(asyncore.dispatcher_with_send):
def handle_read(self):
global recCMD
global cmdError
global gFlag
revCondition.acquire()
data = self.recv(1024)
if data:
recCMD = ""
tmp_rec = data.decode('utf-8')
if not parser_receive(tmp_rec):
recCMD = tmp_rec
revCondition.notify()
revCondition.release()
def handle_close(self):
global isConnected
isConnected = False
def writable(self):
return True
def handle_write(self):
global writeCMDBuffer
if not writeCMDBuffer:
return
cmd = writeCMDBuffer.pop(0)
self.send(cmd.encode('utf-8'))
while writeCMDBuffer:
cmd = writeCMDBuffer.pop(0)
self.send(cmd.encode('utf-8'))
class OhtServer(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
self.handler = None
def handle_accept(self):
conn, address = self.accept()
print('Incoming connection from %s' % repr(address))
self.handler = OhtHandler(conn)
global isConnected
isConnected = True
class OhtServerThread(threading.Thread):
def __init__(self, address, port):
threading.Thread.__init__(self)
self.address = address
self.port = port
def run(self):
server = OhtServer(self.address, self.port)
asyncore.loop()
class _Timer(threading.Thread):
def __init__(self, interval, func, args=[], kwargs={}):
threading.Thread.__init__(self)
self.interval = interval
self.func = func
self.args = args
self.kwargs = kwargs
self.finished = threading.Event()
def cancel(self):
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.func(*self.args, **self.kwargs)
self.finished.set()
class LoopTimer(_Timer):
def __init__(self, interval, func, args=[], kwargs={}):
_Timer.__init__(self, interval, func, args, kwargs)
def run(self):
while True:
if not self.finished.is_set():
self.finished.wait(self.interval)
self.func(*self.args, **self.kwargs)
else:
break
def timer_thread():
global isConnected
global isEndTimer
if isEndTimer:
return
if not isConnected:
return
global writeCMDBuffer
global zEncoder
global wheelEncoder
global motorStatus
global recCMD
writeCMDBuffer.append('E9')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('E9')
ret = revCondition.wait(5)
if ret:
wheel_encoder = recCMD
wheelEncoder = wheel_encoder.split()[0]
if wheelEncoder[1] == ':':
wheelEncoder = wheelEncoder[2:]
wheelEncoder = int(wheelEncoder)
revCondition.release()
# print(wheelEncoder)
# print('Wheel Encoder: %s' % wheel_encoder)
writeCMDBuffer.append('P2G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P2G6064')
ret = revCondition.wait(5)
if ret:
if len(recCMD) < 2 or recCMD[1] != ':':
z_encoder = recCMD
zEncoder = int(z_encoder)
revCondition.release()
# print("z:%d" % zEncoder)
# print('Z Encoder: %s' % z_encoder)
writeCMDBuffer.append('D')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('D')
ret = revCondition.wait(5)
if ret:
if recCMD[3] == ',':
motor_status = recCMD
if motor_status[:3] == '3ii':
motorStatus = True
elif motor_status[:3] == '3di':
motorStatus = True
else:
motorStatus = False
revCondition.release()
# print(motorStatus)
# print('Motor Status: %s' % motor_status)
def init_controller():
global isConnected
global motorStatus
global writeCMDBuffer
while not isConnected:
print("Wait for controller connect")
time.sleep(1.5)
writeCMDBuffer.append('P41')
time.sleep(0.2)
writeCMDBuffer.append('P4P460FE65537')
time.sleep(0.2)
writeCMDBuffer.append('P21')
while not motorStatus:
time.sleep(1)
writeCMDBuffer.append('D')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('D')
ret = revCondition.wait()
if ret:
if recCMD[3] == ',':
motor_status = recCMD
if motor_status[:3] == '3ii':
motorStatus = True
elif motor_status[:3] == '3di':
motorStatus = True
else:
motorStatus = False
revCondition.release()
print("Wait for Motor init")
writeCMDBuffer.append('P2P460FE196609')
time.sleep(0.5)
writeCMDBuffer.append('P22')
time.sleep(0.5)
def scan_flags():
global writeCMDBuffer
global wheelEncoder
global motorStatus
global recCMD
global gFlag
writeCMDBuffer.append('E9')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('E9')
ret = revCondition.wait()
if ret:
wheel_encoder = recCMD
wheelEncoder = wheel_encoder.split()[4]
if wheelEncoder[-2] == '1':
gFlag = 1
else:
gFlag = 0
revCondition.release()
while not gFlag:
writeCMDBuffer.append('m9fg601500')
time.sleep(0.3)
writeCMDBuffer.append('E9')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('E9')
ret = revCondition.wait()
if ret:
wheel_encoder = recCMD
wheelEncoder = wheel_encoder.split()[4]
if wheelEncoder[-2] == '1':
gFlag = 1
else:
gFlag = 0
revCondition.release()
time.sleep(0.3)
print("Scanning")
def go_wheel_location(speed, flag, encoder):
global gFlag
if gFlag == 0:
print("Flags Error, Reset Flag")
return False
global writeCMDBuffer
global rampState
if speed > 70:
speed = 70
if speed < 10:
speed = 10
cmd = 'r9lf%02d%d%d' % (speed, flag, encoder)
writeCMDBuffer.append(cmd)
rampState = 1
time.sleep(1)
while rampState:
time.sleep(1)
print("Doing")
return True
def go_y_location(speed, encoder):
global gFlag
if gFlag == 0:
print("Flags Error, Reset Flag")
return False
global writeCMDBuffer
cmd = 'P4A%03d%d' % (speed, encoder)
writeCMDBuffer.append(cmd)
time.sleep(0.2)
while True:
writeCMDBuffer.append('P4G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P4G6064')
revCondition.wait()
y_encoder = recCMD
cur_encoder = int(y_encoder)
revCondition.release()
err = encoder - cur_encoder
if -300 < err < 300:
break
writeCMDBuffer.append(cmd)
time.sleep(1.2)
def out_expand(speed, mm):
global writeCMDBuffer
writeCMDBuffer.append('P4G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P4G6064')
revCondition.wait()
y_encoder = recCMD
cur_encoder = int(y_encoder)
revCondition.release()
num = mm * 100
encoder = cur_encoder - num
go_y_location(speed, 0)
time.sleep(0.5)
def in_expand(speed, mm):
global writeCMDBuffer
cur_encoder = 0
writeCMDBuffer.append('P4G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P4G6064')
revCondition.wait()
y_encoder = recCMD
cur_encoder = int(y_encoder)
revCondition.release()
num = mm * 100
encoder = cur_encoder + num
go_y_location(speed, 97000)
time.sleep(0.5)
def grip():
cmd = 'm630t3700'
global writeCMDBuffer
writeCMDBuffer.append(cmd)
time.sleep(0.5)
writeCMDBuffer.append(cmd)
time.sleep(4)
def release():
cmd = 'm631t3700'
global writeCMDBuffer
writeCMDBuffer.append(cmd)
time.sleep(0.5)
writeCMDBuffer.append(cmd)
time.sleep(4)
def go_z_location(speed, encoder):
global gFlag
if gFlag == 0:
print("Flags Error, Reset Flag")
return False
global writeCMDBuffer
cmd = 'P2A%03d%d' % (speed, encoder)
writeCMDBuffer.append('P2P460FE196609')
time.sleep(0.5)
writeCMDBuffer.append(cmd)
time.sleep(0.2)
while True:
cur_encoder = 0
writeCMDBuffer.append('P2G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P2G6064')
revCondition.wait()
z_encoder = recCMD
cur_encoder = int(z_encoder)
revCondition.release()
err = encoder - cur_encoder
if -500 < err < 500:
# writeCMDBuffer.append('P2P460FE1')
# time.sleep(0.2)
break
print("Doing, Err:%d" % err)
writeCMDBuffer.append(cmd)
time.sleep(0.5)
writeCMDBuffer.append('P2P460FE196609')
time.sleep(0.2)
def stop_wheel():
writeCMDBuffer.append('r9tf000')
time.sleep(0.3)
writeCMDBuffer.append('m9fb72000')
time.sleep(0.2)
def stop_z():
writeCMDBuffer.append('P2P460FE1')
time.sleep(0.2)
writeCMDBuffer.append('P2P260407')
time.sleep(0.3)
def _async_raise(tid, exc_type):
"""raises the exception, performs cleanup if needed"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exc_type):
exc_type = type(exc_type)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exc_type))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(thread):
_async_raise(thread.ident, SystemExit)
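
# --- Usage sketch (illustrative; the listen address and port are assumptions) ---
# Starts the asyncore server thread, polls the controller status once per second
# with the loop timer, and runs the initialisation sequence once a controller connects.
if __name__ == '__main__':
    server_thread = OhtServerThread('0.0.0.0', 9000)
    server_thread.start()
    status_timer = LoopTimer(1.0, timer_thread)
    status_timer.start()
    init_controller()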
|
[
"cking616@mail.ustc.edu.cn"
] |
cking616@mail.ustc.edu.cn
|
0abdaf0ed81c2e215099e12d9bb2753b9ddb4872
|
cf25791401b8037695ba97ade84b99d402722f7c
|
/student/mymodule.py
|
b9a8aa579617ae50e236a6bc2c2b614bc898c266
|
[] |
no_license
|
thiha88/PythonHybridClass
|
646cf4961034c0fb70de5a3af6267d079633acfb
|
2e321b018e8d494d5c930f3957e6ccaa36f3b610
|
refs/heads/master
| 2020-08-28T21:54:34.126763
| 2020-03-01T09:24:03
| 2020-03-01T09:24:03
| 216,326,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
student = {
"name" : "Smith",
"code" : 123,
}
student1 = {
"mother" : "Cherry",
"father" : "Kole",
}
student2 = {
"age" : "20",
"class" : "programming",
}
|
[
"tun88thiha8@gmail.com"
] |
tun88thiha8@gmail.com
|
b932230cee4c86fc2a141ccec76dccb54beb67ce
|
0a5165cf386d7f9d049239fdcf9f36971661c69b
|
/layers/base.py
|
dd298ace052838f3ac9c63749ad21f14489f55a5
|
[] |
no_license
|
peterchen1457/Bayesian-Learning-SB-LWTA
|
5fda4bd2fe9df70571d1ac30f29c90125a3d4091
|
8e5303eb0dce6bdce2ba9ef1bf80fe6b576fb31c
|
refs/heads/master
| 2023-07-29T07:29:23.014082
| 2019-04-30T09:09:07
| 2019-04-30T09:09:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,418
|
py
|
# -*- coding: utf-8 -*-
"""
Base layer for the implementation of the layers comprising blocks with competing units,
as described in the accompanying paper.
The current file contains implementations for feedforward and convolutional layers.
@author: Konstantinos P. Panousis, Dept. of Informatics and Telecommunications,
National and Kapodistrian University of Athens, Greece
"""
import tensorflow as tf
from tensorflow.contrib.distributions import Bernoulli, OneHotCategorical
from utils.distributions import normal_kl, bin_concrete_kl, concrete_kl, kumaraswamy_kl
from utils.distributions import kumaraswamy_sample, bin_concrete_sample, concrete_sample
def variable_on_cpu(name, shape, initializer, dtype=tf.float32, constraint= None, trainable=True):
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer, constraint=constraint, dtype=dtype, trainable=trainable)
return var
# =============================================================================
# SB LWTA Layer
# =============================================================================
def SB_Layer(inp, K,U, S=128, bias=True, train = True, reuse=False, sbp=False,
temp_bern=0.67, temp_cat=0.67, activation='none', name='SB_layer'):
"""
Dense layer for the SB-LWTA model
Parameters:
inp: 2d tensor
The input to the current layer.
K: int
The number of blocks for the current layer
U: int
The number of units in each block.
bias: boolean
Flag denoting the use of bias. Default False.
train: boolean
Flag to alternate between train or test branches.
reuse: boolean
Flag to reuse or not the variables of the layer.
sbp: boolean
Flag to enable or disable the stick breaking process
temp_bern: float
The temperature for the bernoulli relaxation
temp_cat: float
The temperature for the categorical relaxation
activation: String
Select the activation function for the current layer.
name: str
The name of the current layer.
Returns:
out: 2d tensor
The output of local competition in the layer after masking with a sample of the IBP,
application of the linear operation and the addition of bias (if bias==True).
mW: 2d tensor
The mean of the weights. Used to load values when calling the compression script
masked_mw: 2d tensor
The mean of the weights of the layer, masked with a sample from the IBP.
Used for calculating the compression ability of the implementation.
masked_sw: 2d tensor
The variance of the weights of the layer, masked with a sample from the IBP.
Used for calculating the compression ability of the implementation.
activations: 2d tensor
The activations for the current batch. Used for plotting the probability of activations.
"""
#cutoff threshold
tau = 1e-2
name = name+'_'+activation
with tf.variable_scope(name, reuse = reuse):
# mean and variance of the weights
mW = variable_on_cpu('mW', [inp.get_shape()[1],K*U], initializer =tf.contrib.layers.xavier_initializer(),
dtype= tf.float32)
sW = variable_on_cpu('sW', [inp.get_shape()[1],K*U],
initializer = tf.initializers.random_normal(-5.,1e-2),
constraint = lambda x: tf.clip_by_value(x, -7.,x),
dtype= tf.float32)
sW = tf.nn.softplus(sW)
# variables and construction for the stick breaking process (if active)
if sbp:
# posterior concentration variables for the IBP
conc1 = variable_on_cpu('sb_t_u_1', [K],
initializer = tf.constant_initializer(K),
constraint=lambda x: tf.clip_by_value(x, -6., x),
dtype = tf.float32)
conc0 = variable_on_cpu('sb_t_u_2', [K],
initializer = tf.constant_initializer(2.),
constraint=lambda x: tf.clip_by_value(x, -6., x),
dtype = tf.float32)
conc1 = tf.nn.softplus(conc1)
conc0 = tf.nn.softplus(conc0)
# stick breaking construction
q_u = kumaraswamy_sample(conc1, conc0, sample_shape = [inp.get_shape()[1].value,K])
pi = tf.cumprod(q_u)
# posterior probabilities z
t_pi = variable_on_cpu('sb_t_pi', [inp.get_shape()[1],K], \
initializer = tf.initializers.random_uniform(-.1, .1),
constraint = lambda x: tf.clip_by_value(x, -5.,600.),\
dtype = tf.float32)
t_pi = tf.nn.sigmoid(t_pi)
biases=0.
if bias:
biases = variable_on_cpu('bias', [K*U], tf.constant_initializer(0.1))
# train branch
if train:
# reparametrizable normal sample
eps = tf.stop_gradient(tf.random_normal([inp.get_shape()[1].value, K*U]))
W = mW + eps * sW
z=1.
# stick breaking process and kl terms
if sbp:
# sample relaxed bernoulli
z_sample = bin_concrete_sample(t_pi,temp_bern)
z = tf.tile(z_sample, [1,U])
re = z*W
# kl terms for the stick breaking construction
kl_sticks = tf.reduce_sum(kumaraswamy_kl(tf.ones_like(conc1), tf.ones_like(conc0),
conc1, conc0, q_u))
kl_z = tf.reduce_sum(bin_concrete_kl(pi, t_pi,temp_bern, z_sample))
tf.add_to_collection('kl_loss', kl_sticks)
tf.add_to_collection('kl_loss', kl_z)
tf.summary.scalar('kl_sticks', kl_sticks)
tf.summary.scalar('kl_z', kl_z)
# cut connections if probability of activation less than tau
tf.summary.scalar('sparsity', tf.reduce_sum(tf.cast(tf.greater(t_pi/(1.+t_pi), tau), tf.float32))*U)
else:
re = W
# add the kl for the weights to the collection
kl_weights = tf.reduce_sum(normal_kl(tf.zeros_like(mW), tf.ones_like(sW), \
mW, sW,W))
tf.add_to_collection('kl_loss', kl_weights)
tf.summary.scalar('kl_weights', kl_weights)
# dense calculation
lam = tf.matmul(inp, re) + biases
# activation branches
if activation=='lwta':
assert U>1, 'The number of competing units should be larger than 1'
# reshape weight for LWTA
lam_re = tf.reshape(lam, [-1,K,U])
# calculate probability of activation and some stability operations
prbs = tf.nn.softmax(lam_re) + 1e-4
prbs /= tf.reduce_sum(prbs, -1, keepdims=True)
# relaxed categorical sample
xi = concrete_sample(prbs, temp_cat)
#apply activation
out = lam_re * xi
out = tf.reshape(out, tf.shape(lam))
# kl for the relaxed categorical variables
kl_xi = tf.reduce_mean(tf.reduce_sum(concrete_kl( tf.ones([S,K,U])/U, prbs, xi), [1]))
tf.add_to_collection('kl_loss', kl_xi)
tf.summary.scalar('kl_xi', kl_xi)
elif activation == 'relu':
out = tf.nn.relu(lam)
elif activation=='maxout':
lam_re = tf.reshape(lam, [-1,K,U])
out = tf.reduce_max(lam_re, -1)
else:
out = lam
# test branch. It follows the train branch, but replacing samples with means
else:
# we use re for accuracy and z for compression (if sbp is active)
re = 1.
z = 1.
if sbp:
mask = tf.cast(tf.greater(t_pi, tau), tf.float32)
z = Bernoulli(probs = mask*t_pi, name="q_z_test", dtype=tf.float32).sample()
z = tf.tile(z, [1,U])
re = tf.tile(mask*t_pi,[1,U])
lam = tf.matmul(inp, re*mW) + biases
if activation == 'lwta':
                # reshape and calculate the winners
lam_re = tf.reshape(lam, [-1,K,U])
prbs = tf.nn.softmax(lam_re) +1e-4
prbs /= tf.reduce_sum(prbs, -1, keepdims=True)
# apply activation
out = lam_re*concrete_sample(prbs, 0.01)
out = tf.reshape(out, tf.shape(lam))
elif activation == 'relu':
out = tf.nn.relu(lam)
elif activation=='maxout':
lam_re = tf.reshape(lam, [-1,K,U])
out = tf.reduce_max(lam_re, -1)
else:
out = lam
return out, mW, z*mW, z*sW**2, z
# =============================================================================
# SB LWTA Convolutional Layer
# =============================================================================
def SB_Conv2d(inp, ksize, S=128, padding='SAME', strides=[1,1,1,1],
bias = True, train = True, reuse= False, sbp=False, temp_bern=0.5, temp_cat=0.5,
activation='lwta', name='conv'):
"""
Convolutional layer for the SB-LWTA model, incorporating local competition.
Parameters:
inp: 4d tensor
The input to the current layer.
ksize: 5d tensor
The size of the kernels. The last 2 dimensions denote the blocks and units therein.
padding: str
The padding for the conv operation. Default: SAME. (see tf conv documentation).
strides: 4d tensor
The strides for the conv operation. Default: [1,1,1,1] (see tf conv).
bias: boolean
Flag denoting the use of bias.
train: boolean
Flag to alternate between train or not branches.
reuse: boolean
Flag to reuse or not the variables of the layer.
sbp: boolean
Flag to enable or disable the stick breaking process
temp_bern: float
The temperature for the bernoulli relaxation
temp_cat: float
The temperature for the categorical relaxation
activation: String
Select the activation function for the current layer.
name: str
The name of the current layer.
Returns:
out: 4d tensor
The output of the layer after the masked convolution operation, the addition of bias (if bias==True)
and the LWTA activation.
mW: 2d tensor
The mean of the weights. Used to load values when calling the compression script
masked_mw: 4d tensor
The mean of the weights of the convolutional kernel masked with a sample from the IBP (if active).
Used for calculating the compression ability of the implementation.
masked_sw: 4d tensor
The variance of the weights of the convolutional kernel masked with a sample from the IBP (if active).
Used for calculating the compression ability of the implementation.
activations: 2d tensor
The activations for the current batch. Used for plotting the probability of activations.
"""
K = ksize[-2]
U = ksize[-1]
tau = 1e-2
name = name+'_'+activation
with tf.variable_scope(name, reuse=reuse):
# variables for the weights
mW = tf.get_variable('mW', [ksize[0], ksize[1], ksize[2], K*U],
initializer=tf.contrib.layers.xavier_initializer(),
dtype= tf.float32)
sW= tf.get_variable('sW', [ksize[0], ksize[1], ksize[2], K*U],
initializer=tf.constant_initializer(-5.),
constraint = lambda x: tf.clip_by_value(x, -7., x ),
dtype= tf.float32)
sW = tf.nn.softplus(sW)
# variables and construction for the stick breaking process
if sbp:
# posterior concentrations for the Kumaraswamy distribution
conc1 = variable_on_cpu('sb_t_u_1', [K],
initializer = tf.constant_initializer(3.),
constraint=lambda x: tf.clip_by_value(x, -6., x),
dtype = tf.float32)
conc0 = variable_on_cpu('sb_t_u_2', [K],
initializer = tf.constant_initializer(1.),
constraint=lambda x: tf.clip_by_value(x, -6., x),
dtype = tf.float32)
conc1 = tf.nn.softplus(conc1)
conc0 = tf.nn.softplus(conc0)
# stick breaking construction
q_u = kumaraswamy_sample(conc1, conc0, sample_shape = [inp.get_shape()[1].value,K])
pi = tf.cumprod(q_u)
            # posterior bernoulli (relaxed) probabilities
t_pi = tf.get_variable('sb_t_pi', [K], \
initializer = tf.initializers.random_uniform(-5., 1.),
constraint = lambda x: tf.clip_by_value(x, -7., 600.),\
dtype = tf.float32)
t_pi = tf.nn.sigmoid(t_pi)
biases=0.
if bias:
biases = variable_on_cpu('bias', [K*U], tf.constant_initializer(0.0))
z = 1.
# train branch
if train:
# reparametrizable normal sample
eps = tf.stop_gradient(tf.random_normal(mW.get_shape()))
W = mW + eps*sW
re = tf.ones_like(W)
# stick breaking kl and operations
if sbp:
z_sample = bin_concrete_sample(t_pi, temp_bern)
z = tf.tile(z_sample,[U])
W *= z
kl_sticks = tf.reduce_sum(kumaraswamy_kl(tf.ones_like(conc1), tf.ones_like(conc0),
conc1, conc0, q_u))
kl_z = tf.reduce_sum(bin_concrete_kl(pi, t_pi,temp_bern, z_sample))
tf.add_to_collection('kl_loss', kl_sticks)
tf.add_to_collection('kl_loss', kl_z)
tf.summary.scalar('kl_sticks', kl_sticks)
tf.summary.scalar('kl_z', kl_z)
# if probability of activation is smaller than tau, it's inactive
tf.summary.scalar('sparsity', tf.reduce_sum(tf.cast(tf.greater(t_pi/(1.+t_pi), tau), tf.float32))*U)
# add the kl terms to the collection
kl_weights = tf.reduce_sum(normal_kl(tf.zeros_like(mW), tf.ones_like(sW), \
mW, sW, W))
tf.add_to_collection('losses', kl_weights)
tf.summary.scalar('kl_weights', kl_weights)
# convolution operation
lam = tf.nn.conv2d(inp, W, strides=strides, padding = padding) + biases
# choose activation based on input
if activation == 'lwta':
assert U>1, 'The number of competing units should be larger than 1'
# reshape weight to calculate probabilities
lam_re = tf.reshape(lam, [-1, lam.get_shape()[1], lam.get_shape()[2], K,U])
prbs = tf.nn.softmax(lam_re) + 1e-5
prbs /= tf.reduce_sum(prbs, -1, keepdims=True)
# draw relaxed sample and apply activation
xi = concrete_sample( prbs, temp_cat)
out = lam_re * xi
out = tf.reshape(out, tf.shape(lam))
# add the relative kl terms
kl_xi = tf.reduce_mean(tf.reduce_sum(concrete_kl( tf.ones_like(lam_re)/U, prbs, xi), [1]))
tf.add_to_collection('kl_loss', kl_xi)
tf.summary.scalar('kl_xi', kl_xi)
elif activation == 'relu':
# apply relu
out = tf.nn.relu(lam)
elif activation == 'maxout':
#apply maxout activation
lam_re = tf.reshape(lam, [-1, lam.get_shape()[1], lam.get_shape()[2], K,U])
out = tf.reduce_max(lam_re, -1, keepdims=False)
else:
print('Activation:', activation, 'not implemented.')
# test branch, same with train but replace samples with means
else:
re = tf.ones_like(mW)
z = 1.
# if sbp is active calculate mask and draw samples
if sbp:
mask = tf.cast(tf.greater(t_pi, tau), tf.float32)
z = Bernoulli(probs = mask*t_pi, name="q_z_test", dtype=tf.float32).sample()
z = tf.tile(z, [U])
re = tf.tile(mask*t_pi,[U])
# convolution operation
lam = tf.nn.conv2d(inp, re *mW, strides=strides, padding = padding) + biases
if activation == 'lwta':
# calculate probabilities of activation
lam_re = tf.reshape(lam, [-1, lam.get_shape()[1], lam.get_shape()[2], K,U])
prbs = tf.nn.softmax(lam_re) + 1e-5
prbs /= tf.reduce_sum(prbs,-1, keepdims=True)
# draw sample for activated units
out = lam_re * concrete_sample(prbs, 0.01)
out = tf.reshape(out, tf.shape(lam))
elif activation == 'relu':
# apply relu
out = tf.nn.relu(lam)
elif activation=='maxout':
# apply maxout operation
lam_re = tf.reshape(lam, [-1, lam.get_shape()[1], lam.get_shape()[2], K,U])
out = tf.reduce_max(lam_re, -1)
else:
print('Activation:', activation,' not implemented.')
return out, mW, z * mW, z * sW**2, z
# =============================================================================
# Just a custom conv layer with mean and variance for some checks
# =============================================================================
def customConv2d(inp, ksize, activation=None, reuse= False, train= True,\
padding='SAME', strides=[1,1,1,1], bias = True, batch_norm=False, name='conv' ):
with tf.variable_scope(name, reuse = reuse):
mW = tf.get_variable(name+'mW',
[ksize[0],ksize[1],ksize[2],ksize[3]*ksize[4]],
dtype= tf.float32)
sW = tf.get_variable(name+'sW',
[ksize[0],ksize[1],ksize[2],ksize[3]*ksize[4]],
initializer = tf.constant_initializer(-5.),
dtype= tf.float32)
sW = tf.nn.softplus(sW)
eps = tf.stop_gradient(tf.random_normal(mW.get_shape()))
W = mW + eps*sW
out = tf.nn.conv2d(inp, W, strides = strides, padding= padding)
if bias:
bias_conv = tf.get_variable('biases', ksize[-1]*ksize[-2], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
out = tf.nn.bias_add(out,bias_conv)
if activation:
out = activation(out)
return out, -1.,-1.
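
# --- Usage sketch (illustrative; the batch size, input shapes, K, U and layer names are assumptions) ---
# Builds one dense SB-LWTA layer on 784-dimensional inputs (64 blocks of 2 competing
# units) and one convolutional SB-LWTA layer, in TensorFlow 1.x graph mode as used above.
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, [128, 784])
    out, mW, masked_mw, masked_sw, z = SB_Layer(x, K=64, U=2, S=128, train=True,
                                                sbp=True, activation='lwta',
                                                name='sb_dense1')
    print(out)  # a [batch, K*U] tensor of locally competing activations

    h = tf.placeholder(tf.float32, [128, 28, 28, 1])
    conv_out, _, _, _, _ = SB_Conv2d(h, ksize=[5, 5, 1, 16, 2], S=128, train=True,
                                     sbp=True, activation='lwta', name='sb_conv1')
    print(conv_out)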
|
[
"noreply@github.com"
] |
peterchen1457.noreply@github.com
|
1e59c9981151054438ab88f0a110806c60f797b4
|
4321acd4ee0d46545b8fc89bd96ca91ef64dd1ad
|
/blog/models.py
|
103562f19ed296d1b0546ceb73b0a31d97147d2e
|
[] |
no_license
|
juliamattos/my-first-blog
|
f4c182ba9540bfcba6aa90c1c4c128433b6eccb3
|
2df3cca259cca44e96e12144057e3202da737a8b
|
refs/heads/master
| 2016-09-13T02:39:13.493515
| 2016-05-14T19:04:47
| 2016-05-14T19:04:47
| 58,825,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
        self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
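
# Usage sketch (illustrative), e.g. from a Django shell (python manage.py shell);
# some_user stands for any existing auth.User instance:
# >>> post = Post.objects.create(author=some_user, title='Hello', text='First post')
# >>> post.publish()  # sets published_date to now and saves the post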
|
[
"juliamattos.br@gmail.com"
] |
juliamattos.br@gmail.com
|
cb112458e1c759a10c2cc4f1720282056bf4e17b
|
f9e74113f4a396e028159e3f2a8e23fa01b03ca3
|
/Year 11/filestoragecalc.py
|
21b454d4998cb21e2bb76b6004a4757987472960
|
[] |
no_license
|
haydenso/school-projects
|
e6ea7ccc850a468a7352919fdfaf350289d666cc
|
1ea9f313fe7d32036c5b210b8fa9dc1736371483
|
refs/heads/master
| 2023-04-28T15:11:44.187732
| 2021-05-18T04:24:29
| 2021-05-18T04:24:29
| 254,290,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
def fileStorageCalc(h, w, bit):
    TOTAL_PIXELS = h * w
    # colour depth is bits per pixel, so divide by 8 to get the size in bytes
    FILESIZE = TOTAL_PIXELS * bit / 8
    # FILESIZE is in bytes
    KB = FILESIZE / 1024
    MB = KB / 1000
    return KB, MB

while True:
    height = int(input("Input number of pixels high: "))
    width = int(input("Input width in pixels: "))
    bitdepth = int(input("Input colordepth in bits: "))
    result = fileStorageCalc(height, width, bitdepth)  # result is a (KB, MB) tuple
    print("Filesize is " + str(result[0]) + "KB")
    print("Or " + str(result[1]) + "MB")
    break
|
[
"sohayden929@gmail.com"
] |
sohayden929@gmail.com
|
908e5c6310297a96be1a25e61f114b8c775bfb31
|
5f65df2a5e951bfbe9b042d3331a924145c28302
|
/week-1/max_pairwise_product.py
|
3ad38cbc361b2e436aff31c7f7f97f6ee9241720
|
[
"MIT"
] |
permissive
|
necromuralist/algorithmic-toolbox
|
f4aee836b9a9bc144622d03de59aa01885716f88
|
18430ac17045e60ec3540a3e5635202f5e8850eb
|
refs/heads/master
| 2022-08-31T07:57:06.665017
| 2021-02-24T03:37:04
| 2021-02-24T03:37:04
| 138,454,499
| 3
| 1
|
MIT
| 2022-08-23T17:26:44
| 2018-06-24T04:54:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
# python3
# Maximum Pairwise Product Problem
# Find the maximum product of two distinct numbers in a sequence of non-negative integers.
# Input: A sequence of non-negative integers.
# Output: The maximum value that can be obtained by multiplying two different elements from the sequence.
# Worked example: for the sequence 5 6 2 7 4 the pairwise products are
#
#        5    6    2    7    4
#   5    -   30   10   35   20
#   6   30    -   12   42   24
#   2   10   12    -   14    8
#   7   35   42   14    -   28
#   4   20   24    8   28    -
#
# so the maximum pairwise product is 42 (= 6 * 7).
#
# Given a sequence of non-negative integers a_1, ..., a_n, compute
#     max a_i * a_j over 1 <= i, j <= n with i != j
def max_pairwise_product_brute(numbers):
n = len(numbers)
max_product = 0
for first in range(n):
for second in range(first + 1, n):
max_product = max(max_product,
numbers[first] * numbers[second])
return max_product
def max_pairwise_product_take_two(numbers):
"""Finds the maximum pairwise product in te numbers
Args:
numbers (list): non-negative integers
Returns:
int: largest possible product from the numbers
"""
first_index = 0
first_value = 0
n = len(numbers)
assert n >= 2
for index in range(1, n):
if numbers[index] > first_value:
first_value = numbers[index]
first_index = index
second_value = 0
start = 1 if first_index == 0 else 0
for index in range(start, n):
if index != first_index and numbers[index] > second_value:
second_value = numbers[index]
return first_value * second_value
if __name__ == '__main__':
input_n = int(input())
input_numbers = [int(x) for x in input().split()]
print(max_pairwise_product_take_two(input_numbers))
|
[
"necromuralist@protonmail.com"
] |
necromuralist@protonmail.com
|
b51f98fcb2a8f15674e7d1def0ce92d1b5b8795c
|
052e8e19c3a679402fbb2563c0dd6ed596d45278
|
/Python/Exercise Files/Ch3/timedeltas_start.py
|
2fcfb3f3dacc6d5f89ebfe3c1fda58872f9f2027
|
[] |
no_license
|
amshapriyaramadass/learnings
|
a7739f257e310327c90a78780fea2ebff34a6325
|
e317058a404fde30bbc3c118ba1a1aaf533d8d4b
|
refs/heads/master
| 2022-12-23T11:09:42.069445
| 2020-10-01T21:42:08
| 2020-10-01T21:42:08
| 283,662,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
#
# Example file for working with timedelta objects
#
from datetime import date
from datetime import time
from datetime import datetime
from datetime import timedelta
# construct a basic timedelta and print it
print(timedelta(days=365,hours=3,minutes =1))
# print today's date
today=date.today()
print("today date is:,", today)
# print today's date one year from now
print("today's date one year from now:", today+timedelta(days=365))
# create a timedelta that uses more than one argument
print("2 weeks 3 days from now",today+timedelta(days =3, weeks=2))
# calculate the date 1 week ago, formatted as a string
t=datetime.now() - timedelta(weeks=1)
s = t.strftime("%A %B %d,%Y")
print("week ago date is :"+ s)
### How many days until April Fools' Day?
today = date.today()
afd= date(today.year, 4,1)
print("afd is ", afd)
# use date comparison to see if April Fool's has already gone for this year
# if it has, use the replace() function to get the date for next year
if afd < today:
print("April fools day is passed %d days" % ((today -afd).days))
afd=afd.replace(year=today.year+1)
print("april fool days:", afd)
# Now calculate the amount of time until April Fool's Day
time_to_afd = afd-today
print("its is just", time_to_afd.days,"until next April fool day")
|
[
"amshapriya248@gmail.com"
] |
amshapriya248@gmail.com
|
f40df01f7c05145dfe5006fcf3c6d5366d1eca7c
|
6785bd6122e6eb013da781dd6ab6f1989f243e8e
|
/ocqm_cohort_website/builder.py
|
1b019a9b2674642473fd1eff92160a90638082f1
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
seantis/ocqm_cohort_website
|
aed5cff8432fbde503c02ea253e7c1206f633f28
|
21701f2aeed3c317ead24bd6244a1adfe4d89c6a
|
refs/heads/master
| 2016-09-16T15:10:50.174636
| 2014-09-22T12:24:00
| 2014-09-22T12:24:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,673
|
py
|
import os
from babel.support import Translations
from copy import copy
from jinja2 import Environment, FileSystemLoader
from . import content
from . import db
from . import locale
from . import paths
def create_environment(theme_path, language):
loader = FileSystemLoader(searchpath=theme_path, encoding='utf-8')
environment = Environment(loader=loader, extensions=['jinja2.ext.i18n'])
translations = Translations.load(paths.get_locale_path(), language)
environment.install_gettext_translations(translations)
# global functions available in the templates
environment.globals.update(get_language_name=locale.get_language_name)
return environment
def include_paths_in_output(include_paths, output_path):
for include_path in include_paths:
paths.copy_files(
include_path,
os.path.join(output_path, os.path.split(include_path)[-1])
)
def render(theme_path, output_path, language, shared_context, pages):
# load jinja2 environment
env = create_environment(theme_path, language)
for page in pages:
template = env.get_template(page.template)
context = copy(shared_context)
context.update({
'page': page,
'pages': pages
})
template.stream(**context).dump(
os.path.join(output_path, '{}.html'.format(page.id)), 'utf-8'
)
def build_breadcrumbs(pages, current_page):
if current_page.id == pages[0].id: # root page
return (
(current_page.id, current_page.title),
)
else:
return (
(pages[0].id, pages[0].title),
(current_page.id, current_page.title),
)
def build_site(output_path, metadata={}):
metadata = metadata or db.load_metadata()
assert metadata, """
No metadata was found in path {}
""".format(output_path)
# the first language is the default language; the default language is
# stored in the root path, others in the ./[lang] path
languages = metadata['languages']
default_language = languages[0]
# the ./media directory in the user data directory will be included
# in the output if it is available
content_media = os.path.join(os.getcwd(), 'media')
for language in languages:
# put the default language in the root folder, the other languages in
# sub-folders
if language == default_language:
language_output_path = output_path
else:
language_output_path = os.path.join(output_path, language)
paths.ensure_directory(language_output_path)
# load the cohort
cohort = content.Cohort(metadata, languages, language)
# load the pages
assert len(metadata['pages']) == 5, """
Define exactly 5 pages (with index)
"""
pages = [
content.Page(p, languages, language) for p in metadata['pages']
]
# build the navigation
for page in pages:
page.breadcrumbs = build_breadcrumbs(pages, page)
# render the templates
render(
theme_path=paths.get_theme_path(),
output_path=language_output_path,
language=language,
shared_context={
'cohort': cohort,
'pages': pages
},
pages=pages
)
# copy the static files
if os.path.exists(content_media):
include_paths = (paths.get_static_path(), content_media)
else:
include_paths = (paths.get_static_path(), )
include_paths_in_output(include_paths, language_output_path)
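
# Usage sketch (illustrative; only the metadata keys actually read by build_site are
# shown, content.Cohort and content.Page may require additional keys):
# build_site('/path/to/output', metadata={
#     'languages': ['en', 'de'],   # first entry is the default language
#     'pages': [...],              # exactly 5 page definitions (index page first)
# })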
|
[
"denis@href.ch"
] |
denis@href.ch
|
84059663f00ee67d5175d368997580ee534a5781
|
53b36d25b56f2f01eb0014f36700b12d3a457403
|
/adminSystem/settings.py
|
83c65c71783b47f287586cdd1dbe7edf6fc29965
|
[] |
no_license
|
Kadzo-Dzombo/ideas_kenya
|
94e7782ce91970e65c68d325dc370a70a95d9fd4
|
3c562e20965a2eac4be7036ff474b5df8ea2bbfe
|
refs/heads/main
| 2023-08-31T10:50:01.100212
| 2021-10-14T18:46:14
| 2021-10-14T18:46:14
| 412,057,681
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,591
|
py
|
import os
from pathlib import Path
import django_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# BASE_DIR = Path(__file__).resolve().parent.parent
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-@$vk3fo9@lb+f5s+zp2y1=cftwj8#(p)4xbz6-k$th4bh$ugw1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'core',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'whitenoise.runserver_nostatic',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'adminSystem.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adminSystem.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
LOGOUT_REDIRECT_URL = '/admin/login/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
django_heroku.settings(locals())
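The settings module above hard-codes its SECRET_KEY and leaves DEBUG = True, which its own comments flag as unsafe for production. A minimal sketch of pulling both values from the environment instead; the variable names SECRET_KEY and DJANGO_DEBUG and the fallback key are assumptions for illustration, not part of the original repository:

import os

# Assumed environment variables; fall back to a dev-only key when unset.
SECRET_KEY = os.environ.get('SECRET_KEY', 'dev-only-insecure-key')
# Treat anything other than an explicit "true" as False.
DEBUG = os.environ.get('DJANGO_DEBUG', 'false').lower() == 'true'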
|
[
"50664032+Kadzo-Dzombo@users.noreply.github.com"
] |
50664032+Kadzo-Dzombo@users.noreply.github.com
|
d17776b6855dfcc141feea8086af080f6d09fc11
|
8bcf5bf18f6e9c1d5871ef8a88ef5921e03e9b02
|
/koldunov/api/urls.py
|
a4efa9857dea75f924535c42e25c43c5803313cc
|
[] |
no_license
|
volgoweb/rest_example
|
73f5fc26cce45c0aae49247768f74ffa2f4c01d4
|
7ee8b87914d6c69c80158e7e22a6b454c3e7f76b
|
refs/heads/master
| 2021-01-10T01:32:44.098668
| 2017-09-05T12:42:00
| 2017-09-05T12:42:00
| 51,444,569
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
# -*- coding: utf-8 -*-
from rest_framework import routers
from .views.product_views import CategoryViewSet, ItemViewSet
from .views.stat_views import StatViewSet
router = routers.SimpleRouter()
router.register(r'category', CategoryViewSet)
router.register(r'item', ItemViewSet)
router.register(r'stat', StatViewSet)
urlpatterns = router.urls
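SimpleRouter turns each register() call above into a list route and a detail route (for example category/ and category/<pk>/). A hypothetical project-level urls.py that would mount these routes; the 'api' module path and the URL prefix are assumptions, not taken from the repository:

# Hypothetical project urls.py for this 2017-era Django project.
from django.conf.urls import url, include

urlpatterns = [
    url(r'^api/', include('api.urls')),
]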
|
[
"volgoweb@bk.ru"
] |
volgoweb@bk.ru
|
5aff83327048090bb929482367b4ab95f3704e0d
|
9c6afa0faf423bc41107bec9019d076137074777
|
/picarray.py
|
f1358398e1f54876c7fdc655b6a65593343a8f93
|
[] |
no_license
|
marcwgit/gradpics
|
f49d4060376414a9837de8d80e01e77a433cf4b0
|
cd701c464d55b30556be5bb399bc1fc8e5ecf8ed
|
refs/heads/master
| 2021-01-24T10:59:30.815869
| 2016-10-10T17:26:00
| 2016-10-10T17:26:00
| 70,276,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,351
|
py
|
#!/usr/bin/python
from sys import argv
# from sys import re
script, flist, thumbsize, fullsize, widthnum, title = argv
print "script is: ", script
print "file list is: ", flist
print "thumb size is: ", thumbsize
print "full size is: ", fullsize
print "row width is: ", widthnum
print "title is: ", title
outfile = thumbsize + ".html"
infile = flist
imgsrcpre = '"><img src="' + thumbsize + '/'
rowwidth = int(widthnum)
# f = open('workfile1', 'w')
f = open(outfile, 'w')
ifile = open(infile, 'r')
# not needed print ifile.read()
myList = [q.strip() for q in ifile.readlines()]  # strip trailing newlines so the hrefs written below stay on one line
# myList=['GEO_7918.jpg' , 'GEO_7920.jpg' , 'GEO_7921.jpg' , 'GEO_7923.jpg' , 'GEO_7926.jpg' , 'GEO_7928.jpg' , 'GEO_7929.jpg']
## p = re.compile( './')
# top = '<html><head><title>Graduate Commencement 2014</title></head><body><table><tr><td colspan=5><H3 align="center">Graduate Commencement 2014</H3></td></tr><tr><td colspan=4></td><td><H5 align="center"><a href="http://dev.pcom.edu/pcomweb/marcwe/grad_hooding_2014/grad_hooding_2014.html"></a></H5></td></tr><tr>'
top = '<html><head><title>' + title + '</title></head><body><p align="center"><H3 align="center">' + title + '</H3></p>'
hoodselect = '<p><H4 style = "text-align:right;margin-right:100px;"><a href="thumb15.html">Graduation Images</a></H4></p><p><H5 style = "margin-left:60px;"><a href="thumb-hood10.html">Very Small</a> <a href="thumb-hood15.html">Small</a> <a href="thumb-hood20.html">Medium</a> <a href="thumb-hood30.html">Large</a></H5></p>'
commselect = '<p><H4 style = "text-align:right;margin-right:100px;"><a href="thumb-hood20.html">Hooding Images</a></H4></p><p><H5 style = "margin-left:60px;"><a href="thumb10.html">Small</a> <a href="thumb15.html">Medium</a> <a href="thumb20.html">Large</a></H5></p>'
f.write(top)
if len(thumbsize) == 12:
f.write(hoodselect)
else:
f.write(commselect)
i = 0
while i < len(myList):
print myList[i]
## p.sub( '', myList[i])
fil = myList[i]
f.write ('<td><a href="' + fullsize + '/')
f.write (fil)
# f.write ('"><img src="thumb15/')
f.write (imgsrcpre)
f.write (fil)
f.write ('" alt="')
f.write (fil)
f.write ('" /></a></td>')
f.write ('\n')
# if (i+1)%3==0: f.write ('</tr><tr>\n')
if (i+1)%rowwidth==0: f.write ('</tr><tr>\n')
i = i + 1
f.close()  # was f.closed, which only reads an attribute and never closes the file
ifile.close()
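The script reads five positional arguments after the script name: the image list file, the thumbnail directory (also used as the output HTML name), the full-size directory, the number of thumbnails per table row, and the page title. A hypothetical invocation, with the file and directory names assumed for illustration:

python picarray.py filelist.txt thumb15 full 5 "Graduate Commencement 2014"

This would write thumb15.html, linking each thumbnail in thumb15/ to the matching image in full/ and starting a new table row after every 5 images; a thumbnail-directory name that is exactly 12 characters long (such as thumb-hood15) selects the hooding navigation block instead of the commencement one.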
|
[
"noreply@github.com"
] |
marcwgit.noreply@github.com
|
f3615d9c654c8a6ee21eeeb224369c69cecde064
|
a266cfe89cf9c7347abf712e3b800468438448c2
|
/addons/magenenst_ghn_integration/controllers/controller.py
|
2e4e11ef1139e253f77fada25db92a35539f96fe
|
[] |
no_license
|
cialuo/project_template
|
db8b9f9c4115a6d44363a39e311363e8f3e7807f
|
2f875bdc5b730afeae3dd8dffafde852d0a44936
|
refs/heads/main
| 2023-03-09T17:50:55.398557
| 2021-02-24T00:17:59
| 2021-02-24T00:17:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http, _
from odoo.exceptions import AccessError, MissingError
from odoo.http import request
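The controller module above currently contains only imports. A minimal, hypothetical sketch of a controller class that could live here; the route path, parameters, and behaviour are assumptions and not taken from the original GHN integration add-on:

class GhnWebhookController(http.Controller):
    # Assumed webhook endpoint for the shipping provider; adjust the route to the real integration.
    @http.route('/ghn/webhook', type='json', auth='public', methods=['POST'], csrf=False)
    def ghn_webhook(self, **kwargs):
        # Echo the parsed JSON payload back so connectivity can be verified.
        return {'received': request.jsonrequest}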
|
[
"dungth@trobz.com"
] |
dungth@trobz.com
|
5ef953377a82188de0c437031ecd64571429c4dd
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/special-array-with-x-elements-greater-than-or-equal-x.py
|
01c11c68d89db61b02579b98174c1831b10e0923
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 3,409
|
py
|
# Time: O(n)
# Space: O(1)
# counting sort solution
class Solution(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
n = len(nums)
for i in xrange(len(count)):
if i == n:
return i
n -= count[i]
return -1
# Time: O(n)
# Space: O(1)
# counting sort + binary search solution
class Solution2(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
def inplace_counting_sort(nums, reverse=False): # Time: O(n)
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
for i in reversed(xrange(len(nums))): # inplace but unstable sort
while nums[i] >= 0:
count[nums[i]] -= 1
j = count[nums[i]]
nums[i], nums[j] = nums[j], ~nums[i]
for i in xrange(len(nums)):
nums[i] = ~nums[i] # restore values
if reverse: # unstable sort
nums.reverse()
inplace_counting_sort(nums, reverse=True)
left, right = 0, len(nums)-1
while left <= right: # Time: O(logn)
mid = left + (right-left)//2
if nums[mid] <= mid:
right = mid-1
else:
left = mid+1
return -1 if left < len(nums) and nums[left] == left else left
# Time: O(n)
# Space: O(n)
# counting sort + binary search solution
class Solution3(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
def counting_sort(nums, reverse=False): # Time: O(n), Space: O(n)
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
result = [0]*len(nums)
if not reverse:
for num in reversed(nums): # stable sort
count[num] -= 1
result[count[num]] = num
else:
for num in nums: # stable sort
count[num] -= 1
result[count[num]] = num
result.reverse()
return result
nums = counting_sort(nums, reverse=True) # extra O(n) space for stable sort
left, right = 0, len(nums)-1
while left <= right: # Time: O(logn)
mid = left + (right-left)//2
if nums[mid] <= mid:
right = mid-1
else:
left = mid+1
return -1 if left < len(nums) and nums[left] == left else left
# Time: O(nlogn)
# Space: O(1)
# sort solution
class Solution4(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort(reverse=True) # Time: O(nlogn)
for i in xrange(len(nums)): # Time: O(n)
if nums[i] <= i:
break
else:
i += 1
return -1 if i < len(nums) and nums[i] == i else i
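A quick sanity check for the solutions above, using the two published examples for this problem; under Python 2 (which the xrange calls assume) both assertions should pass:

if __name__ == "__main__":
    # nums = [3, 5]: exactly two values are >= 2, so the special value x is 2.
    assert Solution().specialArray([3, 5]) == 2
    # nums = [0, 0]: no x matches its own count, so the answer is -1.
    assert Solution4().specialArray([0, 0]) == -1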
|
[
"noreply@github.com"
] |
kamyu104.noreply@github.com
|
0271e2bd69581d5e5dc88b564ddc46e9e59ed80e
|
06289aabd78e6a0e5e5ab8360fffbf9a8504d615
|
/api/budget/serializers/expense_serializer.py
|
0f5fd044d6751912c22d145c7efad0ab32499638
|
[] |
no_license
|
jtclayt/finance_planner_api
|
6ca8130c761999abc01e03429a0676c0c803b640
|
06cd592e479145cbeb6acad4574021ef7515b33b
|
refs/heads/main
| 2023-08-15T02:20:34.455483
| 2021-09-22T16:15:49
| 2021-09-22T16:15:49
| 409,044,817
| 0
| 0
| null | 2021-09-22T05:08:48
| 2021-09-22T02:54:50
|
Python
|
UTF-8
|
Python
| false
| false
| 627
|
py
|
from rest_framework import serializers
from ..models.budget import Budget
from ..models.expense import Expense
class ExpenseSerializer(serializers.HyperlinkedModelSerializer):
'''Serializer for list view of expenses'''
url = serializers.HyperlinkedIdentityField(view_name='budget:expense-detail')
budget = serializers.PrimaryKeyRelatedField(queryset=Budget.objects.all())
class Meta:
model = Expense
fields = (
'id', 'url', 'description', 'annual_amount', 'monthly_amount',
'budget', 'user_id', 'created_at', 'updated_at'
)
read_only_fields = ('id',)
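The HyperlinkedIdentityField above reverses a URL named expense-detail inside a budget namespace. A hypothetical router registration that would provide that name; the ExpenseViewSet import and the module layout are assumptions, not taken from the repository:

# Hypothetical budget/urls.py; an ExpenseViewSet is assumed to exist in the app's views.
from rest_framework import routers
from .views import ExpenseViewSet

app_name = 'budget'

router = routers.DefaultRouter()
router.register(r'expenses', ExpenseViewSet, basename='expense')

urlpatterns = router.urls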
|
[
"jt.clayton92@yahoo.com"
] |
jt.clayton92@yahoo.com
|