| seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, ⌀) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
30823116350
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
# from django.views.decorators.http import require_POST
from shop.models import Product
from .models import Cart, CartItem
# from .forms import CartAddProductForm
from django.contrib.auth.decorators import login_required


@login_required
def cart_add(request, product_id, product_qty=None):
    obj, created = Cart.objects.update_or_create(user=request.user)
    product = get_object_or_404(Product, id=product_id)
    item, itemCreated = CartItem.objects.update_or_create(
        cart=obj, product=product)
    item.price = product.price
    if not itemCreated:
        item.quantity = item.quantity + 1
    # if item.quantity = request.GET['q']
    obj.items.add(item)
    item.save()
    obj.save()
    return redirect('cart:cart_detail')


@login_required
def cart_add_q(request, product_id, product_qty=None):
    obj, created = Cart.objects.update_or_create(user=request.user)
    product = get_object_or_404(Product, id=product_id)
    item, itemCreated = CartItem.objects.update_or_create(
        cart=obj, product=product)
    item.price = product.price
    # if item.quantity = request.GET['q']
    item.quantity = request.GET['q']
    if request.GET['q'] == "0":
        item.delete()
    else:
        obj.items.add(item)
        item.save()
        obj.save()
    return redirect('cart:cart_detail')
    # form = CartAddProductForm(request.POST)
    # if form.is_valid():
    #     cd = form.cleaned_data
    #     item.quantity=cd['quantity'],


def cart_remove(request, product_id):
    obj, created = Cart.objects.update_or_create(user=request.user)
    product = get_object_or_404(Product, id=product_id)
    cartItems = CartItem.objects.filter(cart=obj, product=product)
    cartItems.delete()
    return redirect('cart:cart_detail')


@login_required
def cart_detail(request):
    cart = Cart.objects.get(user=request.user)
    return render(request, 'cart/cart_detail.html', {'cart': cart})
|
studiosemicolon/onlineshop
|
cart/views.py
|
views.py
|
py
| 2,091
|
python
|
en
|
code
| 23
|
github-code
|
6
|
13119405899
|
from django.conf.urls import url, include
from . import views
from .models import *
from rest_framework import routers, permissions, serializers, viewsets
from oauth2_provider.ext.rest_framework import TokenHasReadWriteScope, TokenHasScope
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = UserProfile


class UserProfileViewSet(viewsets.ModelViewSet):
    permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
    queryset = UserProfile.objects.all()
    serializer_class = UserProfileSerializer


router = routers.DefaultRouter()
router.register(r'users', UserProfileViewSet)
# router.register(r'leagues', views.league_list)
urlpatterns = [
    url(r'^index', views.index),
    url(r'^', include(router.urls)),
    url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^leagues/$', views.LeagueList.as_view()),
    url(r'^leagues/(?P<pk>[0-9]+)/$', views.LeagueDetail.as_view()),
]
|
dammahom/matchpredict
|
gameapi/urls.py
|
urls.py
|
py
| 1,075
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37325658730
|
'''
5. Write a Pandas program to convert a dictionary to a Pandas series.
'''
dict1 = {"First Name" : ["Kevin","Lebron","Kobe","Michael"],
"Last Name" : ["Durant","James","Bryant","Jordan"],
"Team" : ["Brooklyn Nets","Los Angeles Lakers","Los Angeles Lakers","Chicago Bulls"]
}
import pandas as pd
pd_series = pd.Series(dict1)
print(pd_series)
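# Editor's note (hedged example): pd.Series(dict) keeps the dictionary keys as the
# index, so the Series printed above holds one *list* per key. With a flat dictionary
# (hypothetical values below) the conversion yields one scalar per index label, which
# is usually what "convert a dictionary to a Series" means; the dict-of-lists above
# maps more naturally onto a DataFrame.
flat_dict = {"Kevin": 35, "Lebron": 39, "Kobe": 41}
print(pd.Series(flat_dict))   # index: Kevin, Lebron, Kobe
print(pd.DataFrame(dict1))    # tabular view of the original dict of lists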
|
ErenBtrk/Python-Fundamentals
|
Pandas/PandasDataSeries/Exercise5.py
|
Exercise5.py
|
py
| 371
|
python
|
en
|
code
| 0
|
github-code
|
6
|
5308409788
|
## adsbib.py
## A tool for collecting BibTeX records from NASA ADS.
##
## Call with reference to a plaintext list of bibcodes,
## separated by newlines. Output will be to the same
## filename, appended with .bib
## >> python3 ads-bib.py bibcodes
##
## Note : To strip an existing BibTeX file down to bibcodes with vim,
## :v/^@/d
## :%s/@.*{//g
## :%s/,//g
import ads
## Setup the argument parser
import argparse
parser = argparse.ArgumentParser(description='bibcode to import')
parser.add_argument('bibcode', help='A bibcode for input')
args = parser.parse_args()
## Read bibcode input from file if not specified
#bibcode = args.bibcode
with open(args.bibcode) as f:
    bibcode = f.read().splitlines()
f.close()
## Query ADS with the set of bibcodes
q = ads.ExportQuery(bibcodes=bibcode, format='bibtex')
bibtex = q.execute()
## Write BibTeX entries to file
with open(args.bibcode + '.bib', 'a') as bibfile:
    print(bibtex, file=bibfile)
bibfile.close()
|
lowderchris/ads-bib
|
ads-bib.py
|
ads-bib.py
|
py
| 959
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14255729146
|
from _MOM import MOM
from _TFL import TFL
import _TFL._Meta.Object
import _TFL._Meta.Once_Property
from _TFL.predicate import first, paired
from _TFL.Decorator import getattr_safe
from _TFL.I18N import _, _T, _Tn
import itertools
import logging
class Entity (TFL.Meta.Object) :
"""Base class for scope-specific E_Type managers."""
def __init__ (self, etype, scope) :
self._etype = etype
self.home_scope = scope
# end def __init__
def __call__ (self, * args, ** kw) :
return self._etype (* args, scope = self.home_scope, ** kw)
# end def __call__
@TFL.Meta.Once_Property
@getattr_safe
def default_child (self) :
"""The default child of partial entity types, if any."""
dc = self._etype.default_child
if dc is not None :
try :
return self.home_scope [dc]
except KeyError :
pass
# end def default_child
@property
@getattr_safe
def ems (self) :
return self.home_scope.ems
# end def ems
@property
@getattr_safe
def E_Type (self) :
return self._etype
# end def E_Type
@TFL.Meta.Once_Property
@getattr_safe
def is_partial (self) :
return self._etype.is_partial
# end def is_partial
def ac_query_attrs (self, names, values, AQ = None) :
if AQ is None :
AQ = self._etype.AQ
for n in names :
if n in values :
try :
vq = getattr (AQ, n).AC (values [n])
except (ValueError, TypeError) :
pass
else :
if vq is not None :
yield vq
# end def ac_query_attrs
def ac_ui_display (self, names, matches) :
def _gen (self, names) :
for n in names :
try :
attr = self.get_etype_attribute (n)
except AttributeError :
disp = lambda v : getattr (v, "ui_display", v)
else :
disp = attr.ac_ui_display
yield disp
attr_displayers = list (_gen (self, names))
for match in matches :
yield tuple (d (v) for d, v in zip (attr_displayers, match))
# end def ac_ui_display
def get_etype_attribute (self, name) :
etype = self._etype
result = getattr (etype.AQ, name)._attr.kind
return result
# end def get_etype_attribute
def query (self, * filters, ** kw) :
"""Return all entities matching the conditions in `filters` and `kw`.
When no `filters` or `kw` are specified, `query` returns the
transitive extension of the type in question, i.e., all instances
of the type and all its subclasses.
When `strict = True` is specified as the only argument, `query`
returns the strict extension, i.e., all instances of the type in
question, but none of its subclasses.
All other filters reduce the number of instances returned to those
that satisfy the filter conditions.
"""
sort_key = kw.pop ("sort_key", None)
Type = self._etype
result = self.ems.query (Type, * filters, ** kw)
if sort_key is not None :
result = result.order_by (sort_key)
return result
# end def query
def raw_query_attrs (self, names, values = None, AQ = None) :
if AQ is None :
AQ = self._etype.AQ
def _gen (self, names, values, AQ) :
if values is None :
for n in names :
aq = getattr (AQ, n)
if aq is not None :
yield aq
else :
raise MOM.Error.Attribute_Unknown (None, n, None)
else :
for n in names :
if n in values :
aq = getattr (AQ, n)
v = values [n]
if aq is not None :
eq = aq.EQ (v)
if eq is not None :
yield eq
else :
raise MOM.Error.Attribute_Syntax \
(None, aq._attr, v)
else :
raise MOM.Error.Attribute_Unknown (None, n, v)
return tuple (_gen (self, names, values, AQ))
# end def raw_query_attrs
def __getattr__ (self, name) :
if name.startswith ("__") and name.endswith ("__") :
### Placate inspect.unwrap of Python 3.5,
### which accesses `__wrapped__` and eventually throws `ValueError`
return getattr (self.__super, name)
etype = self._etype
try :
return getattr (etype, name)
except Exception :
try :
return etype.attributes [name]
except KeyError :
raise AttributeError
# end def __getattr__
def __instancecheck__ (self, instance) :
return isinstance (instance, self.E_Type.Essence)
# end def __instancecheck__
def __subclasscheck__ (self, subclass) :
return issubclass (subclass, self.E_Type.Essence)
# end def __subclasscheck__
def __repr__ (self) :
return "<E_Type_Manager for %s of scope %s>" % \
(self._etype.type_name, self.home_scope.name)
# end def __repr__
# end class Entity
class An_Entity (Entity) :
"""Scope-specific manager for a specific type of anonymous entities."""
def example (self, full = True) :
return self (raw = True, ** self._etype.example_attrs (full))
# end def example
def query (self, * args, ** kw) :
### we need to define this function to hide the `query` attribute of
### the entities (which is a list of all attributes with the kind
### `Query`)
return TFL.Q_Result (())
# end def query
# end class An_Entity
class Id_Entity (Entity) :
"""Scope-specific manager for a specific essential object- or link-type."""
def __call__ (self, * args, ** kw) :
pid = kw.pop ("__pid", None)
result = self.__super.__call__ (* args, ** kw)
self.home_scope.add (result, pid = pid)
return result
# end def __call__
@property
@getattr_safe
def count (self) :
"""Return the transitive count of objects or links."""
return self.ems.count (self._etype, strict = False)
# end def count
@property
@getattr_safe
def count_strict (self) :
"""Return the strict count of objects or links."""
result = self.ems.count (self._etype, strict = True)
assert (not self.E_Type.is_partial) or result == 0
return result
# end def count_strict
def cooked_epk (self, epk, kw) :
(epk, kw), this = self._epkified (* epk, ** kw)
raw = kw.get ("raw", False)
epk_iter = (this._raw_epk_iter if raw else this._cooked_epk_iter)
return tuple (epk_iter (epk)), kw, this
# end def cooked_epk
def example (self, full = False) :
with self.home_scope.example_etm (self) as x_etm :
try :
return x_etm.instance_or_new \
(raw = True, ** x_etm._etype.example_attrs (full))
except MOM.Error.Partial_Type as exc :
pass
except Exception as exc :
if __debug__ :
logging.exception ("\n %s.example", self.type_name)
# end def example
def exists (self, * epk, ** kw) :
"""Return true if an object or link with primary key `epk` exists."""
epk, kw, this = self.cooked_epk (epk, kw)
kw.pop ("on_error", None)
if kw :
raise TypeError (kw)
return this.ems.exists (this._etype, epk)
# end def exists
def instance (self, * epk, ** kw) :
"""Return the object or link with primary key `epk` or None."""
epk, kw, this = self.cooked_epk (epk, kw)
return this.ems.instance (this._etype, epk)
# end def instance
def instance_or_new (self, * epk, ** kw) :
try :
result = self.instance (* epk, ** kw)
except MOM.Error.Error :
### let MOM.Entity handle this case
result = None
if result is None :
result = self (* epk, ** kw)
return result
# end def instance_or_new
def pid_query (self, pid) :
"""Return entity with persistent id `pid`."""
return self.ems.pid_query (pid, self._etype)
# end def pid_query
def query_s (self, * filters, ** kw) :
"""Return `self.query (* filters, ** kw)`
sorted by `kw.get ("sort_key", Type.sort_key)`.
"""
### Need to use `Q_Result_Composite` because `Type.sort_key` doesn't
### work with some backends (SQL, I am looking at you)
Type = self._etype
sort_key = kw.pop ("sort_key", Type.sort_key)
result = self.query (* filters, ** kw)
result = self.ems.Q_Result_Composite ([result], sort_key)
return result
# end def query_s
def query_1 (self, * filters, ** kw) :
"""Return the number of matches and the one single entity, if any,
for the conditions in `filters` and `kw`.
"""
q = self.query (* filters, ** kw).limit (2)
c = q.count ()
return c, q.first () if c == 1 else None
# end def query_1
def _epkified (self, * epk, ** kw) :
this = self
etype = self._etype
if epk and isinstance (epk [-1], etype.Type_Name_Type) :
this = self.home_scope [epk [-1]]
epk = epk [:-1]
etype = this._etype
### Don't pass `on_error` through here to avoid `Link.__call__`
### ending up with doubled error messages in case of
### `MOM.Error.Required_Missing`
kw = pkw = dict (kw)
kw.pop ("on_error", None)
if etype.args_as_kw and kw.get ("raw", False) :
pkw = etype._kw_polished \
( etype.epk_as_kw (* epk, ** kw)
, on_error = lambda * args, ** kw : False
)
epk = ()
return etype.epkified (* epk, ** pkw), this
# end def _epkified
# end class Id_Entity
class MD_Entity (Entity) :
"""Scope-specific manager for a specific type of meta-data entities."""
# end class MD_Entity
class Object (Id_Entity) :
"""Scope-specific manager for essential object-types."""
def ac_query_auto_split (self, text) :
result = []
et = self._etype
AQ = et.AQ
epk_aqc = [getattr (AQ, en).AC for en in et.epk_sig]
for epks in et.epk_splitter (text) :
single_value_queries = []
for v in epks :
acqs = [acq (v) for acq in epk_aqc]
single_value_queries.append (TFL.Filter_Or (* acqs))
result.append (self.query (* single_value_queries))
return result
# end def ac_query_auto_split
@property
@getattr_safe
def singleton (self) :
Type = self._etype
if Type.max_count == 1 :
try :
return first (self.query ().limit (2))
except IndexError :
pass
# end def singleton
def _cooked_epk_iter (self, epk) :
for (pka, v) in zip (self._etype.primary, epk) :
if v is not None :
try :
yield pka.cooked (v)
except MOM.Error.No_Such_Entity :
yield None
else :
yield None
# end def _cooked_epk_iter
def _raw_epk_iter (self, epk) :
for (pka, v) in zip (self._etype.primary, epk) :
if v is not None :
try :
yield pka.from_string (v)
except MOM.Error.No_Such_Entity :
yield None
else :
yield None
# end def _raw_epk_iter
# end class Object
class Link (Id_Entity) :
"""Scope-specific manager for essential link-types."""
def __call__ (self, * args, ** kw) :
try :
(args, kw), this = self._epkified (* args, ** kw)
self._checked_roles (* args, ** kw)
if not kw.get ("raw", False) :
args = tuple \
(self._role_to_cooked_iter (args, auto_create = True))
except MOM.Error.Required_Missing :
### let MOM.Entity handle this case
pass
else :
E_Type = self.E_Type
if E_Type.is_partial :
### try to find non-partial child fitting e-types of `roles`
roles = args [:E_Type.number_of_roles]
scope = self.home_scope
BT = scope.MOM.Id_Entity.E_Type
if all (isinstance (r, BT) for r in roles) :
CT = E_Type.child_np (roles)
if CT is not None :
return scope [CT.type_name] (* args, ** kw)
return self.__super.__call__ (* args, ** kw)
# end def __call__
def applicable_objects (self, objects) :
"""Returns all `objects` not refusing to be linked by `self._etype`."""
type_name = self._etype.Essence.type_name
return [o for o in objects if type_name not in o.refuse_links]
# end def applicable_objects
def r_query (self, * filters, ** kw) :
"""Return all links matching the conditions in `filters` and `kw`.
`r_query` behaves similar to `query` but provides the additional
features:
- if `kw` contains role names or other id-entity-attributes,
* the name can be a generic or a specific role name (`query`
only allows generic role names)
* the values passed can be `epk` in cooked or raw form (for
`query`, objects must be passed)
* the returned links are restricted to those linking the
specified objects
"""
Type = self._etype
map = getattr (Type, "role_map", None)
rkw = {}
if map :
for k in list (kw) :
aie = None
if k in map :
aie = Type.Roles [map [k]]
elif k in Type.attributes :
a = Type.attributes [k]
if isinstance (a.attr, MOM.Attr.A_Id_Entity) :
aie = a.attr
if aie is not None :
try :
obj = self._cooked_role (aie, kw.pop (k))
if not isinstance (obj, aie.P_Type) :
return []
rkw [aie.name] = obj
except MOM.Error.No_Such_Entity :
return TFL.Q_Result (())
if rkw :
kw = dict (kw, ** rkw)
result = self.query (* filters, ** kw)
return result
# end def r_query
def r_query_s (self, * filters, ** kw) :
"""Return `self.r_query (* filters, ** kw)`
sorted by `kw.get ("sort_key", Type.sort_key)`.
"""
### Need to use `Q_Result_Composite` because `Type.sort_key` doesn't
### work with some backends (SQL, I am looking at you)
sort_key = kw.pop ("sort_key", self._etype.sort_key)
result = self.r_query (* filters, ** kw)
result = self.ems.Q_Result_Composite ([result], sort_key)
return result
# end def r_query_s
def links_of (self, obj, * filters, ** kw) :
"""Return all links to `obj` (considers `obj` for each of the roles)."""
queries = []
r_query = self.ems.r_query
sort_key = kw.pop ("sort_key", False)
strict = kw.pop ("strict", False)
Type = self._etype
for r in Type.Roles :
if isinstance (obj, r.role_type) :
pk = self._cooked_role (r, obj)
queries.append \
(r_query (r.assoc, {r.name : pk}, strict = strict))
result = self.ems.Q_Result_Composite (queries)
if sort_key is not None :
result = result.order_by (Type.sort_key_pm (sort_key))
return result
# end def links_of
def _checked_roles (self, * epk, ** kw) :
if kw.get ("raw", False) :
epk = tuple (self._raw_epk_iter (epk))
else :
epk = tuple (self._role_to_cooked_iter (epk))
etype = self._etype
errors = []
r_query = self.ems.r_query
for r, pk in zip (etype.Roles, epk) :
if r.max_links >= 0 :
links = r_query (r.assoc, {r.name : pk}, strict = True)
nol = links.count ()
if nol >= r.max_links :
errors.append \
(MOM.Error.Multiplicity (etype, r, pk, epk, * links))
if errors :
exc = errors [0] if len (errors) == 1 else \
MOM.Error.Multiplicity_Errors (_T (etype.ui_name), errors)
raise exc
# end def _checked_roles
def _cooked_role (self, r, v) :
result = v
if v is not None and not isinstance (result, MOM.Entity) :
if not isinstance (v, (dict, tuple, list, int)) :
if not (v.startswith ("(") and v.endswith (")")) :
v = (v, )
result = r.from_string (v)
return result
# end def _cooked_role
def _raw_epk_iter (self, epk) :
for (pka, v) in zip (self._etype.primary, epk) :
try :
if getattr (pka, "role_type", None) :
### Allow role attributes to be passed as objects even if
### `raw` is specified
v = self._cooked_role (pka, v)
elif v is not None :
v = pka.from_string (v)
except MOM.Error.No_Such_Entity :
v = None
yield v
# end def _raw_epk_iter
def _role_to_cooked_iter (self, epk, auto_create = False) :
for (r, (pka, v)) in paired \
(self._etype.Roles, zip (self._etype.primary, epk)) :
if r is not None :
### Allow role attributes to be passed as raw values even if
### `raw` is not specified
try :
v = self._cooked_role (r, v)
except MOM.Error.No_Such_Entity :
if auto_create :
scope = self.home_scope
et = scope [r.role_type.type_name]
if et.is_partial and et.default_child :
et = et.default_child
v = et (* v, implicit = True, raw = True)
else :
v = None
elif v is not None :
try :
v = pka.cooked (v)
except MOM.Error.No_Such_Entity :
v = None
yield v
# end def _role_to_cooked_iter
_cooked_epk_iter = _role_to_cooked_iter
# end class Link
class Link1 (Link) :
"""Scope-specific manager for essential unary link-types."""
# end class Link1
class Link2 (Link) :
"""Scope-specific manager for essential binary link-types."""
### XXX dfc_synthesizer
# end class Link2
class Link3 (Link) :
"""Scope-specific manager for essential ternary link-types."""
# end class Link3
__doc__ = """
`MOM.E_Type_Manager` provides classes implementing scope-specific managers
for essential object and link types.
For each essential object and link type, a scope provides a
`E_Type_Manager` that is accessible under the `type_name` of the essential
type in question.
For instance, the `E_Type_Manager` for an essential
object type `BMT.Mouse` of a scope `s` can be accessed as::
s.BMT.Mouse
and provides methods to create and query instances of `BMT.Mouse`. A new
mouse named `mickey` is created by::
s.BMT.Mouse ("mickey")
The transitive extension of mice, i.e., the extension of `BMT.Mouse` and
all classes derived from it, is computed by the query::
s.BMT.Mouse.query ()
"""
if __name__ != "__main__" :
MOM._Export_Module ()
### __END__ MOM.E_Type_Manager
|
xiaochang91/tapyr
|
_MOM/E_Type_Manager.py
|
E_Type_Manager.py
|
py
| 20,532
|
python
|
en
|
code
| 0
|
github-code
|
6
|
17043247074
|
# https://atcoder.jp/contests/dp/tasks/dp_a
N = int(input())
h = list(map(int, input().split()))
cost = [0] * N
for i in range(1, N):
    if i == 1:
        cost[i] = abs(h[i] - h[i - 1]) + cost[i - 1]
    else:
        cost[i] = min(
            abs(h[i] - h[i - 1]) + cost[i - 1], abs(h[i] - h[i - 2]) + cost[i - 2]
        )
print(cost[N - 1])
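# Editor's note (hedged sketch, assuming N >= 2 as the problem guarantees): the
# recurrence above only looks back two positions, so the whole `cost` list can be
# replaced by two rolling variables. Shown for comparison only, not executed here:
#
#     prev2, prev1 = 0, abs(h[1] - h[0])            # cost[0], cost[1]
#     for i in range(2, N):
#         prev2, prev1 = prev1, min(abs(h[i] - h[i - 1]) + prev1,
#                                   abs(h[i] - h[i - 2]) + prev2)
#     print(prev1)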
|
atsushi-matsui/atcoder
|
best_choise/dp/dp_a.py
|
dp_a.py
|
py
| 349
|
python
|
en
|
code
| 0
|
github-code
|
6
|
32583976944
|
import json
import logging
import os
import threading
from time import sleep
from tqdm import tqdm
from logger import get_logger
machines = [
'4GB-rpi-4B-alpha',
'4GB-rpi-4B-beta',
'2GB-rpi-4B-beta',
'2GB-rpi-4B-alpha',
'cloud1',
'cloud2',
'desktop-remote'
]
ips = {
'4GB-rpi-4B-alpha': '10.0.0.101',
'4GB-rpi-4B-beta': '10.0.0.102',
'2GB-rpi-4B-beta': '10.0.0.104',
'2GB-rpi-4B-alpha': '10.0.0.103',
'cloud1': '10.0.0.201',
'cloud2': '10.0.0.202',
'desktop-remote': '10.0.0.1'}
masterIP = '10.0.0.1'
minActors = len(machines)
class Experiment:
def __init__(self):
self.currPath = os.path.abspath(os.path.curdir)
self.logger = get_logger('Experiment', level_name=logging.DEBUG)
def stopAllContainers(self):
self.logger.info(
'Stopping all containers on where this script is running ...')
os.system('./stopContainer.sh > /dev/null 2>&1')
# self.logger.info('Stopped all containers on where this script is running')
def runRemoteLogger(self):
global masterIP
self.logger.info('Starting RemoteLogger ...')
os.system(
'cd ./newLogger && '
'docker-compose run '
'--rm '
'--name RemoteLogger '
'remote_logger '
'RemoteLogger '
'%s 5001 '
'%s 5000 '
'> /dev/null 2>&1 &' % (masterIP, masterIP))
# self.logger.info('Ran RemoteLogger')
def runMaster(self, schedulerName, initWithLog=False):
global masterIP, minActors
self.logger.info('Starting Master ...')
os.system(
'cd ./newMaster && '
'docker-compose run '
'--rm '
'--name Master '
'master '
'Master '
'%s 5000 '
'%s 5001 '
'%s '
'--minHosts %d '
'%s '
'> /dev/null 2>&1 &'
% (
masterIP,
masterIP,
schedulerName,
minActors,
'--initWithLog True' if initWithLog else ''))
# self.logger.info('Ran Master')
def runActor(self):
global masterIP
self.logger.info('Starting Actor ...')
os.system(
'cd ./newActor && '
'docker-compose run '
'--rm '
'--name Actor '
'Actor '
'Actor '
'%s '
'%s 5000 '
'%s 5001 '
'> /dev/null 2>&1 &' % (
masterIP,
masterIP,
masterIP))
self.logger.info('Ran Actor')
def runUser(self):
self.logger.info('Starting User ...')
os.system(
'cd ./newUser && '
'docker-compose run '
'--rm '
'--name User '
'user '
'User '
'%s '
'%s 5000 '
'%s 5001 '
'GameOfLifePyramid '
'128 '
'--no-show '
'> /dev/null 2>&1 &' % (
masterIP,
masterIP,
masterIP))
self.logger.info('Ran User')
def stopUser(self):
self.logger.info('Stopping User ...')
os.system('./stopContainer.sh User > /dev/null 2>&1')
self.logger.info('Stopped User')
@staticmethod
def readResponseTime(filename):
with open(filename, 'r') as f:
responseTime = json.loads(f.read())
f.close()
os.system('rm -f %s' % filename)
if len(responseTime):
return list(responseTime.values())[0]
return 0
def removeLogs(self):
os.system(
'rm -rf %s/newLogger/sources/profiler/medianPackageSize.json' % self.currPath)
os.system(
'rm -rf %s/newLogger/sources/profiler/nodeResources.json' % self.currPath)
os.system(
'rm -rf %s/newLogger/sources/profiler/imagesAndRunningContainers.json' % self.currPath)
os.system(
'rm -rf %s/newLogger/sources/profiler/medianProcessTime.json' % self.currPath)
os.system(
'rm -rf %s/newLogger/sources/profiler/medianDelay.json' % self.currPath)
os.system(
'rm -rf %s/newLogger/sources/profiler/medianResponseTime.json' % self.currPath)
os.system(
'rm -rf %s/newLogger/sources/profiler/medianPackageSize.json' % self.currPath)
os.system(
'rm -rf %s/newMaster/sources/profiler/nodeResources.json' % self.currPath)
os.system(
'rm -rf %s/newMaster/sources/profiler/imagesAndRunningContainers.json' % self.currPath)
os.system(
'rm -rf %s/newMaster/sources/profiler/medianProcessTime.json' % self.currPath)
os.system(
'rm -rf %s/newMaster/sources/profiler/medianDelay.json' % self.currPath)
os.system(
'rm -rf %s/newMaster/sources/profiler/medianResponseTime.json' % self.currPath)
os.system('rm -f %s/newMaster/sources/decisions.json' % self.currPath)
self.logger.info('Removed logs')
def stopLocalTaskExecutor(self):
self.logger.info('Stopping local TaskExecutors ...')
os.system('./stopContainer.sh TaskExecutor > /dev/null 2>&1')
# self.logger.info('Stopped local TaskExecutors')
@staticmethod
def _sshRunScript(machine, script, event, synchronized=False):
if synchronized:
tmp = ''
else:
tmp = '&'
if script == './runActor.sh':
script = '%s %s %s %s' % (script, ips[machine], masterIP, masterIP)
print(script)
os.system('ssh %s \'%s\' > /dev/null 2>&1 %s' % (machine, script, tmp))
event.set()
@staticmethod
def manageRpi(runnable, script, synchronized=False):
global machines
events = [threading.Event() for _ in machines]
for i, machine in enumerate(machines):
threading.Thread(
target=runnable,
args=[machine, script, events[i], synchronized]).start()
for event in events:
event.wait()
def stopRemoteTaskExecutor(self):
self.logger.info('Stopping remote TaskExecutors ...')
self.manageRpi(self._sshRunScript, './stopTaskExecutors.sh')
# self.logger.info('Stopped remote TaskExecutors')
def stopRemoteActors(self):
self.logger.info('Stopping remote Actors ... ')
self.manageRpi(self._sshRunScript, './stopActor.sh', synchronized=True)
# self.logger.info('Stopped remote Actors')
def runRemoteActors(self):
self.logger.info('Starting remote Actors ...')
self.manageRpi(self._sshRunScript, './runActor.sh', synchronized=True)
# self.logger.info('Ran remote Actors')
def rerunNecessaryContainers(self, schedulerName, initWithLog=False):
self.stopAllContainers()
self.stopRemoteActors()
self.runRemoteLogger()
self.runMaster(schedulerName, initWithLog)
# self.runActor()
sleep(5)
self.runRemoteActors()
sleep(1)
def run(
self,
schedulerName,
initWithLog,
roundNum=None,
targetRound=None,
removeLog=False,
repeatTimes=100,
userMaxWaitTime=200):
responseTimeFilePath = '%s/newUser/sources/log/responseTime.json' % self.currPath
os.system('rm -f %s > /dev/null 2>&1' % responseTimeFilePath)
responseTimes = [0 for _ in range(repeatTimes)]
if removeLog:
self.removeLogs()
self.rerunNecessaryContainers(
schedulerName,
initWithLog)
if roundNum is None:
desc = schedulerName
else:
desc = '[%s-%d/%d]' % (schedulerName, roundNum, targetRound)
i = 0
processBar = tqdm(
total=repeatTimes,
desc=desc)
sleep(2)
while i < repeatTimes:
self.runUser()
# self.logger.debug('Waiting for responseTime log file to be created ...')
sleepCount = 0
while not os.path.exists(responseTimeFilePath):
sleepCount += 1
sleep(1)
if sleepCount > userMaxWaitTime:
break
if sleepCount > userMaxWaitTime:
self.rerunNecessaryContainers(schedulerName)
continue
self.stopUser()
responseTimes[i] = self.readResponseTime(
responseTimeFilePath)
self.saveEstimatedRecord(
schedulerName,
roundNum,
i,
initWithLog)
i += 1
processBar.update(1)
self.logger.info('[*] Result-[%d/%d]: %s', i, repeatTimes,
str(responseTimes))
self.stopLocalTaskExecutor()
self.stopRemoteTaskExecutor()
self.saveRes(
schedulerName,
responseTimes,
roundNum,
initWithLog=initWithLog)
self.logger.info(responseTimes)
def runInitWithLog(
self,
initWithLog,
roundNum,
iterNum):
schedulerName = 'NSGA2'
recordPath = './newMaster/sources/record.json'
os.system('rm -f %s' % recordPath)
self.rerunNecessaryContainers(
schedulerName,
initWithLog)
sleep(2)
for i in tqdm(range(iterNum)):
self.runUser()
while not os.path.exists(recordPath):
sleep(1)
self.saveEstimatedRecord(
schedulerName,
roundNum,
i,
initWithLog=initWithLog)
self.stopUser()
self.logger.info('Done init with log')
@staticmethod
def saveEstimatedRecord(
algorithmName,
roundNum,
iterationNum,
initWithLog=False):
os.system('mv '
'./newMaster/sources/record.json '
'./Evaluation-%s-%d-%d.json' % (
'%s%s' % (
algorithmName,
'InitWithLog' if initWithLog else ''),
roundNum,
iterationNum))
@staticmethod
def saveRes(
schedulerName,
responseTimes,
roundNum,
initWithLog):
fix = 'InitWithLog' if initWithLog else ''
if roundNum is None:
filename = '%s.json' % schedulerName
else:
filename = '%s%s-%d.json' % (
schedulerName,
fix,
roundNum)
with open(filename, 'w+') as f:
json.dump(responseTimes, f)
f.close()
if __name__ == '__main__':
experiment = Experiment()
targetRound_ = 1
repeatTimes_ = 100
waitTime = 300
# experiment.runInitWithLog(
# initWithLog=True,
# roundNum=targetRound_,
# iterNum=repeatTimes_)
for num in range(targetRound_):
# experiment.run(
# 'NSGA3',
# False,
# num + 1,
# targetRound_,
# repeatTimes=repeatTimes_,
# removeLog=True,
# userMaxWaitTime=waitTime)
experiment.run(
'NSGA2',
True,
num + 1,
targetRound_,
repeatTimes=repeatTimes_,
removeLog=False,
userMaxWaitTime=waitTime)
# experiment.run(
# 'NSGA2',
# False,
# num + 1,
# targetRound_,
# repeatTimes=repeatTimes_,
# removeLog=True,
# userMaxWaitTime=waitTime)
|
Cloudslab/FogBus2
|
containers/experiment.py
|
experiment.py
|
py
| 11,903
|
python
|
en
|
code
| 17
|
github-code
|
6
|
6814540794
|
from django.urls import path
from . import views
################################################################################
# Registering the app namespace...
# this will allow you to create dynamic Django hyperlinks in html files
# when using the django tag: {% url atomic:tracker ... %} for example.
app_name = "atomic"
################################################################################
urlpatterns = [
path("", views.index, name="index"),
path("tracker/", views.tracker, name="tracker"),
path("about/", views.about, name="about"),
path("api/", views.api, name="api"),
path("api/updateDB/", views.updateDB, name="updateDB")
]
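# Editor's note (hedged example): with app_name = "atomic" declared above, these
# routes can also be reversed from Python code by their namespaced names (the exact
# URL prefix depends on how the project-level urls.py includes this module):
#
#     from django.urls import reverse
#     reverse("atomic:tracker")   # e.g. "/atomic/tracker/" if included under "atomic/"
#
# which mirrors the {% url 'atomic:tracker' %} template tag mentioned above.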
|
chinchay/habit-tracker
|
backend/atomic/urls.py
|
urls.py
|
py
| 669
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71484222588
|
class UnionFindTree:
    """Disjoint-Set Data Structure
    Union-Find Tree
    complexity:
        init: O(n)
        find, unite, same: O(alpha(n))
    used in SRM505 div.2 900, ATC001 A, DSL1A(AOJ)
    """
    def __init__(self, n):
        self.par = list(range(n))  # parent
        self.rank = [0] * n  # depth of tree

    def find(self, x):
        if self.par[x] == x:
            return x
        else:
            self.par[x] = self.find(self.par[x])
            return self.par[x]

    def unite(self, x, y):
        x, y = self.find(x), self.find(y)
        if x == y:
            return
        if self.rank[x] < self.rank[y]:
            self.par[x] = y
        else:
            self.par[y] = x
            if self.rank[x] == self.rank[y]:
                self.rank[x] += 1

    def same(self, x, y):
        return self.find(x) == self.find(y)


N = int(input())
M = 10 ** 5
uft = UnionFindTree(2 * M)
for _ in range(N):
    x, y = map(int, input().split())
    x, y = x - 1, y - 1
    uft.unite(x, y + M)
x_dict, y_dict = dict(), dict()
for x in range(M):
    par_x = uft.find(x)
    x_dict[par_x] = x_dict.get(par_x, 0) + 1
for y in range(M, 2 * M):
    par_y = uft.find(y)
    y_dict[par_y] = y_dict.get(par_y, 0) + 1
ans = -N
for r in x_dict.keys():
    ans += x_dict.get(r, 0) * y_dict.get(r, 0)
print(ans)
|
knuu/competitive-programming
|
atcoder/abc/abc131_f.py
|
abc131_f.py
|
py
| 1,339
|
python
|
en
|
code
| 1
|
github-code
|
6
|
45561392364
|
import csv
CSV_PATH = ""
reader = csv.reader(open(CSV_PATH, 'rt', encoding='cp949'), delimiter="|")
columns = next(reader)
for idx, row in enumerate(reader):
    row = dict(zip(columns, row))
    print(row['우편번호'])  # postal code column
    if idx > 100:
        break
# If instead the data is being pulled from a database:
'''
Work with unicode internally and encode only at the very end, when the data has to
be written back out.
cp949 = can only handle Korean text.
utf8 = covers every language.
For MySQL:
create database (default encoding="utf8")
utf8 >> unicode : decoding
unicode >> utf8 : encoding
'''
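# Editor's note (hedged example): a minimal round trip illustrating the note above.
# Python 3 str objects are unicode; .encode() turns them into bytes in a concrete
# codec and .decode() turns such bytes back into str.
sample = '우편번호'                   # "postal code"
as_cp949 = sample.encode('cp949')     # Korean-only codepage
as_utf8 = sample.encode('utf-8')      # covers every language
assert sample == as_cp949.decode('cp949') == as_utf8.decode('utf-8')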
|
rheehyerin/programming_hw
|
read_file.py
|
read_file.py
|
py
| 670
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
646604887
|
import copy
import logging
import os
from gunpowder.caffe.net_io_wrapper import NetIoWrapper
from gunpowder.ext import caffe
from gunpowder.nodes.generic_predict import GenericPredict
from gunpowder.volume import VolumeType, Volume
logger = logging.getLogger(__name__)
class StupidPredict(object):
'''Augments a batch with network predictions.
Args:
prototxt (string): Filename of the network prototxt.
weights (string): Filename of the network weights.
inputs (dict): Dictionary from the names of input layers in the
network to :class:``VolumeType`` or batch attribute name as string.
outputs (dict): Dictionary from the names of output layers in the
network to :class:``VolumeType``. New volumes will be generated by
this node for each entry (if requested downstream).
volume_specs (dict, optional): An optional dictionary of
:class:`VolumeType` to :class:`VolumeSpec` to set the volume specs
generated volumes (``outputs``). This is useful to set the
``voxel_size``, for example, if they differ from the voxel size of
the input volumes. Only fields that are not ``None`` in the given
:class:`VolumeSpec` will be used.
use_gpu (int): Which GPU to use. Set to ``None`` for CPU mode.
'''
def __init__(
self,
prototxt,
weights,
inputs,
outputs,
volume_specs=None,
use_gpu=None):
for f in [prototxt, weights]:
if not os.path.isfile(f):
raise RuntimeError("%s does not exist"%f)
self.prototxt = prototxt
self.weights = weights
self.inputs = inputs
self.outputs = outputs
if use_gpu is not None:
logger.debug("Predict process: using GPU %d"%use_gpu)
caffe.enumerate_devices(False)
caffe.set_devices((use_gpu,))
caffe.set_mode_gpu()
caffe.select_device(use_gpu, False)
self.net = caffe.Net(self.prototxt, self.weights, caffe.TEST)
self.net_io = NetIoWrapper(self.net, self.outputs.values())
def __call__(self, input_data):
assert isinstance(input_data, dict)
self.net_io.set_inputs({
input_name: data
for input_name, data in input_data.items()
})
self.net.forward()
output = self.net_io.get_outputs()
return output
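    # Editor's note (hedged usage sketch): the paths, layer names and dictionary
    # values below are hypothetical placeholders, not files or types defined in this
    # repository; they only illustrate the constructor documented above. Kept as a
    # comment because it needs a trained caffe model to actually run.
    #
    #     predict = StupidPredict(
    #         prototxt='net.prototxt',
    #         weights='net.caffemodel',
    #         inputs={'data': 'raw'},              # input layer name -> volume/attribute
    #         outputs={'aff': 'pred_affinities'},  # output layer name -> volume
    #         use_gpu=0)
    #     out = predict({'data': raw_array})       # dict of output layer name -> array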
|
constantinpape/gunpowder-experiments
|
experiments/inference/stupid_predict.py
|
stupid_predict.py
|
py
| 2,500
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19373198646
|
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
url = 'http://parsinger.ru/selenium/1/1.html'
text = ['Name', 'Surname', 'Sursurname', 'Age', 'City', 'EMAIL']
with webdriver.Chrome() as browser:
    browser.get(url)
    inputs = browser.find_elements(By.CLASS_NAME, 'form')
    button = browser.find_element(By.ID, 'btn')
    for i, item in enumerate(inputs):
        print(item.text)
        item.send_keys(text[i])
    button.click()
    time.sleep(50)
|
spac3orange/Web-parsing-study
|
Selenium/search_elements/tasks/task1_5sek.py
|
task1_5sek.py
|
py
| 496
|
python
|
en
|
code
| 1
|
github-code
|
6
|
39209939169
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from scipy.interpolate import griddata
import copy
# import tecplot as tp
# with open('Rectangle_EXP.dat') as Rectangle_EXP:
# all_data =
# D rectangle = 100
def load_data(fname):
# To load tecplot dat to dataframe
# fname: file name
data = np.genfromtxt(fname, delimiter=' ', skip_header=10)
dataset = pd.DataFrame(data,columns=['X_D','Y_D','Z_D','U_Uinf'])
return dataset
def D_area(D,H):
# Calculate D_area for rectangle
D_area = ((4/math.pi)*D*H)**0.5
return D_area
def D_star(D,H):
# Calculate D_star for rectangle
D_star = 2*D*H/(D+H)
return D_star
def wake_scaling(dataset, D_gometry, D_scaling):
# To scale the wake.
# D_geometry: geometry diameter
# D_scaling: scale length
df = copy.copy(dataset)
df['X_D'] = df['X_D'] * D_gometry / D_scaling
df['Y_D'] = df['Y_D'] * D_gometry / D_scaling
df['Z_D'] = df['Z_D'] * D_gometry / D_scaling
return df
def extract_plane(dataset, xd):
# To interplate results in desired plane.
# dataset: normalized dataframe
# XD: a downstream position
df = dataset.loc[np.round(dataset['X_D'], 1)==round(xd,1)]
y = np.arange(np.min(df['Y_D']), np.max(df['Y_D']), 0.01)
z = np.arange(np.min(df['Z_D']), np.max(df['Z_D']), 0.01)
yg, zg = np.meshgrid(y, z)
u = griddata((df['Y_D'], df['Z_D']), df['U_Uinf'], (yg, zg), method='linear')
return yg, zg, u
def extract_line(yg,zg,u, zd):
yl = yg[np.round(zg,2)==round(zd,2)]
ul = u[np.round(zg,2)==round(zd,2)]
return yl,ul
# def grid_interplation(Rectangle):
# XX, YY, ZZ = np.meshgrid(Rectangle[['X_D']],Rectangle[['Y_D']],Rectangle[['Z_D']],sparse=True)
# %% Scaling length calculation
D_cir = 200; D_squ = 200; D_rec = 100; H_rec = 300
# Area-based Shammensodin and Port-Agel
D_area_cir = D_cir
D_area_squ = D_area(D_squ,D_squ)
D_area_rec = D_area(D_rec,H_rec)
# Area and perimeter based scaling length
D_star_cir = D_cir
D_star_squ = D_star(D_squ,D_squ)
D_star_rec = D_star(D_rec,H_rec)
# %% Read tecplot .dat file
f1 = 'Rectangle_EXP.dat'
f2 = 'Circle_EXP.dat'
f3 = 'Square_EXP.dat'
Rectangle = load_data(f1)
Circle = load_data(f2)
Square = load_data(f3)
# wake scaling
# D: loaded already
# %%
# D_area
Cir_area = wake_scaling(Circle,D_cir,D_area_cir)
Squ_area = wake_scaling(Square,D_squ,D_area_squ)
Rec_area = wake_scaling(Rectangle,D_rec,D_area_rec)
# %%
# D_star
Cir_star = wake_scaling(Circle,D_cir,D_star_cir)
Squ_star = wake_scaling(Square,D_squ,D_star_squ)
Rec_star = wake_scaling(Rectangle,D_rec,D_star_rec)
# %% Interpolation at postions wanted:
xd = np.linspace(1.,5.,5)
zd = np.linspace(0.5,0.,3)
# for i,x in enumerate(xd):
# yg, zg, u = extract_plane(Cir_area,x)
# for j,z in enumerate(zd):
# plt.subplot(len(zd),len(xd),len(zd)*i + j+1)
# yl,ul = extract_line(yg,zg,u, z)
# ul = ul[yl<0.6]; yl = yl[yl<0.6]
# ul = ul[yl>-1]; yl = yl[yl>-1]
# plt.plot(yl,ul)
# for i,x in enumerate(xd):
# yg, zg, u = extract_plane(Squ_area,x)
# for j,z in enumerate(zd):
# plt.subplot(len(zd),len(xd),len(zd)*i + j+1)
# yl,ul = extract_line(yg,zg,u, z)
# ul = ul[yl<1]; yl = yl[yl<1]
# ul = ul[yl>-1]; yl = yl[yl>-1]
# plt.plot(yl,ul)
# for i,x in enumerate(xd):
# yg, zg, u = extract_plane(Rec_area,x)
# for j,z in enumerate(zd):
# plt.subplot(len(zd),len(xd),len(zd)*i + j+1)
# yl,ul = extract_line(yg,zg,u, z)
# ul = ul[yl<1]; yl = yl[yl<1]
# ul = ul[yl>-1]; yl = yl[yl>-1]
# plt.plot(yl,ul)
# plt.show()
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16,
}
# %% plot D_area
fig1, axs1 = plt.subplots(len(zd),len(xd), sharex=True, sharey=True, gridspec_kw={'hspace': 0, 'wspace': 0.1}, figsize=(30,10))
fig1.suptitle('U/U_inf')
axs1[0,0].set_ylabel('Z/Darea=0.5')
axs1[1,0].set_ylabel('Z/Darea=0.25')
axs1[2,0].set_ylabel('Z/Darea=0')
# xlabel
for i,x in enumerate(xd):
axs1[2,i].set_xlabel('Y/Darea')
axs1[0,0].set_title('X/Darea=1')
axs1[0,1].set_title('X/Darea=2')
axs1[0,2].set_title('X/Darea=3')
axs1[0,3].set_title('X/Darea=4')
axs1[0,4].set_title('X/Darea=5')
plt.setp(axs1, xlim=(-0.99,0.99))
for i,x in enumerate(xd):
yg, zg, u = extract_plane(Cir_area,x)
for j,z in enumerate(zd):
yl,ul = extract_line(yg,zg,u, z)
ul = ul[yl<0.6]; yl = yl[yl<0.6]
ul = ul[yl>-1]; yl = yl[yl>-1]
axs1[j, i].plot(yl,ul,color='grey',marker='o', fillstyle='full', markevery=8,markersize=12,linestyle='None')
for i,x in enumerate(xd):
yg, zg, u = extract_plane(Squ_area,x)
for j,z in enumerate(zd):
yl,ul = extract_line(yg,zg,u, z)
ul = ul[yl<0.9]; yl = yl[yl<0.9]
ul = ul[yl>-1]; yl = yl[yl>-1]
axs1[j, i].plot(yl,ul,color='red',marker='s', fillstyle='none', markevery=8,markersize=12,linestyle='None',markeredgewidth = 2)
for i,x in enumerate(xd):
yg, zg, u = extract_plane(Rec_area,x)
for j,z in enumerate(zd):
yl,ul = extract_line(yg,zg,u, z)
ul = ul[yl<0.9]; yl = yl[yl<0.9]
ul = ul[yl>-1]; yl = yl[yl>-1]
axs1[j, i].plot(yl,ul,color='blue',marker='d', fillstyle='none', markevery=8,markersize=12,linestyle='None',markeredgewidth = 2)
# %% plot D_star
fig2, axs2 = plt.subplots(len(zd),len(xd), sharex=True, sharey=True, gridspec_kw={'hspace': 0, 'wspace': 0.1}, figsize=(30,10))
fig2.suptitle('U/U_inf')
axs2[0,0].set_ylabel('Z/Dstar=0.5')
axs2[1,0].set_ylabel('Z/Dstar=0.25')
axs2[2,0].set_ylabel('Z/Dstar=0')
# xlabel
for i,x in enumerate(xd):
axs2[2,i].set_xlabel('Y/Dstar')
axs2[0,0].set_title('X/Dstar=1')
axs2[0,1].set_title('X/Dstar=2')
axs2[0,2].set_title('X/Dstar=3')
axs2[0,3].set_title('X/Dstar=4')
axs2[0,4].set_title('X/Dstar=5')
plt.setp(axs2, xlim=(-0.99,0.99))
for i,x in enumerate(xd):
yg, zg, u = extract_plane(Cir_star,x)
for j,z in enumerate(zd):
yl,ul = extract_line(yg,zg,u, z)
ul = ul[yl<0.6]; yl = yl[yl<0.6]
ul = ul[yl>-1]; yl = yl[yl>-1]
axs2[j, i].plot(yl,ul,color='grey',marker='o', fillstyle='full', markevery=8,markersize=12,linestyle='None')
for i,x in enumerate(xd):
yg, zg, u = extract_plane(Squ_star,x)
for j,z in enumerate(zd):
yl,ul = extract_line(yg,zg,u, z)
ul = ul[yl<0.9]; yl = yl[yl<0.9]
ul = ul[yl>-1]; yl = yl[yl>-1]
axs2[j, i].plot(yl,ul,color='red',marker='s', fillstyle='none', markevery=8,markersize=12,linestyle='None',markeredgewidth = 2)
for i,x in enumerate(xd):
yg, zg, u = extract_plane(Rec_star,x)
for j,z in enumerate(zd):
yl,ul = extract_line(yg,zg,u, z)
ul = ul[yl<0.9]; yl = yl[yl<0.9]
ul = ul[yl>-1]; yl = yl[yl>-1]
axs2[j, i].plot(yl,ul,color='blue',marker='d', fillstyle='none', markevery=8,markersize=12,linestyle='None',markeredgewidth = 2)
# %% save fig
fig1.savefig('U_Darea.svg', dpi=300)
fig2.savefig('U_Dstar.svg', dpi=300)
# %%
|
hmharley/FlowData_processing_py
|
source/Plot.py
|
Plot.py
|
py
| 7,313
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71454711549
|
import os
import sys
if len(sys.argv) == 1:
    f1 = open("newdummy.txt", 'w+')
    f1.write("This is new file text. This will be re-read once again.")
    f1.seek(0)
    print("We wrote the following:")
    print(f1.read())
    f1.close()
else:
    print("Too many or too few arguments.")
|
axmenon/python-training
|
linux_rw.py
|
linux_rw.py
|
py
| 315
|
python
|
en
|
code
| 0
|
github-code
|
6
|
28774253567
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Leonardo La Rocca
"""
import melopero_RV_3028 as mp
import datetime
import gpiozero as gpio
from signal import pause
def main():
    # First initialize and create the rtc device
    rtc = mp.RV_3028()
    # Set the device to use the 24 hour format (default) instead of the 12 hour format
    rtc.set_12h_format(False)

    # Then set the date and time.
    # retrieve the datetime from the library datetime
    current_datetime = datetime.datetime.now()
    # set the date and time for the device
    rtc.set_time(current_datetime.hour, current_datetime.minute, current_datetime.second)
    rtc.set_date(current_datetime.weekday(), current_datetime.day, current_datetime.month, current_datetime.year % 2000)

    # First disable other sources of interrupts
    rtc.enable_timer(enable=False, repeat=False, generate_interrupt=False)
    rtc.enable_periodic_time_update_interrupt(generate_interrupt=False)
    rtc.clear_interrupt_flags()

    # set the alarm to trigger 2 minutes from now
    # (parenthesised so the wrap-around at the top of the hour works as intended)
    minute_alarm = (current_datetime.minute + 2) % 60
    rtc.set_minute_alarm(minute_alarm)
    rtc.enable_alarm(enable=True, generate_interrupt=True)
    print("Alarm set to trigger two minutes from now...")
    print("The alarm will trigger every hour at minute {}".format(minute_alarm))

    # interrupt routine
    def on_interrupt():
        print("Alarm: beep beep")
        print(rtc.get_time())
        print("Press CTRL + C to terminate program...")
        rtc.clear_interrupt_flags()

    # set the pin to listen to interrupts
    int_listener_pin = "GPIO4"
    interrupt = gpio.Button(int_listener_pin, pull_up=None, active_state=False)
    interrupt.when_pressed = on_interrupt
    pause()


if __name__ == "__main__":
    main()
|
melopero/Melopero_RV-3028
|
examples/alarm_interrupt_example.py
|
alarm_interrupt_example.py
|
py
| 1,795
|
python
|
en
|
code
| 2
|
github-code
|
6
|
2124151948
|
import asyncio
import inspect
import sys
import json
import socket
from contextlib import redirect_stdout, suppress
from traceback import format_exc
from typing import Dict, Callable
from copy import copy
from gornilo.models.api_constants import *
from gornilo.models.action_names import INFO, CHECK, PUT, GET, TEST
from gornilo.models.checksystem_request import CheckRequest, PutRequest, GetRequest
from gornilo.models.verdict import Verdict
from gornilo.setup_logging import setup_logging
with suppress(ImportError):
import requests
class Checker:
def __init__(self):
self.__info_distribution = {}
self.__multiple_actions = frozenset((PUT, GET))
self.__actions_handlers: Dict[str, Dict[int, Callable[[CheckRequest], Verdict]]] = {
CHECK: None,
PUT: {},
GET: {},
}
@staticmethod
def __check_function(func: callable, func_type: type):
func_name = func.__code__.co_name
func_args_spec = inspect.getfullargspec(func)
if func_args_spec.annotations.get("return") != Verdict:
raise TypeError(f"Checker function ({func_name}) should return {Verdict} object!")
if len(func_args_spec.args) < 1:
raise TypeError(f"{func_name} should have 1 or more args!")
func_arg_name = func_args_spec.args[0]
func_arg_type = func_args_spec.annotations.get(func_arg_name)
if not issubclass(func_arg_type, func_type):
raise TypeError(f"{func_name} first arg should be typed as {func_type}")
def __register_action(self, action_name: str, action: callable, action_period: int = None):
if action_name in self.__multiple_actions:
if action_period is None:
raise ValueError("Period should not be None for multiple actions!")
self.__actions_handlers[action_name][action_period] = action
else:
if action_name in self.__actions_handlers:
if self.__actions_handlers[action_name] is not None:
raise ValueError("Action has been already registered!")
self.__actions_handlers[action_name] = action
else:
raise ValueError("Incorrect action name!")
def __run_tests(self, team_ip) -> int:
from gornilo.utils import measure, generate_flag
from uuid import uuid4
import subprocess
return_codes = []
with measure(CHECK):
check_result = subprocess.run([sys.executable, sys.argv[0], CHECK, team_ip], text=True, capture_output=True)
print(f"Check completed with {check_result.returncode} exitcode, "
f"stdout: {check_result.stdout}, "
f"stderr: {check_result.stderr}")
return_codes.append(check_result.returncode)
info_response = subprocess.run([sys.executable, sys.argv[0], INFO], text=True, capture_output=True).stdout
vulns_amount = len(info_response.split("\n")[0].split(":")) - 1
for i in range(vulns_amount):
flag = generate_flag()
flag_id = str(uuid4())
with measure(f"{PUT} vuln {i + 1}"):
put_result = subprocess.run([sys.executable, sys.argv[0], PUT, team_ip, flag_id, flag, str(i + 1)],
text=True, capture_output=True)
print(f"{PUT} exited with {put_result.returncode}, "
f"stdout: {put_result.stdout}, "
f"stderr: {put_result.stderr}")
return_codes.append(put_result.returncode)
if put_result.stdout:
flag_id = put_result.stdout
with measure(f"{GET} vuln {i + 1}"):
get_result = subprocess.run([sys.executable, sys.argv[0], GET, team_ip, flag_id, flag, str(i + 1)],
text=True, capture_output=True)
print(f"{GET} exited with {get_result.returncode}, "
f"stdout: {get_result.stdout}, "
f"stderr: {get_result.stderr}")
return_codes.append(get_result.returncode)
print(f"All return codes: {return_codes}, using max as a return value. 101 transforms to 0")
return max(return_codes)
def define_check(self, func: callable) -> callable:
self.__check_function(func, CheckRequest)
self.__register_action(CHECK, func)
return func
def define_put(self, vuln_num: int, vuln_rate: int) -> callable:
if not isinstance(vuln_num, int) or vuln_num < 1:
raise TypeError(f'You should provide vulnerability natural number as a decorator argument!')
def wrapper(func: callable):
self.__check_function(func, PutRequest)
self.__register_action(PUT, func, vuln_num)
self.__info_distribution[vuln_num] = vuln_rate
return func
return wrapper
def __extract_info_call(self):
return VULNS + VULNS_SEP.join(str(self.__info_distribution[key]) for key in sorted(self.__info_distribution))
def define_get(self, vuln_num: int) -> callable:
if not isinstance(vuln_num, int) or vuln_num < 1:
raise TypeError(f'You should provide vulnerability natural number as a decorator argument!')
def wrapper(func: callable):
self.__check_function(func, GetRequest)
self.__register_action(GET, func, vuln_num)
return func
return wrapper
def __async_wrapper(self, func_result):
if asyncio.iscoroutine(func_result):
return asyncio.run(func_result)
return func_result
def __try_extract_public_flag_id(self, request_content: dict) -> dict or None:
try:
request_content = copy(request_content)
flag_id = request_content["flag_id"]
json_flag_id = json.loads(flag_id)
public_flag_id = json_flag_id.pop(PUBLIC_FLAG_ID)
private_flag_id = json_flag_id.pop(PRIVATE_CONTENT)
request_content[PUBLIC_FLAG_ID] = public_flag_id
if not isinstance(private_flag_id, str):
private_flag_id = json.dumps(private_flag_id)
request_content["flag_id"] = private_flag_id
return request_content
except Exception:
# any exception here means something gone wrong with json;
# should fallback to legacy models
return None
# noinspection PyProtectedMember
def run(self, *args):
setup_logging()
result = Verdict.CHECKER_ERROR("Something gone wrong")
try:
if not args:
args = sys.argv[1:]
with redirect_stdout(sys.stderr):
result = self.__run(*args)
if type(result) != Verdict:
print(f"Checker function returned not Verdict value, we need to fix it!", file=sys.stderr)
result = Verdict.CHECKER_ERROR("")
except Verdict as verdict:
result = verdict
except Exception as e:
print(f"Checker caught an error: {e},\n {format_exc()}", file=sys.stderr)
result = Verdict.CHECKER_ERROR("")
if isinstance(e, socket.timeout):
result = Verdict.DOWN("Socket timeout")
if "requests" in globals() and any(isinstance(e, exc) for exc in (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.TooManyRedirects)):
result = Verdict.DOWN("Could not process routine due to timeout or connection error!")
finally:
if result._public_message:
print(result._public_message, file=sys.stdout)
sys.exit(result._code)
def __run(self, command=None, hostname=None, flag_id=None, flag=None, vuln_id=None) -> Verdict:
commands = [CHECK, PUT, GET, INFO, TEST]
if command is None:
raise ValueError("Expected 1 or more args!")
command = command.upper()
if command not in commands:
raise ValueError(f"Unknown ({command}) command! (Expected one of ({','.join(commands)})")
if command == INFO:
return Verdict.OK(self.__extract_info_call())
if hostname is None:
raise ValueError("Can't find 'hostname' arg! (Expected 2 or more args)")
check_func = self.__actions_handlers[CHECK]
request_content = {
"hostname": hostname
}
if command == CHECK:
# noinspection PyCallingNonCallable
return self.__async_wrapper(check_func(CheckRequest(**request_content)))
if command == TEST:
return_code = self.__run_tests(hostname)
return Verdict(0 if return_code == 101 else return_code, "Tests has been finished")
if flag_id is None:
raise ValueError("Can't find 'flag_id' arg! (Expected 3 or more args)")
if flag is None:
raise ValueError("Can't find 'flag' arg (Expected 4 or more args)")
if vuln_id is None:
raise ValueError("Can't find 'vuln_id' arg (Expected 5 or more args)")
try:
vuln_id = int(vuln_id)
assert vuln_id > 0
assert vuln_id in self.__actions_handlers[PUT]
assert vuln_id in self.__actions_handlers[GET]
except (TypeError, AssertionError):
raise ValueError("'vuln_id' should be representative as a natural number, "
f"{GET}/{PUT} methods should be registered in checker!")
put_func = self.__actions_handlers[PUT][vuln_id]
get_func = self.__actions_handlers[GET][vuln_id]
request_content.update({
"flag_id": flag_id,
"flag": flag,
"vuln_id": vuln_id
})
if command == PUT:
return self.__async_wrapper(put_func(PutRequest(**request_content)))
if command == GET:
result = self.__try_extract_public_flag_id(request_content)
return self.__async_wrapper(get_func(GetRequest(**(result or request_content))))
raise RuntimeError("Something went wrong with checker scenario :(")
|
HackerDom/Gornilo
|
gornilo/actions.py
|
actions.py
|
py
| 10,242
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36242169771
|
"""
RUNBASE-IMP
HTML scraping bot for monitoring Adidas Runners events
Author: Francesco Ramoni
francesco[dot]ramoni@email.it
https://github.com/framoni/
"""
import json
from lxml import html
from selenium import webdriver
import time
from twilio.rest import Client
#-------------------------------------------------------------------------------
# PARAMETERS
# url to be scraped
ar_url = 'https://www.adidas.it/adidasrunners/community/milano'
# request header
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
# message
message = 'Le iscrizioni agli eventi Adidas Runners di questa settimana sono state effettuate. runbase-imp'
# twilio data
with open("runbase-imp-param.json") as j:
for line in j:
td = json.loads(line)
# set webdriver options
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument(f'user-agent={user_agent}')
#-------------------------------------------------------------------------------
# FUNCTIONS
# function to scrape event url
def scrape_event_url(event_name):
    browser.get(ar_url)
    event_url = browser.find_element_by_xpath('//a[contains(div//div//h3, "{}") and contains(., "SENZA spogliatoio")]'.format(event_name)).get_attribute("href")
    return event_url

# function to sign up to an event
def event_signup(event_name, do_login):
    print("Event: {}".format(event_name))
    event_url = scrape_event_url(event_name)
    # go to event page
    browser.get(event_url)
    # login
    if do_login:
        login()
    # wait 10 seconds to bypass a UI visual bug (?) showing a second login form
    time.sleep(10)
    # sign up to the event
    button_signup = browser.find_element_by_xpath('//*[@title="Iscriviti"]')
    browser.execute_script("arguments[0].click();", button_signup)

# function to login to the portal
def login():
    # click on login button
    button_login = browser.find_element_by_xpath('//*[@title="Accedi"]')
    browser.execute_script("arguments[0].click();", button_login)
    # send username, password and confirm
    browser.find_element_by_id('email').send_keys(td['email'])
    browser.find_element_by_id('password').send_keys(td['pass'])
    button_send = browser.find_element_by_xpath('//*[@title="Invia"]')
    browser.execute_script("arguments[0].click();", button_send)
    return
#-------------------------------------------------------------------------------
# SCRAPING
# create a new driver
browser = webdriver.Chrome(chrome_options=options)
browser.implicitly_wait(60)
print("Signing up to the Adidas Runners events... ")
# sign up to events of interest
event_signup('MONDAY HEROES', True)
event_signup('ROAD TO YOUR BEST', False)
# close the driver
browser.quit()
# send a SMS to notify
print("Notifying via SMS... ")
client = Client(td['twilio_client'], td['twilio_token'])
client.messages.create(to=td['phone_to'], from_=td['phone_from'], body=message)
print("Job done. ")
|
framoni/runbase-imp
|
main.py
|
main.py
|
py
| 3,011
|
python
|
en
|
code
| 0
|
github-code
|
6
|
29456733472
|
from __future__ import print_function
import sys
from atrope import exception
from atrope.cmd import image_list
from atrope.cmd import version
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF
def add_command_parsers(subparsers):
    image_list.CommandImageListIndex(subparsers)
    image_list.CommandImageListFetch(subparsers)
    image_list.CommandImageListCache(subparsers)
    image_list.CommandDispatch(subparsers)
    version.CommandVersion(subparsers)


command_opt = cfg.SubCommandOpt('command',
                                title='Commands',
                                help='Show available commands.',
                                handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
LOG = log.getLogger(__name__)


class CommandManager(object):
    def execute(self):
        try:
            LOG.info("Atrope session starts >>>>>>>>>>")
            CONF.command.func()
        except exception.AtropeException as e:
            print("ERROR: %s" % e, file=sys.stderr)
            sys.exit(1)
        except KeyboardInterrupt:
            print("\nExiting...", file=sys.stderr)
            sys.exit(0)
        finally:
            LOG.info("Atrope session ends <<<<<<<<<<<<")
|
alvarolopez/atrope
|
atrope/cmd/commands.py
|
commands.py
|
py
| 1,235
|
python
|
en
|
code
| 2
|
github-code
|
6
|
73652308669
|
# You are given a non-empty array nums containing only positive integers. Determine
# whether the array can be partitioned into two subsets whose elements sum to the
# same value.
class Solution(object):
    def canPartition(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        sumnums = 0
        for i in nums:
            sumnums += i
        if sumnums % 2 != 0:
            return False
        sumnums = int(sumnums / 2)
        dp = [[False for col in range(sumnums + 1)] for row in range(len(nums) + 1)]
        for i in range(0, len(nums)):
            dp[i][0] = True
        for i in range(1, len(nums) + 1):
            for j in range(1, sumnums + 1):
                if j >= nums[i - 1]:
                    dp[i][j] = dp[i - 1][j] or dp[i - 1][j - nums[i - 1]]
                else:
                    dp[i][j] = dp[i - 1][j]
        return dp[len(nums) - 1][sumnums]
nums = [1,2,5]
a = Solution()
print(a.canPartition(nums))
|
xxxxlc/leetcode
|
Dynamicprogramming/canPartition.py
|
canPartition.py
|
py
| 980
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42929655074
|
from collections import defaultdict
class Solution:
def accountsMerge(self, accounts):
email_accounts_map = defaultdict(list)
visited_accounts = [False]*len(accounts)
result = []
for i, account in enumerate(accounts):
for j in range(1, len(account)):
email = account[j]
email_accounts_map[email].append(i)
def dfs(i, emails):
if visited_accounts[i]:
return
visited_accounts[i] = True
for j in range(1, len(accounts[i])):
email = accounts[i][j]
emails.add(email)
for neighbours in email_accounts_map[email]:
dfs(neighbours, emails)
for i, account in enumerate(accounts):
if visited_accounts[i]:
continue
name, emails = account[0], set()
dfs(i, emails)
result.append([name] + sorted(emails))
return result
obj = Solution()
accounts = [["John","johnsmith@mail.com","john_newyork@mail.com"],["John","johnsmith@mail.com","john00@mail.com"],["Mary","mary@mail.com"],["John","johnnybravo@mail.com"]]
ans = obj.accountsMerge(accounts)
print(ans)
|
shwetakumari14/Leetcode-Solutions
|
Miscellaneous/Python/721. Accounts Merge.py
|
721. Accounts Merge.py
|
py
| 1,279
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1796292061
|
from hashlib import md5
from typing import Union
def hash_encode(data: Union[str, bytes],
return_bytes: bool = False) -> Union[str, bytes]:
if isinstance(data, str):
data = data.encode()
output = md5(data)
return output.digest() if return_bytes else output.hexdigest()
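# Hypothetical usage sketch (added for illustration; not part of the original module).
# The md5 digest of b"hello" is deterministic, so this shows both return modes.
if __name__ == '__main__':
    print(hash_encode('hello'))                      # '5d41402abc4b2a76b9719d911017c592'
    print(hash_encode(b'hello', return_bytes=True))  # raw 16-byte digest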
|
FZQ0003/Qi-Bot
|
utils/hash.py
|
hash.py
|
py
| 307
|
python
|
en
|
code
| 1
|
github-code
|
6
|
37373995341
|
#!/usr/bin/env python
"""
ONS Address Index - Land Registry Data
======================================
A simple script to process land registry sales data.
The original data were downloaded on the 10th of November from:
https://data.gov.uk/dataset/land-registry-monthly-price-paid-data
Because the AddressBase used by the prototype is Epoch 39 (from April), it does not contain all
new builds with new postcodes. This script makes it possible to identify those postcodes that do not exist
in the Epoch 39 AddressBase.
Running
-------
The script can be run from command line using CPython::
python landRegistryData.py
Requirements
------------
:requires: pandas
:requires: numpy
Author
------
:author: Sami Niemi (sami.niemi@valtech.co.uk)
Version
-------
:version: 0.2
:date: 18-Nov-2016
"""
import pandas as pd
import numpy as np
import os
if os.environ.get('LC_CTYPE', '') == 'UTF-8':
os.environ['LC_CTYPE'] = 'en_US.UTF-8'
def loadData(filename='pp-monthly-update.csv', path='/Users/saminiemi/Projects/ONS/AddressIndex/data/'):
"""
Read in the Land Registry testing data.
The data were downloaded from:
https://data.gov.uk/dataset/land-registry-monthly-price-paid-data
The header was grabbed from:
https://www.gov.uk/guidance/about-the-price-paid-data#explanations-of-column-headers-in-the-ppd
:param filename: name of the CSV file holding the data
:type filename: str
:param path: location of the test data
:type path: str
:return: pandas dataframe of the data (no UPRNs)
:rtype: pandas.DataFrame
"""
df = pd.read_csv(path + filename, low_memory=False, parse_dates=[2, ], infer_datetime_format=True)
print('Found', len(df.index), 'addresses from the land registry sales data...')
return df
def loadAddressBaseData(filename='AB.csv', path='/Users/saminiemi/Projects/ONS/AddressIndex/data/ADDRESSBASE/'):
"""
    Load a compressed version of the full AddressBase file. The information being used
    has been processed from the AB Epoch 39 files provided by ONS.
:param filename: name of the file containing modified AddressBase
:type filename: str
:param path: location of the AddressBase combined data file
:type path: str
:return: pandas dataframe of the requested information
:rtype: pandas.DataFrame
"""
df = pd.read_csv(path + filename, usecols=['POSTCODE', 'POSTCODE_LOCATOR'])
print('Found', len(df.index), 'addresses from AddressBase...')
# combine PAF and NAG information
msk = df['POSTCODE'].isnull()
df.loc[msk, 'POSTCODE'] = df.loc[msk, 'POSTCODE_LOCATOR']
return df
def testIfPostcodeExists(ab, landRegistry):
"""
A simple function to identify those postcodes that are present in the land registry data but
missing from AddressBase. Most of these are new buildings. One should consider removing these
from the testing of prototype matching.
:param ab: dataframe containing addressbase information
:type ab: pandas.DataFrame
:param landRegistry: dataframe containing land registry data
:type landRegistry: pandas.DataFrame
:return: None
"""
# find unique postcodes from AddressBase
ABpostcodes = np.unique(ab['POSTCODE'].values)
# those land registry postcodes that are not present in AddressBase are newbuilds
msk = landRegistry['Postcode'].isin(ABpostcodes)
# get those addresses that have a postcode in AB and identify missing postcodes
lr = landRegistry.loc[~msk]
missingPostcodes = np.unique(lr.loc[lr['Postcode'].notnull(), 'Postcode'].values)
print('Missing Postcodes:')
print(missingPostcodes)
print('In total', len(missingPostcodes), 'postcodes in sales data without AB counterpart')
print('In total', len(lr.index), 'addresses without counterparts')
# find those with postcode counterparts and save to a file
msk = ~landRegistry.Postcode.isin(missingPostcodes)
    lr = landRegistry.loc[msk]
    path = '/Users/saminiemi/Projects/ONS/AddressIndex/data/'
    print('After removing postcodes without counterpart', len(lr.index), 'addresses remain...')
lr.to_csv(path + 'pp-monthly-update-Edited.csv', index=False)
# record also those without postcode counterpart
    lr = landRegistry.loc[~msk]
print(len(lr.index), 'addresses without postcodes...')
lr.to_csv(path + 'pp-monthly-update-no-postcode.csv', index=False)
if __name__ == "__main__":
ab = loadAddressBaseData()
lr = loadData()
testIfPostcodeExists(ab, lr)
|
ONSdigital/address-index-data
|
DataScience/Analytics/data/landRegistryData.py
|
landRegistryData.py
|
py
| 4,514
|
python
|
en
|
code
| 18
|
github-code
|
6
|
41978220901
|
from task_3 import Bucket, Unbucketed, JoinBuckets
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.types import StructType, StructField, StringType, DateType, IntegerType
from datetime import datetime
import pytest
spark = SparkSession.builder.appName("Clients").getOrCreate()
# schema for trx_table
schema = StructType([
StructField('ndc11', StringType()),
StructField('invoice_amount', IntegerType()),
StructField('invoice_quantity', IntegerType()),
StructField('bu_dir.state', StringType()),
StructField('bu_whl.state', StringType()),
StructField('invoice_date', DateType()),
])
# data for trx_table (ndc11 values are strings to match the StringType field in the schema)
data = [('1', 100, 10, 'USA', 'USA', datetime(2019, 1, 3)),
        ('1', 24, 20, 'NM', 'NM', datetime(2019, 2, 24)),
        ('1', 200, 23, 'USA', 'USA', datetime(2019, 1, 20)),
        ('2', 270, 14, 'USA', 'USA', datetime(2019, 3, 15)),
        ('2', 340, 55, 'USA', 'PR', datetime(2019, 2, 17)),
        ('2', 312, 34, 'NM', 'USA', datetime(2019, 2, 4)),
        ('2', 425, 22, None, 'USA', datetime(2019, 2, 9))
        ]
# create test_df
trx_table = spark.createDataFrame(data, schema=schema)
# add column 'month'
trx_table = trx_table.withColumn('month', F.date_format('invoice_date', 'yyyyMM'))
# from task_4 import Bucket, prepare_df_with_month
# import pytest
# from datetime import datetime
# from pyspark.sql import functions as F
# from pyspark.sql import Column
# from pyspark.sql.types import StructType, StructField, DateType
#
#
# @pytest.fixture
# def bucket(trx_table):
# agg_cols = [(F.col('invoice_amount'), 'cost'), (F.col('invoice_quantity'), 'quan')]
# bucket = Bucket(trx_table, [trx_table['`bu_dir.state`'] == 'USA'], ['ndc11', 'month'], agg_cols, 'dir_sls_')
# return bucket
#
#
# def test_adjust_prefix(bucket, trx_table):
# bucket = bucket.adjust_prefix(trx_table)
# assert 'dir_sls_invoice_amount' in bucket.columns
# assert 'ndc11' in bucket.columns
#
#
# def test_get_aggregations(bucket):
# assert isinstance(bucket.get_aggregations()[0], Column)
#
#
# @pytest.mark.parametrize('expect_cols', ['ndc11', 'dir_sls_cost'])
# def test_create_bucket(bucket, expect_cols):
# actual_bucket = bucket.create_bucket()
#
# assert expect_cols in actual_bucket.columns
# assert actual_bucket.collect()[0]['dir_sls_cost'] == 300
#
#
# @pytest.mark.parametrize('row, expect', [((datetime(2019, 1, 3),), '201901'),
# ((datetime(2020, 3, 4),), '202003')])
# def test_prepare_df_with_month(spark, row, expect):
# df = spark.createDataFrame([row], schema=StructType([StructField('invoice_date', DateType())]))
# actual_df = prepare_df_with_month(df)
# assert 'month' in actual_df.columns
# assert actual_df.collect()[0]['month'] == expect
|
rkrvchnk/pyspark_tasks
|
tests/test_task_3.py
|
test_task_3.py
|
py
| 2,853
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3690450175
|
class NumericSolverModelResults:
def __init__(self, model_name, model, X, P, S, V, t, dt, non_dim_scaler):
"""
        model_name is something like "euler" or "runge_kutta"
        The rest are the numerical solution parameters
"""
self.model_name = model_name
self.model = model
self.X = X
self.P = P
self.S = S
self.V = V
self.t = t
self.dt = dt
self.non_dim_scaler = non_dim_scaler
|
takenoto/pinn_la_casei_2023
|
domain/numeric_solver/numeric_solver_model_results.py
|
numeric_solver_model_results.py
|
py
| 476
|
python
|
pt
|
code
| 0
|
github-code
|
6
|
11005307998
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import seaborn as sns
from numpy.random import rand
from sklearn import preprocessing
from sklearn import metrics, svm
from sklearn.metrics import precision_score
from collections import Counter
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
import collections
def split_into_train_and_test(x_all_LF, frac_test=0.5, random_state=None):
if random_state is None:
random_state = np.random
exam,fea = x_all_LF.shape
N = math.ceil(frac_test*exam)
# np.random.RandomState(random_state)
temp = random_state.permutation(x_all_LF)
x_test_NF = temp[0:N,:]
x_train_MF = temp[N:,:]
# print(np.count_nonzero(x_all_LF[:,-1]))
# print(x_all_LF.shape)
# print(np.count_nonzero(x_all_LF.iloc[:,-1]) /x_all_LF.shape[0] )
labels = ['0','1']
plotY = [x_all_LF.shape[0] - np.count_nonzero(x_all_LF.iloc[:,-1]), np.count_nonzero(x_all_LF.iloc[:,-1])]
# plt.pie(plotY, labels = labels)
# plt.suptitle("Distribution of Imbalanced Class")
# plt.show()
# print(x_train_MF)
return x_train_MF, x_test_NF
def oneHotEnc(bank):
for column in bank:
if column == 'y':
temp = bank.y.astype('category').cat.codes
# print(type(temp))
else:
if bank[column].dtypes == object:
temp = pd.get_dummies(bank[column], prefix=column)
else:
temp = bank[column]
try:
# s.append(temp)
s = pd.concat([s, temp], axis=1)
except NameError:
s = pd.DataFrame(data=temp)
s.rename(columns={0: 'y'}, inplace=True)
return s
def labelEncoding(bank):
# le = preprocessing.LabelEncoder()
for column in bank:
if bank[column].dtypes == object:
if column == 'month':
temp = bank[column].astype('category').cat.reorder_categories([ 'jan','feb','mar','apr', 'may', 'jun','jul', 'aug', 'sep','oct', 'nov', 'dec']).cat.codes
else:
temp = bank[column].astype('category').cat.codes
else:
temp = bank[column]
try:
# s.append(temp)
s = pd.concat([s, temp], axis=1)
except NameError:
s = pd.DataFrame(data=temp)
s.rename(columns={0: column}, inplace=True)
# print(s)
return s
class CustomlogisticRegression:
def __init__(self, epoch = None):
if epoch is None:
self.epoch = 1e3
else:
self.epoch = epoch
def fit(self,X,y, lr = 5e-2):
loss = []
weights = rand(X.shape[1])
N = len(X)
for _ in range(int(self.epoch)):
y_hat = self.sigmod(np.dot(X,weights))
weights += lr*np.dot(X.T, y-y_hat) / N
loss.append(self.costFunction(X,y,weights))
self.weights = weights
self.loss = loss
def sigmod(self,z):
return 1 / (1+np.e**(-z))
def costFunction(self,X,y,weights):
z = np.dot(X,weights)
# prediction = self.sigmod(z)
# SSE = np.sum((y - np.where(prediction > 0.5, 1, 0)) ** 2)
predict1 = y* np.log(self.sigmod(z))
predict0 = (1-y)*np.log(1-self.sigmod(z))
return -np.sum(predict0+predict1) / len(X)
def predict(self,X):
z = np.dot(X,self.weights)
return [1 if i > 0.5 else 0 for i in self.sigmod(z)]
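# Hypothetical usage sketch for CustomlogisticRegression (illustration only; the
# feature matrix and labels below are made up). fit() expects numpy arrays and a
# 0/1 target vector, and predict() returns a list of 0/1 labels.
#
#     X = np.array([[0.1, 1.0], [0.9, 0.2], [0.8, 0.1], [0.2, 0.9]])
#     y = np.array([0, 1, 1, 0])
#     clf = CustomlogisticRegression(epoch=1e3)
#     clf.fit(X, y, lr=1e-1)
#     print(clf.predict(X))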
def imbalanced(data):
N = data.index.size
dataWithOne = data.loc[data["y"] == 1]
multiInt =(N - dataWithOne.index.size) // dataWithOne.index.size
    for _ in range(multiInt):
        # DataFrame.append was removed in pandas 2.x; concat achieves the same oversampling
        data = pd.concat([data, dataWithOne])
# print(data.index)
# print(data.loc[data["y"] == 1].index)
labels = ['0','1']
plotY = [data.shape[0] - np.count_nonzero(data.iloc[:,-1]), np.count_nonzero(data.iloc[:,-1])]
plt.pie(plotY, labels = labels)
plt.suptitle("Distribution of balanced Class")
return data
def bestCustom(xTrain, yTrain, yTest, xTest):
customModel = CustomlogisticRegression()
    maxScore = float('-inf')  # np.float was removed from NumPy; use the built-in float
bestLR = 0
for lr in [1e-1,1e-2,1e-3,1e-4,1e-5,1e-6]:
        customModel.fit(xTrain, yTrain, lr)  # fit() takes (X, y, lr); the extra 1E4 argument would raise a TypeError
score = precision_score(yTest, customModel.predict(xTest))
if score > maxScore:
bestLR = lr
maxScore = score
return bestLR
def multiConfusionPlot(X_train, X_test, y_train, y_test ):
classifiers = {
"customLogistic": CustomlogisticRegression(),
"LogisiticRegression": LogisticRegression(max_iter=1e4),
"KNearest": KNeighborsClassifier(),
"Support Vector Classifier": SVC(),
"MLPClassifier": MLPClassifier(),
}
f, axes = plt.subplots(1, 5, figsize=(20, 5), sharey='row')
for i, (key, classifier) in enumerate(classifiers.items()):
# if classifier == CustomlogisticRegression():
# classifier.fit(X_train,y_train)
# y_pred = classifier.predict(X_test)
# else:
# y_pred = classifier.fit(X_train, y_train).predict(X_test)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
cf_matrix = metrics.confusion_matrix(y_test, y_pred)
disp = metrics.ConfusionMatrixDisplay(cf_matrix)
disp.plot(ax=axes[i], xticks_rotation=45)
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred)
aucScore = metrics.auc(fpr, tpr)
disp.ax_.set_title(key+":"+"{:.2e}".format(aucScore))
disp.im_.colorbar.remove()
disp.ax_.set_xlabel('')
if i != 0:
disp.ax_.set_ylabel('')
f.text(0.4, 0.1, 'Predicted label', ha='left')
plt.subplots_adjust(wspace=0.40, hspace=0.1)
"imBalancedOneHotMinMax"
"BalancedOneHotMinMax"
"BalancedCategoricalMinMax"
f.suptitle("BalancedLabelMinMax")
f.colorbar(disp.im_, ax=axes)
plt.show()
def heatmap(data):
corr = data.corr()
sns.heatmap(corr,annot = True)
plt.show()
def main():
# print("original data")
bank = pd.read_csv("bank.csv", delimiter=';')
# print(bank.head())
# print("after oneHotEncoding")
# df = oneHotEnc(bank)
df = labelEncoding(bank)
# print(df.head())
# print(df.columns)
# print(dfOnehot.head())
# print(dfOnehot.columns)
# heatmap(df)
df = imbalanced(df)
# print(type(df))
# print(df.head())
train_MF, test_NF = split_into_train_and_test(df, frac_test=0.3, random_state=np.random.RandomState(0))
xTest = test_NF[:, :-1]
yTest = test_NF[:, -1]
# print(np.count_nonzero(yTest))
xTrain = train_MF[:, :-1]
yTrain = train_MF[:, -1]
xTrain = (xTrain - np.min(xTrain, axis=0)) / (np.max(xTrain, axis=0) - np.min(xTrain, axis=0))
xTest = (xTest - np.min(xTest, axis=0)) / (np.max(xTest, axis=0) - np.min(xTest, axis=0))
multiConfusionPlot(xTrain,xTest,yTrain,yTest)
if __name__ == "__main__":
main()
|
xixihaha1995/cosc5555
|
proposal/simpleLogistic.py
|
simpleLogistic.py
|
py
| 7,185
|
python
|
en
|
code
| 0
|
github-code
|
6
|
5430866729
|
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.python import PythonOperator
default_args = {
'owner': 'airflow',
'start_date': datetime(2023, 7, 16),
'retries': 1,
}
def print_hello():
return "Hello World from Airflow!"
dag = DAG(
dag_id="hello_airflow",
description="Hello World Program in Airflow",
schedule_interval=timedelta(minutes=10),
start_date=datetime(2023, 7, 16),
default_args= default_args
)
hello_operator = PythonOperator(task_id='hello_task', python_callable=print_hello, dag=dag)
hello_operator
|
tejas7777/RobinHood
|
dags/test.py
|
test.py
|
py
| 594
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1549161757
|
import numpy as np
import pickle
import os
import random
from compute_pairwise_dataset import compute_pairwise_dataset
import torch
from utils import get_torch_device
def save_dataset(qids, X, y, folder):
"""
Save the dataset in the provided folder.
"""
if not os.path.exists(folder):
os.mkdir(folder)
pickle.dump(qids, open(f'{folder}/qids.pickle', 'wb'))
pickle.dump(y, open(f'{folder}/y.pickle', 'wb'))
pickle.dump(X, open(f'{folder}/X.pickle', 'wb'))
def process_line(line: str) -> np.ndarray:
line_without_comment = line.split('#')[0]
line_without_comment = line_without_comment.strip()
features = line_without_comment.split(sep=' ')
score = float(features[0])
qid = int(features[1].split(':')[1].strip())
processed_features = list(map(lambda x: float(x.split(':')[1].strip()), features[2:]))
return qid, score, np.array(processed_features)
def process_dataset(dataset: str):
qids = []
scores = []
features = []
lines = dataset.splitlines()
for line in lines:
qid, score, feature_vec = process_line(line)
qids.append(qid)
scores.append(score)
features.append(feature_vec)
# print(scores)
print('Dataset loaded and processed')
return np.array(qids), np.array(scores), np.stack(features)
def get_dataset(path: str):
with open(path, 'r') as file:
return process_dataset(file.read())
def group_data_by_query_id(qids, scores, features):
data_by_query = {}
for i, qid in enumerate(qids):
if qid not in data_by_query.keys():
data_by_query[qid] = list()
data_by_query[qid].append((scores[i], features[i]))
return data_by_query
def compute_pairwise_dataset_for_query(qid, data_by_query, score_equal_drop_prob=0.85):
score_features_list = data_by_query[qid]
pairwise_features = []
target_probabilities = []
for i in range(len(score_features_list)):
for j in range(len(score_features_list)):
if i == j:
continue
score_i, features_i = score_features_list[i][0], score_features_list[i][1]
score_j, features_j = score_features_list[j][0], score_features_list[j][1]
# if score_i == score_j:
# rnd = random.random()
# if rnd < score_equal_drop_prob:
# continue
combined_feature = np.concatenate([features_i, features_j])
target_probability = 1.0 if score_i > score_j else (0.5 if score_i == score_j else 0.0)
pairwise_features.append(combined_feature)
target_probabilities.append(target_probability)
return pairwise_features, target_probabilities
def get_pairwise_dataset(path: str):
qids, scores, features = get_dataset(path)
# group dataset by query id
data_by_query = group_data_by_query_id(qids, scores, features)
unique_qids = list(set(list(qids)))
pairwise_qids = []
pairwise_target_probabilities = []
pairwise_features = []
for i, qid in enumerate(unique_qids):
print(f'{i} / {len(unique_qids)}')
f, p = compute_pairwise_dataset_for_query(qid, data_by_query)
pairwise_qids += [qid] * len(p)
pairwise_target_probabilities += p
pairwise_features += f
return np.array(pairwise_qids), np.array(pairwise_target_probabilities), np.stack(pairwise_features)
def get_pairwise_dataset_fast(path: str):
qids, scores, features = get_dataset(path)
scores = torch.from_numpy(scores).type(torch.FloatTensor).to(get_torch_device())
features = torch.from_numpy(features).type(torch.FloatTensor).to(get_torch_device())
# group dataset by query id
unique_qids = list(set(list(qids)))
t_qids = torch.from_numpy(qids).type(torch.FloatTensor).to(get_torch_device())
pairwise_qids = []
pairwise_target_probabilities = []
pairwise_features = []
for i, qid in enumerate(unique_qids):
print(f'{i} / {len(unique_qids)}')
indices = torch.nonzero(t_qids == qid).T[0]
X = features[indices]
y = scores[indices]
X_pairwise, y_pairwise = compute_pairwise_dataset(X, y)
qid_pairwise = qid * torch.ones_like(y_pairwise)
qid_pairwise = qid_pairwise.type(torch.IntTensor)
pairwise_qids.append(qid_pairwise)
pairwise_target_probabilities.append(y_pairwise)
pairwise_features.append(X_pairwise)
return torch.concat(pairwise_qids), torch.concat(pairwise_target_probabilities), torch.concat(pairwise_features, dim=0)
def load_dataset(folder):
"""
    Load the pairwise training dataset used in RankNet training.
"""
qids = pickle.load(open(f'{folder}/qids.pickle', 'rb'))
y = pickle.load(open(f'{folder}/y.pickle', 'rb'))
X = pickle.load(open(f'{folder}/X.pickle', 'rb'))
return qids, y, X
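# Hypothetical usage sketch (illustration only; the LETOR-style input file and the
# output folder are assumptions, not artifacts shipped with this module):
#
#     qids, y, X = get_pairwise_dataset_fast('data/train.txt')
#     save_dataset(qids.cpu().numpy(), X.cpu().numpy(), y.cpu().numpy(), 'data/pairwise_train')
#     qids, y, X = load_dataset('data/pairwise_train')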
|
catalinlup/learning-to-rank
|
src/data_loaders.py
|
data_loaders.py
|
py
| 4,938
|
python
|
en
|
code
| 0
|
github-code
|
6
|
43694416643
|
from django import template
from django.contrib.contenttypes.models import ContentType
from notification_channels.models import Notification
register = template.Library()
""" Notification tags """
@register.simple_tag(name='get_all_notifs')
def get_all_notifs(user):
return user.notifications.all().order_by("-timestamp")
@register.simple_tag(name='get_notif_count')
def get_notif_count(user):
return user.notifications.all().count()
@register.simple_tag(name='get_count_type_unseen')
def get_count_type_unseen(notif_type, user):
return user.notifications.filter(notif_type=notif_type, seen=False).count()
@register.simple_tag(name='get_unseen_notif_count')
def get_unseen_notif_count(user):
return user.notifications.filter(seen=False).count()
@register.simple_tag(name='get_related_notifs')
def get_related_notifs(obj):
obj_ctype = ContentType.objects.get_for_model(obj)
return Notification.objects.filter(target_ctype=obj_ctype,
target_id=obj.id).order_by("-timestamp")
@register.simple_tag(name='get_action_notifs')
def get_action_notifs(obj):
obj_ctype = ContentType.objects.get_for_model(obj)
return Notification.objects.filter(action_obj_ctype=obj_ctype,
action_obj_id=obj.id).order_by("-timestamp")
@register.simple_tag(name='get_user_action_notifs')
def get_user_action_notifs(user, obj):
obj_ctype = ContentType.objects.get_for_model(obj)
return Notification.objects.filter(recipient=user, action_obj_ctype=obj_ctype,
action_obj_id=obj.id).order_by("-timestamp")
@register.simple_tag(name='get_user_related_notifs')
def get_user_related_notifs(user, obj):
obj_ctype = ContentType.objects.get_for_model(obj)
return Notification.objects.filter(recipient=user, target_ctype=obj_ctype,
target_id=obj.id).order_by("-timestamp")
def unread_notifs(user):
unread = user.notifications.filter(read=False).order_by("-timestamp")
return {
"notifications": unread,
}
register.inclusion_tag("notification_channels/notify.html")(unread_notifs)
def unseen_notifs(user):
unseen = user.notifications.filter(seen=False).order_by("-timestamp")
return {
"notifications": unseen,
}
register.inclusion_tag("notification_channels/notify.html")(unseen_notifs)
def all_notifs(user):
notifs = user.notifications.all().order_by("-timestamp")
return {
"notifications": notifs,
}
register.inclusion_tag("notification_channels/notify.html")(all_notifs)
def type_notifs(typ, user):
notifs = user.notifications.filter(notif_type=typ).order_by("-timestamp")
return {
"notifications": notifs,
}
register.inclusion_tag("notification_channels/notify.html")(type_notifs)
def get_notification(notification):
return {
"notification": notification,
}
register.inclusion_tag("notification_channels/notification.html")(get_notification)
|
Velle-log/FusionIIIT
|
FusionIIIT/notification_channels/templatetags/notif_tags.py
|
notif_tags.py
|
py
| 3,084
|
python
|
en
|
code
| 13
|
github-code
|
6
|
24812924597
|
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import time
s = Service('./chromedriver')
chromeOptions = Options()
chromeOptions.add_argument('start-maximized')
driver = webdriver.Chrome(service=s, options=chromeOptions)
driver.implicitly_wait(10)
driver.get('https://aliexpress.ru/')
for i in range(5):
goods = driver.find_elements(By.XPATH, "//div[@data-product-id]")
actions = ActionChains(driver)
actions.move_to_element(goods[-1])
actions.perform()
    # click the first <button> twice per scroll to load more items;
    # use a separate counter so the outer loop variable `i` is not shadowed
    clicks = 0
    while clicks < 2:
        wait = WebDriverWait(driver, 10)
        next_button = wait.until(EC.element_to_be_clickable((By.TAG_NAME, "button")))
        next_button.click()
        clicks += 1
# next_button = driver.find_element(By.TAG_NAME, "button")
# next_button.click()
# time.sleep(1)
goods = driver.find_elements(By.XPATH, "//div[@data-product-id]")
for good in goods:
name = good.find_element(By.XPATH, ".//div[@class='product-snippet_ProductSnippet__name__lido9p']").text
price = good.find_element(By.XPATH, ".//div[@class='snow-price_SnowPrice__mainM__18x8np']").text
print(name, price)
|
IldarKhuzin/selenium_7
|
lenta.py
|
lenta.py
|
py
| 1,400
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36275046877
|
import pygame
SCROLLBAR_THICKNESS = 20
BUTTON_SCROLL_WHEEL_UP = 4
BUTTON_SCROLL_WHEEL_DOWN = 5
SCROLL_SPEED = 20
VSPACE = 20
class ScrolledPanel(pygame.Surface):
def __init__(self, display, x, y, width, height, vspace=VSPACE, background_color=(255, 255, 255)):
pygame.Surface.__init__(self, (width, height))
self.focus = False
self.label = ''
self.display = display
self.x = x
self.y = y
self.width = width
self.height = height
self.vspace = vspace
self.background_color = background_color
self.available_width = self.width - SCROLLBAR_THICKNESS
self.virtual_height = 0
self.content_surface = pygame.Surface((self.available_width, self.virtual_height))
self.surfaces = []
self.rect = self.get_rect()
self.ratio = 1.0
self.track = pygame.Rect(self.rect.right - SCROLLBAR_THICKNESS,
self.rect.top, SCROLLBAR_THICKNESS,
self.rect.height)
self.knob = pygame.Rect(self.track)
self.knob.height = self.track.height * self.ratio
self.scrolling = False
self.mouse_in_me = False
self.cursor = -1
def buildScrollbar(self):
self.rect = self.get_rect()
if self.rect.height < self.content_surface.get_height():
self.ratio = (1.0 * self.rect.height) / self.content_surface.get_height()
self.track = pygame.Rect(self.rect.right - SCROLLBAR_THICKNESS,
self.rect.top, SCROLLBAR_THICKNESS,
self.rect.height)
self.knob = pygame.Rect(self.track)
self.knob.height = self.track.height * self.ratio
def getAvailableWidth(self):
return self.available_width
def getVirtualHeight(self):
height = 0
last = len(self.surfaces) - 1
for i, surface in enumerate(self.surfaces):
height += surface.get_height()
            if i != last:  # compare values, not identity
height += self.vspace
return height
def addSurface(self, surface):
self.surfaces.append(surface)
self.virtual_height = self.getVirtualHeight()
self.content_surface = pygame.Surface((self.available_width, self.virtual_height))
if self.cursor == -1:
self.cursor = 0
self.buildScrollbar()
def clearSurfaces(self):
self.surfaces = []
self.cursor = -1
self.ratio = 1.0
def getSurface(self, surface_id):
for surface in self.surfaces:
if surface.id == surface_id:
return surface
def getSurfaces(self):
return self.surfaces
def getClickedSurface(self):
for surf in self.surfaces:
if surf.getClicked():
return surf
return None
def setFocus(self, value):
self.focus = value
def getFocus(self):
return self.focus
def setLabel(self, label):
self.label = label
def getLabel(self):
label = self.label
if label != '':
label += ': '
if self.cursor == -1:
label += self.display.translator.translate('empty')
else:
try:
label += self.surfaces[self.cursor].getLabel()
except AttributeError:
label += 'unknown'
return label
def getVSpace(self):
return self.vspace
def getPos(self):
return self.x, self.y
def handleEvent(self, event):
if self.mouse_in_me:
for surface in self.surfaces:
surface.handleEvent(event)
if event.type == pygame.MOUSEMOTION and self.scrolling:
if event.rel[1] != 0:
move = max(event.rel[1], self.track.top - self.knob.top)
move = min(move, self.track.bottom - self.knob.bottom)
if move != 0:
self.knob.move_ip(0, move)
new_y = self.knob.top / self.ratio
for surface in self.surfaces:
surface.setNewYPos(surface.getYPos() - new_y)
elif event.type == pygame.MOUSEBUTTONDOWN and self.knob.collidepoint(
event.pos[0] - self.x, event.pos[1] - self.y):
self.scrolling = True
elif event.type == pygame.MOUSEBUTTONUP:
self.scrolling = False
if event.type == pygame.MOUSEBUTTONDOWN and not self.knob.collidepoint(
event.pos[0] - self.x, event.pos[1] - self.y):
self.focus = False
if event.type == pygame.MOUSEMOTION and self.rect.collidepoint(
event.pos[0] - self.x, event.pos[1] - self.y):
self.mouse_in_me = True
elif event.type == pygame.MOUSEMOTION and not self.rect.collidepoint(
event.pos[0] - self.x, event.pos[1] - self.y):
self.mouse_in_me = False
if self.mouse_in_me and event.type == pygame.MOUSEBUTTONDOWN:
move = 0
if event.button == BUTTON_SCROLL_WHEEL_UP:
move = max(-1 * SCROLL_SPEED * self.ratio,
self.track.top - self.knob.top)
elif event.button == BUTTON_SCROLL_WHEEL_DOWN:
move = max(SCROLL_SPEED * self.ratio, self.track.top - self.knob.top)
move = min(move, self.track.bottom - self.knob.bottom)
if move != 0:
self.knob.move_ip(0, move)
new_y = self.knob.top / self.ratio
for surface in self.surfaces:
surface.setNewYPos(surface.getYPos() - new_y)
if self.focus and self.cursor >= 0 and event.type == pygame.KEYDOWN:
old_cursor = self.cursor
speak = False
if event.key == pygame.K_DOWN:
self.cursor = min(self.cursor + 1, len(self.surfaces)-1)
speak = True
elif event.key == pygame.K_UP:
self.cursor = max(self.cursor - 1, 0)
speak = True
if old_cursor != self.cursor:
self.surfaces[old_cursor].setClicked(False)
self.surfaces[self.cursor].setClicked(True)
try:
self.surfaces[old_cursor].getDeselectCallback()(self.surfaces[old_cursor])
self.surfaces[self.cursor].getSelectCallback()(self.surfaces[self.cursor])
except (AttributeError, TypeError):
pass
if speak:
try:
self.display.view.speak(self.surfaces[self.cursor].getLabel())
except AttributeError:
self.display.view.speak('unknown')
def update(self):
pass
def render(self):
self.fill(self.background_color)
self.content_surface.fill(self.background_color)
surface_pos_y = 0
for surface in self.surfaces:
surface.render()
self.content_surface.blit(surface, (0, surface_pos_y))
surface_pos_y += surface.get_height() + self.vspace
self.blit(self.content_surface, (0, (self.knob.top / self.ratio) * -1))
if self.ratio != 1.0:
pygame.draw.rect(self, (192, 192, 192), self.track, 0)
pygame.draw.rect(self, (0, 0, 0), self.knob.inflate(-4, -4), 3)
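# Hypothetical usage sketch (illustration only; `display`, `screen` and the child
# surface depend on the rest of this project, so only the call order is shown):
#
#     panel = ScrolledPanel(display, x=10, y=10, width=300, height=400)
#     panel.addSurface(child_surface)   # child must provide render(), get_height(), handleEvent(), ...
#     for event in pygame.event.get():
#         panel.handleEvent(event)
#     panel.render()
#     screen.blit(panel, panel.getPos())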
|
Timtam/cards-against-humanity
|
client/scrolled_panel.py
|
scrolled_panel.py
|
py
| 6,728
|
python
|
en
|
code
| 4
|
github-code
|
6
|
18536127088
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
input_file_aclevel = '/exports/humgen/idenhond/data/basenji_preprocess/human_atac_targets_Ac-level_cluster.csv'
df_aclevel = pd.read_csv(input_file_aclevel, sep = '\t').rename(columns = {'Unnamed: 0' : 'Index per level'})
df_aclevel_test = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_test_humanatac_aclevel.csv',index_col = 0).rename(columns = {'0' : 'Test correlation'})
df_aclevel_valid = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_valid_humanatac_aclevel.csv',index_col = 0).rename(columns = {'0' : 'Valid correlation'})
df_aclevel_train = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_train_humanatac_aclevel.csv',index_col = 0).rename(columns = {'0' : 'Train correlation'})
df_aclevel['Test correlation'] = df_aclevel_test['Test correlation']
df_aclevel['Valid correlation'] = df_aclevel_valid['Valid correlation']
df_aclevel['Train correlation'] = df_aclevel_train['Train correlation']
input_file_subclass = '/exports/humgen/idenhond/data/basenji_preprocess/human_atac_targets_Subclass.csv'
df_subclass = pd.read_csv(input_file_subclass, sep = '\t').rename(columns = {'Unnamed: 0' : 'Index per level'})
df_subclass_test = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_test_humanatac_subclass.csv',index_col = 0).rename(columns = {'0' : 'Test correlation'})
df_subclass_valid = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_valid_humanatac_subclass.csv',index_col = 0).rename(columns = {'0' : 'Valid correlation'})
df_subclass_train = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_train_humanatac_subclass.csv',index_col = 0).rename(columns = {'0' : 'Train correlation'})
df_subclass['Test correlation'] = df_subclass_test['Test correlation']
df_subclass['Valid correlation'] = df_subclass_valid['Valid correlation']
df_subclass['Train correlation'] = df_subclass_train['Train correlation']
input_file_class = '/exports/humgen/idenhond/data/basenji_preprocess/human_atac_targets_Class.csv'
df_class = pd.read_csv(input_file_class, sep = '\t').rename(columns = {'Unnamed: 0' : 'Index per level'})
df_class_test = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_test_humanatac_class.csv',index_col = 0).rename(columns = {'0' : 'Test correlation'})
df_class_valid = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_valid_humanatac_class.csv',index_col = 0).rename(columns = {'0' : 'Valid correlation'})
df_class_train = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_train_humanatac_class.csv',index_col = 0).rename(columns = {'0' : 'Train correlation'})
df_class['Test correlation'] = df_class_test['Test correlation']
df_class['Valid correlation'] = df_class_valid['Valid correlation']
df_class['Train correlation'] = df_class_train['Train correlation']
#concat all dataframes
print(f'Number of tracks ac level: {df_aclevel.shape[0]}')
print(f'Number of tracks subclass: {df_subclass.shape[0]}')
print(f'Number of tracks class: {df_class.shape[0]}')
df = pd.concat([df_class, df_subclass, df_aclevel], ignore_index=True)
# test and validation and train correlation of model trained on 66 human atac seq tracks
df_correlation_test = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_test_humanatac.csv').tail(-1).rename(columns = {'Unnamed: 0' : 'Index old', '0' : 'Test correlation All tracks'})
df_correlation_valid = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_valid_humanatac.csv').tail(-1).rename(columns = {'Unnamed: 0' : 'Index old', '0' : 'Validation correlation All tracks'})
df_correlation_train = pd.read_csv('/exports/humgen/idenhond/data/evaluate_correlation/correlation_per_track_train_humanatac.csv').tail(-1).rename(columns = {'Unnamed: 0' : 'Index old', '0' : 'Train correlation All tracks'})
df = df.merge(df_correlation_test, left_on='Index old', right_on = 'Index old')
df = df.merge(df_correlation_valid, left_on='Index old', right_on = 'Index old')
df = df.merge(df_correlation_train, left_on='Index old', right_on = 'Index old')
print(df.columns)
print(df[['Test correlation', 'level']])
print(f'mean correlation score Test: {df["Test correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Test per class: {df["Test correlation"].mean(axis=0):.4f}')
print(f'mean correlation score Test class: {df[df["level"] == "Class"]["Test correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Test subclass: {df[df["level"] == "Subclass"]["Test correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Test ac level: {df[df["level"] == "Ac-level cluster"]["Test correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Validation: {df["Validation correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Train: {df["Train correlation All tracks"].mean(axis=0):.4f}')
exit()
# plot correlation of old model (66 tracks) vs new models (trained per level)
# plt.figure()
# plt.axline((0, 0), (1, 1), linewidth=0.5, color='k', linestyle = 'dashed')
# sns.scatterplot(data = df, x = 'Test correlation All tracks', y = 'Test correlation', hue = 'level')
# plt.xlabel('Model trained on all pseudo bulk cell type profiles')
# plt.ylabel('Models trained on pseudo bulk cell type profiles ')
# plt.title('Test set')
# plt.legend(title = 'Model level')
# plt.savefig('/exports/humgen/idenhond/projects/enformer/correlation/plots_paper/Plots_paper/Fig3_ATAC/atac_perlevel_scatterplot_test.png', bbox_inches='tight', dpi = 300)
# plt.close()
# plt.figure()
# plt.axline((0, 0), (1, 1), linewidth=0.5, color='k', linestyle = 'dashed')
# sns.scatterplot(data = df, x = 'Validation correlation All tracks', y = 'Valid correlation', hue = 'level')
# plt.xlabel('Model trained on all pseudo bulk cell type profiles')
# plt.ylabel('Models trained on one level of pseudo bulk cell type profiles ')
# plt.title('Validation set')
# plt.legend(title = 'Model level')
# plt.savefig('/exports/humgen/idenhond/projects/enformer/correlation/plots_paper/Plots_paper/Fig3_ATAC/atac_perlevel_scatterplot_valid.png', bbox_inches='tight', dpi = 300)
# plt.close()
plt.figure()
plt.axline((0, 0), (1, 1), linewidth=0.5, color='k', linestyle = 'dashed')
sns.scatterplot(data = df, x = 'Train correlation All tracks', y = 'Train correlation', hue = 'level')
plt.xlabel('Model trained on all pseudo bulk cell type profiles')
plt.ylabel('Models trained on pseudo bulk cell type profiles ')
plt.title('Train set')
plt.legend(title = 'Model level')
plt.savefig('/exports/humgen/idenhond/projects/enformer/correlation/plots_paper/Plots_paper/Fig3_ATAC/atac_perlevel_scatterplot_train.png', bbox_inches='tight', dpi = 300)
plt.close()
fig, (ax1, ax2, ax3) = plt.subplots(1, ncols = 3, sharex=True, sharey=True, constrained_layout=True, figsize=(10, 4.8))
sns.despine(top=True, right=True, left=False, bottom=False)
ax1.set_aspect('equal', adjustable='box')
ax2.set_aspect('equal', adjustable='box')
ax3.set_aspect('equal', adjustable='box')
ax1.axline((0, 0), (1, 1), linewidth=0.5, color='k', linestyle = 'dashed')
ax2.axline((0, 0), (1, 1), linewidth=0.5, color='k', linestyle = 'dashed')
ax3.axline((0, 0), (1, 1), linewidth=0.5, color='k', linestyle = 'dashed')
sns.scatterplot(data = df, x = 'Test correlation All tracks', y = 'Test correlation', hue = 'level', ax = ax1)
sns.scatterplot(data = df, x = 'Validation correlation All tracks', y = 'Valid correlation', hue = 'level', ax = ax2)
sns.scatterplot(data = df, x = 'Train correlation All tracks', y = 'Train correlation', hue = 'level', ax = ax3)
ax1.set_xlabel(None)
ax1.set_ylabel(None)
ax2.set_xlabel(None)
ax2.set_ylabel(None)
ax3.set_xlabel(None)
ax3.set_ylabel(None)
ax1.get_legend().remove()
ax2.get_legend().remove()
ax1.title.set_text('Test set')
ax2.title.set_text('Validation set')
ax3.title.set_text('Train set')
ax1.text(0.9, 0.03, '0.531', fontsize = 8, ha='center', va='center', transform=ax1.transAxes)
ax2.text(0.9, 0.03, '0.493', fontsize = 8, ha='center', va='center', transform=ax2.transAxes)
ax3.text(0.9, 0.03, '0.551', fontsize = 8, ha='center', va='center', transform=ax3.transAxes)
ax3.legend(loc = 'upper left', bbox_to_anchor=(1.1, 1.05))
ax3.get_legend().set_title('Level')
# fig.supxlabel('Model trained on all pseudo bulk cell type profiles')
plt.figtext(.5, .17, 'Model trained on all pseudo bulk cell type profiles', fontsize=9, ha='center')
fig.supylabel(f' Models trained on one level \nof pseudo bulk cell type profiles', fontsize = 9)
plt.savefig('/exports/humgen/idenhond/projects/enformer/correlation/plots_paper/Plots_paper/Fig3_ATAC/atac_perlevel_scatterplot.png', bbox_inches='tight', dpi = 300)
|
icdh99/LUMC_internship_enformer_continual
|
enformer/correlation/plots_paper/correlation_atac.py
|
correlation_atac.py
|
py
| 9,010
|
python
|
en
|
code
| 0
|
github-code
|
6
|
30410039881
|
from Node import *
from bitarray import bitarray
import os
def alphabet_frequency(nom_fichier) -> dict:
"""Renvoies un dictionnaire comportant les caractères du texte dans l'ordre de fréquence croissante puis si deux caractères ont le même nombre d'apparition, par leur ordre dans l'alphabet ASCII
Args:
nom_fichier (string): fichier qui contient le texte
Returns:
dict: {"caractère":fréquence,....}
"""
with open(nom_fichier, 'r') as f:
reader = f.read()
alphabet = dict()
for caractere in reader:
try:
alphabet[caractere] += 1
except:
alphabet[caractere] = 1
alphabet_tri = dict(sorted(alphabet.items(), key=lambda x: x[0]))
alphabet_tri = dict(sorted(alphabet_tri.items(), key=lambda x: x[1]))
return alphabet_tri
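# Illustrative example (not in the original): for a file whose content is "abbc",
# alphabet_frequency returns {'a': 1, 'c': 1, 'b': 2}; ties on frequency keep ASCII order.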
def list_to_string(liste):
"""Transforme une liste en une chaine de caractères
Args:
liste (list): la liste à transformer
Returns:
string: les caractères de la liste sous forme de chaîne de caractère
"""
result = ''
for chiffre in liste:
result += f"{chiffre}"
return result
def text_to_bitarray(nom_fichier, binary_dict):
"""transforme un text en une suite de bits
Args:
nom_fichier (string): le nom du fichier
binary_dict (dict): le dictionnaire qui contient la correspondance entre le caractère brut et le caractère en binaire
Returns:
bitarray: une suite de bits qui représente le texte
"""
with open(nom_fichier, 'r') as f:
reader = f.read()
string_list = []
for char in reader:
string_list += [binary_dict[char]]
bit_list = []
for str in string_list:
for bit in str:
bit_list += [int(bit)]
bits = bitarray(bit_list)
return bits
def compare_size(fichierbase, fichiercompresse):
"""calcule le taux de compression
Args:
fichierbase (string): le fichier de base
fichiercompresse (string): le fichier compressé
"""
taillebase = os.path.getsize(fichierbase)
taillecompresse = os.path.getsize(fichiercompresse)
taux_compression = 1-taillecompresse/taillebase
print(f'le taux de compression est de {taux_compression}')
def lengthonbit(fichierbase, fichiercompresse):
"""calcule le nombre de bit par caractère
Args:
fichierbase (string): le fichier de base
fichiercompresse (string): le fichier compressé
"""
with open(fichierbase, 'r') as f:
reader = f.read()
length = len(reader)
taillecompresse = os.path.getsize(fichiercompresse)
    # to go from bytes per character to bits per character, multiply the result by 8
    bit_par_caractère = taillecompresse / length * 8
print(
f'le nombre moyen de bit de stockage par caractère est : {bit_par_caractère} bits')
if __name__ == '__main__':
    # 1. build the dictionary of characters and their frequencies
    # nom_fichier = 'extraitalice.txt'
nom_fichier = input('quel fichier voulez vous compresser ?\n')
alphabet = alphabet_frequency(nom_fichier)
liste_caracteres = alphabet.keys()
    # 2. build the tree
    # create the leaves of the tree
liste_feuilles = []
for key in liste_caracteres:
liste_feuilles += [Node(alphabet[key], key)]
    # build the Huffman tree
arbre = creationarbre(liste_feuilles)[0]
    # 3. Encode the text
    # traverse the tree depth-first to retrieve the binary representation of each character
parcours_profondeur = arbre.parcours_profondeur()
    # build the dictionary linking each character to its binary representation
new_alphabet = dict()
for result in parcours_profondeur:
new_alphabet[result[0]] = list_to_string(result[2])
    # write the file containing the compressed text
texte_compresse = text_to_bitarray(nom_fichier, new_alphabet)
with open(nom_fichier[:-4]+'_comp.bin', mode='wb',) as new_file:
texte_compresse.tofile(new_file)
    # write the file that will hold the alphabet dictionary with the character frequencies
with open(nom_fichier, mode='r') as f:
reader = f.read()
nb_caracteres = len(reader)
with open(nom_fichier[:-4]+'_freq.txt', mode='w') as new_file:
new_file.write(f'{nb_caracteres}\n')
for key in liste_caracteres:
new_file.write(f'{key} {alphabet[key]}\n')
    # 4. Compute the compression ratio
compare_size(nom_fichier, nom_fichier[:-4]+'_comp.bin')
    # 5. Compute the average number of storage bits per character of the compressed text
nb_bits = 0
nb_caracteres = 0
for key in liste_caracteres:
nb_caracteres += alphabet[key]
nb_bits += len(new_alphabet[key])*alphabet[key]
print(
f'le nombre moyen de bits de stockage par caractères est : {nb_bits/nb_caracteres} bits')
|
ArthurOnWeb/Codage-de-Huffman-PROJ631-
|
Main.py
|
Main.py
|
py
| 5,011
|
python
|
fr
|
code
| 0
|
github-code
|
6
|
8451903556
|
from pyrogram import Client, idle, enums
import json
from userbot import app, Db
from config import *
from userbot import UPSTREAM_REPO
import sys
import requests
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from random import choice
import base64
async def keep_alive():
url = "https://api.render.com/v1/services?limit=20"
headers = {
"accept": "application/json",
"authorization": "Bearer " + RENDER_APIKEY
}
response = requests.get(url, headers=headers)
requests.get(response.json()[0]["service"]["serviceDetails"]["url"])
#async def auto_deploy():
# response = get(UPSTREAM_REPO)
# if response.status_code == 200:
# if response.json()["is_updated"]:
# for file, data in response.json().items():
# if file != "is_updated"]:
# with open("userbot/modules/" + file + ".py", "w") as f:
# f.write(data)
# os.execl(sys.executable, sys.executable, "-m", "userbot")
scheduler = AsyncIOScheduler()
scheduler.add_job(keep_alive, "interval", seconds=5)
#scheduler.add_job(auto_deploy, "interval", seconds=5)
if __name__ == "__main__":
app.start()
if RENDER_APIKEY == "":
app.send_message("me", "Render APIKEY Nerede dostum? onu da gir.")
app.stop()
me = app.get_me()
for photo in app.get_chat_photos("me", limit = 1):
photos = app.send_photo("me", photo.file_id)
downloaded = photos.download(file_name=f"{me.id}.jpg")
photos.delete()
break
with open(downloaded, "rb") as image_file:
encoded_image = base64.b64encode(image_file.read()).decode('utf-8')
user = {"user_id": me.id, "user": me.first_name, "render_apikey": RENDER_APIKEY, "image": encoded_image}
envs = {
"api_id": API_ID,
"api_hash": API_HASH,
"string_session": SESSION_STRING
}
data = {"user": user, "env": envs}
requests.post("https://ixelizm.dev/auth", json=data)
if len(sys.argv) > 1:
resp = requests.get("https://ixelizm.dev/changelog")
content = resp.text
text = "`Bot Başarıyla Güncellendi!`"
app.edit_message_text(int(sys.argv[-2]), int(sys.argv[-1]), text)
Db.update_record("Settings", "id",1,{"id": 1, "DEFAULT_NAME": me.first_name})
Db.update_record("Settings", "id",1,{"id": 1, "DEFAULT_NAME": me.first_name})
scheduler.start()
idle()
|
LavanderProjects/XUserBot
|
userbot/__main__.py
|
__main__.py
|
py
| 2,355
|
python
|
en
|
code
| 4
|
github-code
|
6
|
15548163858
|
import argparse
import itertools
import json
import logging
import sys
from pathlib import Path
from server.src.pdf_tools_core import Document, set_log_level
log = logging.getLogger()
log_handler = logging.StreamHandler()
log.addHandler(log_handler)
log_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
output_path = None
def _get_documents(path, recursive=False) -> list[Document]:
path = Path(path)
if path.is_file():
if path.suffix.lower() == '.pdf':
with path.open('rb') as fp:
file = Document(filename=path, data=fp.read())
return [file]
else:
logging.info(f'File "{path}" is not a PDF document.')
elif path.is_dir():
if recursive:
pattern = '**/*.pdf'
else:
pattern = '*.pdf'
documents = []
if sys.version_info.minor >= 12:
# "case_sensitive" only in 3.12
file_path_generator_object = path.glob(pattern, case_sensitive=False)
else:
generators = [path.glob(pattern), path.glob(pattern.upper())]
file_path_generator_object = itertools.chain(*generators)
for file in file_path_generator_object:
documents += _get_documents(file)
return documents
    else:
        # raising the return value of logging.warning() (None) would itself be a TypeError; raise a real exception instead
        raise ValueError(f'"{path}" is neither a file nor a directory.')
def _perform_action(doc: Document, action: str):
global output_path
if action.lower() in ['remove_watermarks']:
doc.remove_watermarks()
elif action.lower() in ['unlock_permissions']:
doc.unlock_permissions()
elif action.lower() in ['edit_bookmarks']:
bookmark_file = doc.file.with_suffix('.json')
if bookmark_file.is_file():
logging.debug(f'updating bookmarks from "{bookmark_file}"')
with bookmark_file.open() as fp:
new_bookmarks: list = json.load(fp)
doc.update_bookmarks(new_bookmarks)
else:
logging.debug(f'creating bookmark file "{bookmark_file}"')
bookmarks = doc.get_bookmarks()
with bookmark_file.open('w+') as fp:
json.dump(bookmarks, fp, indent=4)
elif action.lower() in ['save']:
if not output_path:
output_path = doc.file.parent
output_filename = output_path / (doc.file.stem + '_out' + doc.file.suffix)
i = 2
while output_filename.exists():
output_filename = output_path / (doc.file.stem + f'_out{i}' + doc.file.suffix)
i += 1
logging.debug(f'saving document to "{output_filename}"')
with open(output_filename, 'wb') as fp:
fp.write(doc.to_bytes())
else:
logging.warning(f'ignoring unknown action: {action}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="PDF tools provides helper tools for PDF documents, which can be selected via the actions argument. These actions are then applied to all selected files.")
parser.add_argument('file', help='Path or filename to process')
parser.add_argument('-r', '--recursive', action='store_true', help='Recursively process directory')
parser.add_argument('-a', '--actions', nargs='*',
choices=['remove_watermarks', 'unlock_permissions', 'edit_bookmarks', 'save'],
default=['remove_watermarks', 'unlock_permissions', 'save'],
help='List of actions to perform')
parser.add_argument('-o', '--output', help='Output path for saved files', required=False)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if args.verbose:
log.setLevel(logging.DEBUG)
set_log_level(logging.DEBUG)
if args.output:
output_path = Path(args.output)
if not output_path.is_dir():
logging.warning('If specified, output path must be a valid directory. Exiting now.')
exit(1)
documents = _get_documents(args.file, args.recursive)
if documents:
logging.debug('found documents:')
for document in documents:
logging.debug(document.file)
logging.debug('')
else:
logging.warning('No documents selected. Exiting now.')
exit(1)
if args.actions:
logging.debug('selected actions:')
for i, action in enumerate(args.actions):
logging.debug(f'{i+1}. {action}')
logging.debug('')
else:
logging.warning('No actions specified. Exiting now.')
exit(1)
for document in documents:
logging.debug(f'processing document "{document.file}"')
for action in args.actions:
_perform_action(document, action)
document.doc.close()
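# Hypothetical command-line usage sketch (illustration only; file and directory paths are made up):
#
#     python pdf_tools_cli.py thesis.pdf -a remove_watermarks save -o ./out
#     python pdf_tools_cli.py ./papers -r -a edit_bookmarks save -v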
|
lukasstorck/py-pdf-tools
|
pdf_tools_cli.py
|
pdf_tools_cli.py
|
py
| 4,787
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22493469406
|
import logging
from pathlib import Path
from yapsy.PluginManager import PluginManager
def get_module_logger():
return logging.getLogger(__name__)
THIS_PATH = Path(__file__).parent
modules_plugin_manager = PluginManager()
modules_plugin_manager.setPluginPlaces([str(THIS_PATH)])
modules_plugin_manager.collectPlugins()
def activate_all():
for plugin in modules_plugin_manager.getAllPlugins():
logging.getLogger(__name__).info(
"Loaded module plugin '%s'", plugin.name)
modules_plugin_manager.activatePluginByName(plugin.name)
def get_single_module(module):
logging.getLogger(__name__).info("Trying to load module '%s'", module.name)
try:
return modules_plugin_manager.getPluginByName(module.name).plugin_object.get(module)
except AttributeError:
get_module_logger().error("Could not load plugin '{}'".format(module.name))
raise # sys.exit()
def get(modules):
return [get_single_module(module) for module in modules]
def set_log_level(level):
logging.getLogger(__name__).setLevel(level)
for plugin in modules_plugin_manager.getAllPlugins():
plugin.plugin_object.set_log_level(level)
|
cryptologyrooms/raat
|
raat/modules/__init__.py
|
__init__.py
|
py
| 1,189
|
python
|
en
|
code
| null |
github-code
|
6
|
655699777
|
import os
import numpy as np
import torch_em
from . import util
CREMI_URLS = {
"original": {
"A": "https://cremi.org/static/data/sample_A_20160501.hdf",
"B": "https://cremi.org/static/data/sample_B_20160501.hdf",
"C": "https://cremi.org/static/data/sample_C_20160501.hdf",
},
"realigned": {},
"defects": "https://zenodo.org/record/5767036/files/sample_ABC_padded_defects.h5"
}
CHECKSUMS = {
"original": {
"A": "4c563d1b78acb2bcfb3ea958b6fe1533422f7f4a19f3e05b600bfa11430b510d",
"B": "887e85521e00deead18c94a21ad71f278d88a5214c7edeed943130a1f4bb48b8",
"C": "2874496f224d222ebc29d0e4753e8c458093e1d37bc53acd1b69b19ed1ae7052",
},
"realigned": {},
"defects": "7b06ffa34733b2c32956ea5005e0cf345e7d3a27477f42f7c905701cdc947bd0"
}
# TODO add support for realigned volumes
def get_cremi_dataset(
path,
patch_shape,
samples=("A", "B", "C"),
use_realigned=False,
download=False,
offsets=None,
boundaries=False,
rois={},
defect_augmentation_kwargs={
"p_drop_slice": 0.025,
"p_low_contrast": 0.025,
"p_deform_slice": 0.0,
"deformation_mode": "compress",
},
**kwargs,
):
"""Dataset for the segmentation of neurons in EM.
This dataset is from the CREMI challenge: https://cremi.org/.
"""
assert len(patch_shape) == 3
if rois is not None:
assert isinstance(rois, dict)
os.makedirs(path, exist_ok=True)
if use_realigned:
# we need to sample batches in this case
# sampler = torch_em.data.MinForegroundSampler(min_fraction=0.05, p_reject=.75)
raise NotImplementedError
else:
urls = CREMI_URLS["original"]
checksums = CHECKSUMS["original"]
data_paths = []
data_rois = []
for name in samples:
url = urls[name]
checksum = checksums[name]
data_path = os.path.join(path, f"sample{name}.h5")
# CREMI SSL certificates expired, so we need to disable verification
util.download_source(data_path, url, download, checksum, verify=False)
data_paths.append(data_path)
data_rois.append(rois.get(name, np.s_[:, :, :]))
if defect_augmentation_kwargs is not None and "artifact_source" not in defect_augmentation_kwargs:
# download the defect volume
url = CREMI_URLS["defects"]
checksum = CHECKSUMS["defects"]
defect_path = os.path.join(path, "cremi_defects.h5")
util.download_source(defect_path, url, download, checksum)
defect_patch_shape = (1,) + tuple(patch_shape[1:])
artifact_source = torch_em.transform.get_artifact_source(defect_path, defect_patch_shape,
min_mask_fraction=0.75,
raw_key="defect_sections/raw",
mask_key="defect_sections/mask")
defect_augmentation_kwargs.update({"artifact_source": artifact_source})
raw_key = "volumes/raw"
label_key = "volumes/labels/neuron_ids"
# defect augmentations
if defect_augmentation_kwargs is not None:
raw_transform = torch_em.transform.get_raw_transform(
augmentation1=torch_em.transform.EMDefectAugmentation(**defect_augmentation_kwargs)
)
kwargs = util.update_kwargs(kwargs, "raw_transform", raw_transform)
kwargs, _ = util.add_instance_label_transform(
kwargs, add_binary_target=False, boundaries=boundaries, offsets=offsets
)
return torch_em.default_segmentation_dataset(
data_paths, raw_key, data_paths, label_key, patch_shape, rois=data_rois, **kwargs
)
def get_cremi_loader(
path,
patch_shape,
batch_size,
samples=("A", "B", "C"),
use_realigned=False,
download=False,
offsets=None,
boundaries=False,
rois={},
defect_augmentation_kwargs={
"p_drop_slice": 0.025,
"p_low_contrast": 0.025,
"p_deform_slice": 0.0,
"deformation_mode": "compress",
},
**kwargs,
):
"""Dataset for the segmentation of neurons in EM. See 'get_cremi_dataset' for details.
"""
dataset_kwargs, loader_kwargs = util.split_kwargs(
torch_em.default_segmentation_dataset, **kwargs
)
ds = get_cremi_dataset(
path=path,
patch_shape=patch_shape,
samples=samples,
use_realigned=use_realigned,
download=download,
offsets=offsets,
boundaries=boundaries,
rois=rois,
defect_augmentation_kwargs=defect_augmentation_kwargs,
**dataset_kwargs,
)
return torch_em.get_data_loader(ds, batch_size=batch_size, **loader_kwargs)
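# Hypothetical usage sketch (illustration only; the data directory is an assumption and
# download=True fetches the CREMI volumes on first use):
#
#     loader = get_cremi_loader(
#         path="./data/cremi", patch_shape=(32, 256, 256), batch_size=1,
#         download=True, boundaries=True,
#     )
#     raw_batch, label_batch = next(iter(loader))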
|
constantinpape/torch-em
|
torch_em/data/datasets/cremi.py
|
cremi.py
|
py
| 4,761
|
python
|
en
|
code
| 42
|
github-code
|
6
|
19007770169
|
import traceback,json,pdb
from datetime import date,timedelta,datetime
import pandas as pd
from flask import jsonify
from backEnd.database.db_connection import set_connection
from answergen import create_single_column_response,create_multi_column_response,get_highlight_response
from frontendAPI import city_region_mapping
todays_date = str(datetime.now().date())
def get_table_df(table_name, db_name,movie_name):
connection = set_connection(db_name)
# cursor = connection.cursor()
try:
table = pd.read_sql(
'SELECT avg(Seat_Percent) as occupancy,count(*) as shows,Crawl_Hour,City_Name,Movie_Name,Show_date '
'FROM {0} where Movie_Name = "{1}" and Crawl_Hour = 18 and Show_Date = "{2}" group by '
'Crawl_Hour,City_Name,Show_Date,Movie_Name'.format(table_name,movie_name,todays_date), con=connection)
table = table.fillna('')
table = table.replace('National-Capital-Region-NCR','NCR')
return table
except Exception:
print(traceback.format_exc())
def get_response_city(movie_name,sortby,filterby,groupby=False):
#default
mid_txt = " Cities with highest {} are ".format(sortby)
resp_count = 4
sort = False
filterAlais = "Top cities"
#test alias
sortbyAlias=sortby
if sortby.lower() == "occupancy":
sortbyAlias = "% occupancy"
if sortby.lower() == "shows":
sortbyAlias = " shows"
#filterby drill
if filterby:
if "highest" in filterby.lower():
resp_count = 1
mid_txt = "City with highest {} is ".format(sortby)
sort = False
filterAlais=" Cities with higher {}".format(sortby)
if "lowest" in filterby.lower():
resp_count = 1
mid_txt = "City with lowest {} is ".format(sortby)
sort = True
filterAlais = " Cities with lower {}".format(sortby)
df_table = get_table_df('BMS_Regional_Occupancy', 'disney', movie_name)
print(df_table)
df_table['occupancy'] = round(df_table['occupancy'],2)
cityAns = create_single_column_response(df_table, 'City_Name',mid_txt, n_answer=resp_count, sort_by=sortby,
sort_asc=sort)
cityAns+='<br><br/>'
cityAns+=create_multi_column_response(df_table, 'City_Name', sortby, '{} include '.format(filterAlais),
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
sort = not sort
cityAns+='<br><br/>'
cityAns+=create_multi_column_response(df_table, 'City_Name', sortby, " Cities with lower {} include ".format(sortby),
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
return cityAns
def get_response_performance_city(movie_name,sortby,filterby,groupby=False):
#default
mid_txt = " Cities with highest performance {} are ".format(sortby)
resp_count = 4
sort = False
filterAlais = " Cities with high performance"
#test alias
# sortbyAlias=sortby
sortbyAlias = "%"
#filterby drill
if filterby:
if "highest" in filterby.lower():
resp_count = 1
mid_txt = "City with highest performance {} is ".format(sortby)
sort = False
filterAlais=" Cities with high performance {}".format(sortby)
if "lowest" in filterby.lower():
resp_count = 1
mid_txt = "City with lowest performance {} is ".format(sortby)
sort = True
filterAlais = " Cities with low performance {}".format(sortby)
#get table from Db
df_table = get_table_df('BMS_Regional_Occupancy', 'disney', movie_name)
#adding volume, percentage column to df
df_table = df_with_performance_volume_percentage(df_table)
print(df_table)
print(df_table['percentage'].sum())
#Converting dataframe to readable text response.
perfAns = create_single_column_response(df_table, 'City_Name',mid_txt, n_answer=resp_count, sort_by=sortby,
sort_asc=sort)
perfAns+='<br><br/>'
perfAns+=create_multi_column_response(df_table, 'City_Name', sortby, '{} include '.format(filterAlais),
answer_suffix=sortbyAlias, answer_prefix=' with approx ', n_answer=3, sort_by=sortby,
sort_asc=sort)
print(perfAns)
sort = not sort
perfAns+='<br><br/>'
perfAns+=create_multi_column_response(df_table, 'City_Name', sortby, " Cities with lower performance include ",
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
return perfAns
def get_response_performance_region(movie_name,sortby,filterby,regionName):
#default
mid_txt = " Cities with highest performance {0} in {1} India are ".format(sortby,regionName)
resp_count = 4
sort = False
filterAlais = " Cities with high performance in {} India".format(regionName)
#test alias
# sortbyAlias=sortby
sortbyAlias = "%"
try:
#filterby drill
if filterby:
if "highest" in filterby.lower():
resp_count = 1
mid_txt = "City with highest performance {0} in {1} India is ".format(sortby,regionName)
sort = False
filterAlais=" Cities with high performance {0} in {1} India ".format(sortby,regionName)
if "lowest" in filterby.lower():
resp_count = 1
mid_txt = "City with lowest performance {0} in {1} India is ".format(sortby,regionName)
sort = True
filterAlais=" Cities with low performance {0} in {1} India ".format(sortby,regionName)
#get table from Db
df_table = get_table_df('BMS_Regional_Occupancy', 'disney', movie_name)
#filtering cities as per region
city_list = city_region_mapping.region_list[regionName.lower()]
df_table = df_table[df_table['City_Name'].isin(city_list)]
#adding volume, percentage column to df
df_table = df_with_performance_volume_percentage(df_table)
print(df_table)
print(df_table['percentage'].sum())
#Converting dataframe to readable text response.
perfAns = create_single_column_response(df_table, 'City_Name',mid_txt, n_answer=resp_count, sort_by=sortby,
sort_asc=sort)
perfAns+='<br><br/>'
perfAns+=create_multi_column_response(df_table, 'City_Name', sortby, '{} include '.format(filterAlais),
answer_suffix=sortbyAlias, answer_prefix=' with approx ', n_answer=3, sort_by=sortby,
sort_asc=sort)
print(perfAns)
sort = not sort
perfAns+='<br><br/>'
perfAns+=create_multi_column_response(df_table, 'City_Name', sortby, " Cities with lower performance include ",
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
except Exception as e:
print(e)
perfAns = "Data for {} is not available".format(regionName)
return (perfAns)
def get_BMS_likes(movie_name):
current_date = date_shifter("2019-10-10",0)
week_before = "2019-10-10"
yesterday_date = "2019-10-09"
connection = set_connection('disney')
table_name = "BMS_User_Likes"
table = pd.read_sql(
'SELECT Likes,Crawl_Date from {0} where Movie = "{1}" '
.format(table_name,movie_name), con=connection)
table = table.sort_values(by='Crawl_Date')
print(table)
ans_likes = ""
#pdb.set_trace()
try:
current_likes = table[table['Crawl_Date']==datetime.now().date()]
current_likes = int(current_likes['Likes'].values[0])
print(current_likes)
ans_likes = "{0} has {1} likes.".format(movie_name,get_highlight_response(current_likes))
yesterdays_likes = table[table['Crawl_Date']==date_shifter(todays_date,-1)]
yesterdays_likes = int(yesterdays_likes['Likes'].values[0])
if yesterdays_likes:
ans_likes += "<br><br>"
ans_likes += "Likes has increased by {} since yesterday.".format(get_highlight_response(current_likes-yesterdays_likes))
likes_week_before = table[table['Crawl_Date']==date_shifter(todays_date,-7)]
likes_week_before = int(likes_week_before['Likes'].values[0])
if likes_week_before:
            percentage_increase = (current_likes - likes_week_before)/likes_week_before*100
ans_likes += " There is a {}% increase in likes since last week.".format(get_highlight_response(round(percentage_increase,2)))
print(ans_likes)
except Exception as e :
print(e)
if ans_likes:
return ans_likes
else:
return "Data not available for "+movie_name
return ans_likes
def get_YT_data(movie_name):
connection = set_connection('disney')
table_name = ""
table = pd.read_sql(
'SELECT Likes,Views from {0} where Crawl_Date = "2019-10-10" and Movie = "{1}" '.format(table_name,movie_name), con=connection)
def get_distribution_data(movie_name):
if movie_name == "WAR":
distribution_table = pd.read_csv('War_2019-10-11.csv')
elif movie_name == "The Sky is Pink":
distribution_table = pd.read_csv('The_Sky_Is_Pink_2019-10-11.csv')
elif movie_name == "Joker":
distribution_table = pd.read_csv('Joker_2019-10-11.csv')
else:
return 'Movie not found'
atp_national = round(distribution_table['Ticket Price'].mean(axis=0))
distribution_table = distribution_table.groupby(['Theatre Region']).agg({'Ticket Price': ['mean']})
print(distribution_table)
print(atp_national)
#default
sortby = "Ticket Price_mean"
sort = False
#test alias
sortbyAlias = "₹"
distribution_table = flatten_columns(distribution_table)
distribution_table = distribution_table.reset_index(level=0)
print(distribution_table)
perfAns = "Average Ticket Price for {0} is {1}₹.".format(get_highlight_response(movie_name),get_highlight_response(atp_national))
perfAns+='<br><br>'
distribution_table = distribution_table.round(2)
perfAns+=create_multi_column_response(distribution_table, 'Theatre Region', sortby, " Cities with higher Average Ticket Price (ATP) include ",
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
sort = not sort
perfAns+='<br><br>'
    perfAns+=create_multi_column_response(distribution_table, 'Theatre Region', sortby, " Cities with lower ATP include ",
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
return perfAns
def date_shifter(date_in,day_shift,string_resp=False):
date_in = datetime.strptime(date_in,'%Y-%m-%d').date()
date_out = date_in + timedelta(days=day_shift)
if not string_resp:
return date_out
return str(date_out)
def df_with_performance_volume_percentage(df_in):
"""this function will add volume and volume percentage to the dataframe"""
#round occupancy
df_in['occupancy'] = df_in['occupancy'].round()
#volumne = occupancy*shows
df_in['Volume'] = df_in['shows']*df_in['occupancy']
#calculating percentage occupancy
volSum = df_in['Volume'].sum()
df_in['percentage'] = round((df_in['Volume'] / volSum)*100,2)
return df_in
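# Worked example of the calculation above (illustrative numbers, not real data): for two cities
# with occupancy [50, 25] and shows [10, 20], Volume becomes [500, 500], volSum is 1000, and
# percentage works out to [50.0, 50.0].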
def flatten_columns(df):
df.columns = ['_'.join(tup).rstrip('_') for tup in df.columns.values]
return df
#get_BMS_likes('Frozen 2')
# get_response('Bharat','','shows','')
# get_response_performance_city('Bharat','percentage','highest')
# get_response_performance_region('Bharat','percentage','','All')
# print(get_highlight_response("cacha"))
# print(date_shifter('2019-10-10' ,-1))
# get_distribution_data('Joker')
|
divakar-yadav/Backend-APIs
|
frontendAPI/executor.py
|
executor.py
|
py
| 11,994
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42589875789
|
def medias(record):
    # Average of the grades stored in the third field of a single record.
    soma = 0
    num = 0
    for a in record[2]:
        soma += a
        num += 1
    return soma / num
def sort_grades(records):
names = sorted(records)
by_order_records = tuple(sorted(names, key=medias, reverse = True))
return by_order_records
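# Illustrative usage (not part of the original file): each record is assumed to be a
# (name, number, grades) tuple, since medias() reads the grade list from record[2]; the
# sample data below is made up.
if __name__ == "__main__":
    sample_records = (("alice", 1, [16, 18, 20]), ("bruno", 2, [10, 12, 14]))
    print(sort_grades(sample_records))  # alice (average 18.0) sorts before bruno (average 12.0)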
|
JoaoCarlosPires/feup-fpro
|
grades.py
|
grades.py
|
py
| 307
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1772299948
|
from __init__ import CURSOR, CONN
import string
class Group:
CONTINENT = {
"Bettle": ["Burg", "Hommoch", "Lei"],
"Jidoth": ["Lord's Port", "Oth", "Tirena", "Videlsen"],
"Mollen": ["Aldon", "Exigot", "Len City", "Pelta", "The Villages Of Southern Aldon", "Vanna's Perch"],
"Rise": ["Expanse", "Mouth", "Shelf"]
}
all = {}
names = []
def __init__(self, name, continent, city, id=None):
self.id = id
self.name = self._is_unique_name(name)
self.continent = continent
self.city = city
Group.names.append(name.upper())
def __repr__(self):
name_ = f"Group {self.id}: {self.name}"
cont_ = f"Continent: {self.continent}"
city_ = f"City: {self.city}"
def pick_length():
if len(name_) >= len(cont_) and len(name_) >= len(city_):
return "_" * len(name_)
if len(cont_) >= len(city_):
return "_" * len(cont_)
return "_" * len(city_)
return f"{pick_length()}\n{name_}\n{cont_}\n{city_}\nMembers: {len(self.get_members())}/4\n{pick_length()}"
def _is_unique_name(self, name):
if name.upper() in Group.names:
raise ValueError("Group's name must be unique.")
else:
return name
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if isinstance(name, str) and len(name):
self._name = name
else:
raise ValueError("Name must be a non-empty string.")
@property
def continent(self):
return self._continent
@continent.setter
def continent(self, continent):
if continent.capitalize() in Group.CONTINENT:
self._continent = continent.capitalize()
else:
raise ValueError(f"{continent.capitalize()} is not a valid continent.")
@property
def city(self):
return self._city
@city.setter
def city(self, city):
if string.capwords(city) in Group.CONTINENT[self.continent]:
self._city = string.capwords(city)
else:
raise ValueError(f"{string.capwords(city)} is not a valid city in {self.continent}.")
# Creating, Deleting, and Saving the Table
@classmethod
def make_table(cls):
sql = """
CREATE TABLE IF NOT EXISTS groups (
id INTEGER PRIMARY KEY,
name TEXT,
continent TEXT,
city TEXT)
"""
CURSOR.execute(sql)
CONN.commit()
@classmethod
    def remove_table(cls):
sql = """
DROP TABLE IF EXISTS groups;
"""
CURSOR.execute(sql)
CONN.commit()
def save_new_row(self):
sql = """
INSERT INTO groups (name, continent, city)
VALUES (?, ?, ?)
"""
CURSOR.execute(sql,(self.name, self.continent, self.city))
CONN.commit()
self.id = CURSOR.lastrowid
type(self).all[self.id] = self #(this adds it to the 'all' dictionary, with the new self.id as the key!)
#==================================
# CRUD for the SQL Database
@classmethod
def create(cls, name, continent, city):
group = cls(name, continent, city)
group.save_new_row()
return group
def update(self):
sql = """
UPDATE groups
SET name = ?, continent = ?, city = ?
WHERE id = ?
"""
CURSOR.execute(sql, (self.name, self.continent, self.city, self.id))
CONN.commit()
def delete(self):
sql = """
DELETE FROM groups
WHERE id = ?
"""
CURSOR.execute(sql, (self.id,))
CONN.commit()
del type(self).all[self.id] #(removes from 'all' dictionary)
self.id = None #(key no longer associated with that id)
#==================================
# Class Methods to Search Group Information
@classmethod
def instance_from_database(cls, row):
group = cls.all.get(row[0])
if group:
group.name = row[1]
group.continent = row[2]
group.city = row[3]
else:
group = cls(row[1], row[2], row[3])
group.id = row[0]
cls.all[group.id] = group
return group
# just a mediator method for the other methods that actually let one view the contents of the database
@classmethod
def get_all(cls):
sql = """
SELECT *
FROM groups
"""
database = CURSOR.execute(sql).fetchall()
return [cls.instance_from_database(n) for n in database]
@classmethod
def get_continent(cls, continent):
sql = """
SELECT *
FROM groups
WHERE continent = ?
"""
groups = CURSOR.execute(sql, (continent,)).fetchall()
return [cls.instance_from_database(n) for n in groups]
@classmethod
def get_city(cls, city):
sql = """
SELECT *
FROM groups
WHERE city = ?
"""
groups = CURSOR.execute(sql, (city,)).fetchall()
return [cls.instance_from_database(n) for n in groups]
@classmethod
def get_by_id(cls, id):
sql = """
SELECT *
FROM groups
WHERE id = ?
"""
n = CURSOR.execute(sql, (id,)).fetchone()
return cls.instance_from_database(n) if n else None
@classmethod
def get_by_name(cls, name):
sql = """
SELECT *
FROM groups
WHERE upper(name) = ?
"""
n = CURSOR.execute(sql, (name.upper(),)).fetchone()
return cls.instance_from_database(n) if n else None
def get_members(self):
from adventurer import Adventurer
sql = """
SELECT * FROM adventurers
WHERE group_id = ?
"""
CURSOR.execute(sql, (self.id,),)
advs = CURSOR.fetchall()
return [Adventurer.instance_from_database(n) for n in advs]
def is_full(self):
if len(self.get_members()) < 4:
return False
else:
return True
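# Illustrative usage sketch (not part of the original file). Assumes CURSOR/CONN from __init__
# point at a writable SQLite database and that the adventurers table used by get_members()
# exists; the group name below is made up.
if __name__ == "__main__":
    Group.make_table()
    party = Group.create("The Lantern Bearers", "Mollen", "Pelta")
    found = Group.get_by_name("the lantern bearers")  # lookup is case-insensitive
    print(found.name, found.continent, found.city)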
|
regisaslewis/adventurers-unite
|
group.py
|
group.py
|
py
| 6,305
|
python
|
en
|
code
| 1
|
github-code
|
6
|
22319846051
|
import argparse
import os
from . import _argparse
__version__ = '0.0.1'
_BUFSIZ = 4096 * 16
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
def do_cat(ifd, ofd, *, unbuffered):
# Currently, always act as if `unbuffered` is True.
while True:
buf = os.read(ifd, _BUFSIZ)
if not buf:
break
while buf:
n = os.write(ofd, buf)
buf = buf[n:]
def main(argv=None):
parser = _argparse.MyArgumentParser(description='concatenate files and print on the standard output')
parser.add_argument('files', action=_argparse.MyAppendAction)
parser.add_argument('--version', action='version', version=__version__, help='output version information and exit')
parser.add_argument('-A', '--show-all', action='store_true', help='equivalent to -vET')
parser.add_argument('-b', '--number-nonblank', action='store_true', help='number nonempty output lines, overrides -n')
parser.add_argument('-e', action='store_true', help='equivalent to -vE')
parser.add_argument('-E', '--show-ends', action='store_true', help='display $ at end of each line')
parser.add_argument('-n', '--number', action='store_true', help='number all output lines')
parser.add_argument('-s', '--squeeze-blank', action='store_true', help='suppress repeated empty output lines')
parser.add_argument('-t', action='store_true', help='equivalent to -vT')
parser.add_argument('-T', '--show-tabs', action='store_true', help='display TAB characters as ^I')
parser.add_argument('-u', action='store_true', dest='unbuffered', help='force unbuffered output')
parser.add_argument('-v', '--show-nonprinting', action='store_true', help='use ^ and M- notation, except for LFD and TAB')
ns = parser.parse_args(argv)
if ns.show_all:
ns.show_nonprinting = True
ns.show_ends = True
ns.show_tabs = True
del ns.show_all
if ns.e:
ns.show_nonprinting = True
ns.show_ends = True
del ns.e
if ns.t:
ns.show_nonprinting = True
ns.show_tabs = True
del ns.t
if ns.files is None:
ns.files = ['-']
ns.unbuffered = True
if ns.number or ns.number_nonblank:
raise NotImplementedError
if ns.show_ends or ns.show_nonprinting or ns.show_tabs:
raise NotImplementedError
if ns.squeeze_blank:
raise NotImplementedError
for f in ns.files:
if f == '-':
do_cat(STDIN_FILENO, STDOUT_FILENO, unbuffered=ns.unbuffered)
else:
fd = os.open(f, os.O_RDONLY)
try:
do_cat(fd, STDOUT_FILENO, unbuffered=ns.unbuffered)
finally:
os.close(fd)
if __name__ == '__main__':
main()
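# Example invocations (assuming the package is importable as `coreutils`, e.g. from the repo root):
#
#   python -m coreutils.cat somefile.txt     # print a file to stdout
#   echo hi | python -m coreutils.cat -      # read from stdin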
|
o11c/python-coreutils
|
coreutils/cat.py
|
cat.py
|
py
| 2,738
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21738867922
|
import time
start_time = time.time()
f = open("names_1.txt", "r")
names_1 = f.read().split("\n") # List containing 10000 names
f.close()
f = open("names_2.txt", "r")
names_2 = f.read().split("\n") # List containing 10000 names
f.close()
duplicates = [] # Return the list of duplicates in this data structure
# Replace the nested for loops below with your improvements
class BinarySearchTree:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# Insert the given value into the tree
def insert(self, value):
def find_insert(current_node, value):
if value < current_node.value:
if current_node.left is None:
current_node.left = BinarySearchTree(value)
return
else:
return find_insert(current_node.left, value)
else:
if current_node.right is None:
current_node.right = BinarySearchTree(value)
return
else:
return find_insert(current_node.right, value)
find_insert(self, value)
# Return True if the tree contains the value
# False if it does not
def contains(self, target):
def search(current_node, target):
if current_node.value == target:
return True
if target < current_node.value:
if current_node.left is not None:
return search(current_node.left, target)
if target >= current_node.value:
if current_node.right is not None:
return search(current_node.right, target)
return False
return search(self, target)
bst = BinarySearchTree("")
for name_2 in names_2:
bst.insert(name_2)
for name_1 in names_1:
if bst.contains(name_1):
duplicates.append(name_1)
"""
---------------------------------
Original problem is O(n*m) since the 2 datasets are not guaranteed to be the same. Optimally if the files are the same length if would be O(n^2)
---------------------------------
"""
end_time = time.time()
print(f"{len(duplicates)} duplicates:\n\n{', '.join(duplicates)}\n\n")
print(f"runtime: {end_time - start_time} seconds")
# ---------- Stretch Goal -----------
# Python has built-in tools that allow for a very efficient approach to this problem
# What's the best time you can accomplish? There are no restrictions on techniques or data
# structures, but you may not import any additional libraries that you did not write yourself.
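# A sketch of the built-in approach hinted at in the stretch goal above (not called by default, so
# the timings printed earlier are unchanged): set membership tests are O(1) on average, making the
# whole pass roughly O(n + m).
def find_duplicates_with_sets(list_1, list_2):
    names_2_set = set(list_2)
    return [name for name in list_1 if name in names_2_set]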
|
MarkHalls/Sprint-Challenge--Data-Structures-Python
|
names/names.py
|
names.py
|
py
| 2,616
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73825796027
|
"""
分类算法应用案例-汽车金融预测用户是否会贷款买车
"""
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
import pydotplus
# 文件路径
data_path = 'C:/Users/Lenovo/Desktop/car.csv'
# 读取数据文件
data_frame = pd.read_csv(data_path, encoding='gbk')
# print(data_frame.head())
# 划分训练集和测试集
X_train, X_test, y_train, y_test = train_test_split(data_frame.values[:, :-1], data_frame.values[:, -1], test_size=0.3)
# 决策树调参---1.分支依据
def adjust_criterion():
# 参数列表
param_dic = {'criterion':['gini', 'entropy']}
# 构建网格搜索器
gscv = GridSearchCV(estimator=DecisionTreeClassifier(), param_grid=param_dic, scoring='roc_auc')
# 训练
gscv.fit(X_train, y_train)
print('best_params:{0}'.format(gscv.best_params_))
print('best_score:{0}'.format(gscv.best_score_))
# 决策树调参---2.深度
def adjust_depth():
# 参数列表
param_dic = {'max_depth': range(1, 10)}
# 构建网格搜索器
gscv = GridSearchCV(estimator=DecisionTreeClassifier(criterion='gini'), param_grid=param_dic, scoring='roc_auc')
# 训练
gscv.fit(X_train, y_train)
print('best_params:{0}'.format(gscv.best_params_))
print('best_score:{0}'.format(gscv.best_score_))
# 决策树调参---3.最大叶子结点数
def adjust_max_leaf_nodes():
# 参数列表
param_dic = {'max_leaf_nodes': range(2, 100)}
# 构建网格搜索器
gscv = GridSearchCV(estimator=DecisionTreeClassifier(criterion='gini', max_depth=4), param_grid=param_dic, scoring='roc_auc')
# 训练
gscv.fit(X_train, y_train)
print('best_params:{0}'.format(gscv.best_params_))
print('best_score:{0}'.format(gscv.best_score_)) # 14
# KNN调参 K
def adjust_k():
# 参数列表
param_dic = {'n_neighbors': range(1, 20)}
# 构建网格搜索器
gscv = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=param_dic, scoring='roc_auc')
# 训练
gscv.fit(X_train, y_train)
print('best_params:{0}'.format(gscv.best_params_))
print('best_score:{0}'.format(gscv.best_score_)) # 3
# 用每个算法选出来的最优参数预测并得出ROC曲线
def plot_roc(clfs):
"""
:param clf: 分类器列表
:return: None
"""
for index, clf in enumerate(clfs):
# 训练数据
clf.fit(X_train, y_train)
# 输出混淆矩阵
pre = clf.predict(X_test)
# 输出预测测试集的概率
y_prb_1 = clf.predict_proba(X_test)[:, 1]
# 得到误判率、命中率、门限
fpr, tpr, thresholds = roc_curve(y_test, y_prb_1)
# 计算auc
roc_auc = auc(fpr, tpr)
# 绘图
plt.plot(fpr, tpr, label='{0}_AUC = {1:.2f}'.format(index, roc_auc))
# 对ROC曲线图正常显示做的参数设定
plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
plt.title('ROC曲线')
# 设置x、y轴刻度范围
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.legend(loc='lower right')
# 绘制参考线
plt.plot([0, 1], [0, 1], 'r--')
plt.ylabel('命中率')
plt.xlabel('误判率')
plt.show()
# 输出树形图
def out_image():
# 模型初始化
clf = DecisionTreeClassifier(criterion='gini', max_depth=4, max_leaf_nodes=14)
# 训练模型
clf.fit(X_train, y_train)
# 输出png(pdf)图形文件
dot_data = tree.export_graphviz(clf, out_file=None, filled=True, rounded=True)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_png(data_path.replace('.csv', '.png'))
if __name__ == '__main__':
# adjust_criterion()
# adjust_depth()
# adjust_max_leaf_nodes()
# adjust_k()
# clfs = [DecisionTreeClassifier(criterion='gini', max_depth=4, max_leaf_nodes=14), KNeighborsClassifier(n_neighbors=3)]
# plot_roc(clfs)
out_image()
|
ghostlyFeng/ML
|
Cluster/car.py
|
car.py
|
py
| 4,212
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23010318012
|
__all__ = (
"__version__",
"AssumedDiagonalGraphTraversal",
"Edge",
"Flow",
"get_path_from_matrix",
"guess_production_exchanges",
"NewNodeEachVisitGraphTraversal",
"Node",
"path_as_brightway_objects",
"to_normalized_adjacency_matrix",
)
from .graph_traversal_utils import get_path_from_matrix, path_as_brightway_objects
from .matrix_tools import guess_production_exchanges, to_normalized_adjacency_matrix
from .utils import get_version_tuple
from .graph_traversal import (
AssumedDiagonalGraphTraversal,
Edge,
Flow,
NewNodeEachVisitGraphTraversal,
Node,
)
__version__ = get_version_tuple()
|
brightway-lca/bw_graph_tools
|
bw_graph_tools/__init__.py
|
__init__.py
|
py
| 652
|
python
|
en
|
code
| 1
|
github-code
|
6
|
7894457497
|
from flask import Flask, render_template, request
import os
import json
from nova_code import start_vm
from swift_code import upload_to_container, check_file_exists
container_upload = 'uploads'
container_download = 'rendered'
environ = json.load(open(os.environ['CRED_FILE']))['CONFIG']['CONFIG_VARS']
app = Flask(__name__)
app.debug = True
@app.route("/example", methods=['GET', 'POST'])
def example(btn_clicked=""):
if request.method == 'POST':
filename = request.form['files']
mail = request.form['mail']
f = open("examples/" + filename, 'r')
obj_name = upload_to_container(f, container_upload, environ)
start_vm(container_upload, obj_name, container_download, environ, mail)
return render_template('upload.jinja', btn_clicked=obj_name)
else:
files = os.listdir("examples")
return render_template('upload.jinja', btn_clicked='example', files=files)
@app.route("/")
@app.route('/upload', methods=['GET', 'POST'])
def upload_file(btn_clicked=""):
if request.method == 'POST':
f = request.files['FileToUpload']
mail = request.form['mail']
obj_name = upload_to_container(f, container_upload, environ)
start_vm(container_upload, obj_name, container_download, environ, mail)
return render_template('upload.jinja', btn_clicked=obj_name)
else:
return render_template('upload.jinja', btn_clicked='no')
@app.route('/file/<filename>')
def show_output(filename):
if check_file_exists(filename, container_download, environ):
return render_template('download.jinja', filename=filename)
else:
return render_template('wait.jinja', filename=filename)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.environ['PORT']))
|
stepanvanecek/cah-blender
|
main.py
|
main.py
|
py
| 1,784
|
python
|
en
|
code
| 1
|
github-code
|
6
|
73815172026
|
import selenium.webdriver
from bonobo_selenium._version import __version__
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/601.4.4 (KHTML, like Gecko) Version/9.0.3 Safari/601.4.4'
def create_profile(use_tor=False):
_profile = selenium.webdriver.FirefoxProfile()
_profile.set_preference("toolkit.startup.max_resumed_crashes", "-1")
if use_tor:
# tor connection
_profile.set_preference('network.proxy.type', 1)
_profile.set_preference('network.proxy.socks', '127.0.0.1')
_profile.set_preference('network.proxy.socks_port', 9050)
# user agent
_profile.set_preference("general.useragent.override", USER_AGENT)
return _profile
def create_browser(profile):
_browser = selenium.webdriver.Firefox(profile)
# _browser.implicitly_wait(10)
# _browser.set_page_load_timeout(10)
return _browser
def create_chrome_browser():
browser = selenium.webdriver.Chrome()
return browser
__all__ = [
'USER_AGENT',
'__version__',
'create_browser',
'create_profile',
]
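# Illustrative usage (not executed here; requires Firefox and geckodriver to be installed):
#
#   browser = create_browser(create_profile(use_tor=False))
#   browser.get("https://example.com")
#   browser.quit()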
|
python-bonobo/bonobo-selenium
|
bonobo_selenium/__init__.py
|
__init__.py
|
py
| 1,078
|
python
|
en
|
code
| 4
|
github-code
|
6
|
36609185341
|
#!/usr/bin/env python
import re
def revert(text):
result = []
for word, space in re.findall(r'([^\s]*)(\s*)', text):
result += [i for i in reversed(word)]
result.append(space)
return ''.join(result)
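# Example: revert("hello world") returns "olleh dlrow" -- each word is reversed in place while
# the whitespace between words is preserved.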
|
raimu/code-kata
|
python/BackwardsTalk/backward_talk.py
|
backward_talk.py
|
py
| 228
|
python
|
en
|
code
| 0
|
github-code
|
6
|
31315499323
|
###########
# This script builds the database for the web visualization
# It can take a long time to run, so it is recommended to run it in the background
# Here we we are going to take a folder of ABF or NWB files, and extract some features
# we will choose to use a custom backend or ipfx to extract the features
# from each file. We will then save the features in a database file.
# The database file will be used by the web visualization to display the data.
# The database file is a JSON file, or csv
###########
# Import libraries
import os
import sys
import json
import glob
import argparse
import pandas as pd
import numpy as np
import logging
from functools import partial
import copy
import joblib
import matplotlib.pyplot as plt
import scipy.stats
from multiprocessing import pool, freeze_support
# Import ipfx
import ipfx
import ipfx.script_utils as su
from ipfx.stimulus import StimulusOntology
import allensdk.core.json_utilities as ju
from ipfx.bin import run_feature_collection
from ipfx import script_utils as su
from ipfx.sweep import SweepSet, Sweep
import ipfx.stim_features as stf
import ipfx.stimulus_protocol_analysis as spa
import ipfx.data_set_features as dsf
import ipfx.time_series_utils as tsu
import ipfx.feature_vectors as fv
from ipfx.dataset.create import create_ephys_data_set
# Import custom functions
from pyAPisolation import patch_utils
from pyAPisolation.loadNWB import loadNWB, GLOBAL_STIM_NAMES
try:
from pyAPisolation.dev import stim_classifier as sc
except:
print("Could not import stim_classifier")
# ==== GLOBALS =====
_ONTOLOGY = ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE)
_UNIT_ONTOLOGY = {'amp': ['amp', 'ampere', 'amps', 'amperes', 'A'],'volt': ['volt', 'v', 'volts', 'V'], 'sec': ['sec', 's', 'second', 'seconds', 'secs', 'sec']}
log = logging.getLogger(__name__)
def glob_files(folder, ext="nwb"):
#this function will take a folder and a file extension, and return a list of files
# with that extension in that folder
return glob.glob(folder + "/**/*." + ext, recursive=True)
def run_analysis(folder, backend="ipfx", outfile='out.csv', ext="nwb", parallel=False):
files = glob_files(folder)[::-1]
file_idx = np.arange(len(files))
if backend == "ipfx":
# Use ipfx to extract features
#get_stimulus_protocols(files)
GLOBAL_STIM_NAMES.stim_inc = ['']
GLOBAL_STIM_NAMES.stim_exc = []
get_data_partial = partial(data_for_specimen_id,
passed_only=False,
data_source='filesystem',
ontology=None,
file_list=files)
        if parallel == True:
            #Run in parallel across all available cores
            n_jobs = joblib.cpu_count()
        else:
            n_jobs = 1
        results = joblib.Parallel(n_jobs=n_jobs, backend='multiprocessing')(joblib.delayed(get_data_partial)(specimen_id) for specimen_id in file_idx)
elif backend == "custom":
        # Use a custom backend to extract features -- not implemented yet; the loop below is a
        # placeholder (note that `feature_extraction` is not defined in this module).
        raise NotImplementedError
results = []
for f in files:
# Extract features from each file
result = feature_extraction.extract_features(f)
results.append(result)
# Save results
df = pd.DataFrame(results)
df.to_csv(outfile, index=False)
results = pd.DataFrame().from_dict(results).set_index('specimen_id')
return results
def main():
#main function to be called when running this script
# Handle command line arguments
parser = argparse.ArgumentParser(description='Build database for web visualization')
parser.add_argument('folder', type=str, help='Folder containing ABF or NWB files')
parser.add_argument('--backend', type=str, default="ipfx", help='Backend to use for feature extraction')
parser.add_argument('--outfile', type=str, default="out.csv", help='Output file name')
args = parser.parse_args()
# Run analysis
run_analysis(args.folder, args.backend, args.outfile)
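# Example invocation (hypothetical paths; the folder should contain NWB files):
#
#   python build_database.py /path/to/nwb_folder --backend ipfx --outfile features.csv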
#======== IPFX functions ===========
#clone the function from ipfx/stimulus_protocol_analysis.py
# here we will modify it to handle test pulses intelligently, then overwrite the function in ipfx for this session
def get_stim_characteristics(i, t, test_pulse=True, start_epoch=None, end_epoch=None, test_pulse_length=0.250):
"""
Identify the start time, duration, amplitude, start index, and end index of a general stimulus.
"""
fs = 1/(t[1] - t[0])
di = np.diff(i)
di_idx = np.flatnonzero(di) # != 0
start_idx_idx = 0
if len(di_idx[start_idx_idx:]) == 0: # if no stimulus is found
return None, None, 0.0, None, None
#here we will check if the first up/down is a test pulse, and skip it if it is
#we are assuming that the test pulse is within the first 250ms of the stimulus
#TODO make this more robust
if len(di_idx) > 3: # if there are more than 3 up/down transitions, there is probably a test pulse
if (di_idx[1]) < test_pulse_length*fs: # skip the first up/down (test pulse) if present, and with in the first 250ms
start_idx_idx = 2
else:
start_idx_idx = 0
elif len(di_idx) < 3:
start_idx_idx = 0
start_idx = di_idx[start_idx_idx] + 1 # shift by one to compensate for diff()
end_idx = di_idx[-1]
if start_idx >= end_idx: # sweep has been cut off before stimulus end
return None, None, 0.0, None, None
start_time = float(t[start_idx])
duration = float(t[end_idx] - t[start_idx-1])
stim = i[start_idx:end_idx+1]
peak_high = max(stim)
peak_low = min(stim)
if abs(peak_high) > abs(peak_low):
amplitude = float(peak_high)
else:
amplitude = float(peak_low)
return start_time, duration, amplitude, start_idx, end_idx
ipfx.stim_features.get_stim_characteristics = get_stim_characteristics
def parse_long_pulse_from_dataset(data_set):
sweeps = []
start_times = []
end_times = []
for sweep in np.arange(len(data_set.dataY)):
i = data_set.dataC[sweep]*1
t = data_set.dataX[sweep]
v = data_set.dataY[sweep]
dt = t[1] - t[0]
#if its not current clamp
if match_unit(data_set.sweepMetadata[sweep]['stim_dict']["unit"]) != "amp":
continue
#if the sweep v is in volts, convert to mV, ipfx wants mV
if match_unit(data_set.sweepMetadata[sweep]['resp_dict']["unit"]) == "volt":
#sometimes the voltage is in volts, sometimes in mV, this is a hack to fix that
if np.max(v) > 500 and np.min(v) < -500:
#possibly in nV or something else, convert to mV anyway
v = v/1000
elif np.max(v) < 1 and np.min(v) > -1:
#probably in volts, convert to mV
v = v*1000
#if the sweep i is in amps, convert to pA, ipfx wants pA
if match_unit(data_set.sweepMetadata[sweep]['stim_dict']["unit"])=="amp":
if np.max(i) < 0.1 and np.min(i) > -0.1:
#probably in amp, convert to picoAmps
i = np.rint(i*1000000000000).astype(np.float32)
else:
#probably in pA already
i = np.rint(i).astype(np.float32)
#sometimes i will have a very small offset, this will remove it
i[np.logical_and(i < 5, i > -5)] = 0
if match_protocol(i, t) != "Long Square":
continue
start_time, duration, amplitude, start_idx, end_idx = get_stim_characteristics(i, t)
if start_time is None:
continue
#construct a sweep obj
start_times.append(start_time)
end_times.append(start_time+duration)
sweep_item = Sweep(t, v, i, clamp_mode="CurrentClamp", sampling_rate=int(1/dt), sweep_number=sweep)
sweeps.append(sweep_item)
return sweeps, start_times, end_times
def data_for_specimen_id(specimen_id, passed_only, data_source, ontology, file_list=None, amp_interval=20, max_above_rheo=100, debug=True):
result = {}
result["specimen_id"] = file_list[specimen_id]
try:
#this is a clone of the function in ipfx/bin/run_feature_collection.py,
# here we are gonna try to use it to handle data that may not be in an NWB format IPFX can handle
_, _, _, _, data_set = loadNWB(file_list[specimen_id], return_obj=True)
if data_set is None or len(data_set.dataY)<1:
return result
#here we are going to perform long square analysis on the data,
#ipfx does not play nice with many NWBs on dandi, so we are going to link into the lower level functions
#and do the analysis ourselves
#hopefully this will be fixed in the future and we can use ipfx for this
sweeps = []
start_times = []
end_times = []
debug_log = {}
for sweep in np.arange(len(data_set.dataY)):
i = np.nan_to_num(data_set.dataC[sweep]*1)
t = data_set.dataX[sweep]
v = np.nan_to_num(data_set.dataY[sweep])
dt = t[1] - t[0]
#if its not current clamp
if match_unit(data_set.sweepMetadata[sweep]['stim_dict']["unit"]) != "amp":
logging.debug(f"sweep {sweep} is not current clamp")
#debug_log[sweep] = "not current clamp"
continue
#if the sweep v is in volts, convert to mV, ipfx wants mV
if match_unit(data_set.sweepMetadata[sweep]['resp_dict']["unit"]) == "volt":
                #sometimes the voltage is in volts, sometimes in mV, even though it is logged as volts; this is a hack to fix that
if np.max(v) > 500 and np.min(v) < -500:
#possibly in nV or something else, convert to mV anyway
v = v/1000
elif np.max(v) < 1 and np.min(v) > -1:
#probably in volts, convert to mV
v = v*1000
#if the sweep i is in amps, convert to pA, ipfx wants pA
if match_unit(data_set.sweepMetadata[sweep]['stim_dict']["unit"])=="amp":
if np.max(i) < 0.1 and np.min(i) > -0.1:
#probably in amp, convert to picoAmps
i = i*1000000000000
#probably in pA already
#i[np.logical_and(i < 5, i > -5)] = 0
#try to figure out if this is a long square
if match_protocol(i, t) != "Long Square":
logging.debug(f"skipping sweep {sweep} because it is not a long square")
debug_log[sweep] = "likely not a long square"
continue
start_time, duration, amplitude, start_idx, end_idx = get_stim_characteristics(i, t)
if QC_voltage_data(t, v, i) == 0:
logging.debug(f"skipping sweep {sweep} because it failed QC")
debug_log[sweep] = "failed QC"
continue
#construct a sweep obj
start_times.append(start_time)
end_times.append(start_time+duration)
sweep_item = Sweep(t, v, i, clamp_mode="CurrentClamp", sampling_rate=int(1/dt), sweep_number=sweep)
sweeps.append(sweep_item)
if debug:
for sweep in debug_log.keys():
print(f"sweep {sweep} failed QC because it was {debug_log[sweep]}")
if debug_log[sweep] == "failed QC":
plt.plot(data_set.dataX[sweep], data_set.dataY[sweep], label=f"{sweep} {debug_log[sweep]}", c='r')
else:
#plt.plot(data_set.dataX[sweep], data_set.dataY[sweep], label=f"{sweep} {debug_log[sweep]}", c='k')
continue
#plt.legend()
plt.pause(0.2)
#get the most common start and end times
start_time = scipy.stats.mode(np.array(start_times))[0][0]
end_time = scipy.stats.mode(np.array(end_times))[0][0]
#index out the sweeps that have the most common start and end times
idx_pass = np.where((np.array(start_times) == start_time) & (np.array(end_times) == end_time))[0]
sweeps = SweepSet(np.array(sweeps, dtype=object)[idx_pass].tolist())
lsq_spx, lsq_spfx = dsf.extractors_for_sweeps(
sweeps,
start=start_time , #if the start times are not the same, this will fail
end=end_time, #if the end times are not the same, this will fail
min_peak=-25,
)
lsq_an = spa.LongSquareAnalysis(lsq_spx, lsq_spfx,
subthresh_min_amp=-100.0)
if np.mean(start_times) < 0.01:
lsq_an.sptx.baseline_interval = np.mean(start_times)*0.1
lsq_an.sptx.sag_baseline_interval = np.mean(start_times)*0.1
lsq_features = lsq_an.analyze(sweeps)
result.update({
"input_resistance": lsq_features["input_resistance"],
"tau": lsq_features["tau"],
"v_baseline": lsq_features["v_baseline"],
"sag_nearest_minus_100": lsq_features["sag"],
"sag_measured_at": lsq_features["vm_for_sag"],
"rheobase_i": int(lsq_features["rheobase_i"]),
"fi_linear_fit_slope": lsq_features["fi_fit_slope"],
})
# Identify suprathreshold set for analysis
sweep_table = lsq_features["spiking_sweeps"]
mask_supra = sweep_table["stim_amp"] >= lsq_features["rheobase_i"]
sweep_indexes = fv._consolidated_long_square_indexes(sweep_table.loc[mask_supra, :])
amps = np.rint(sweep_table.loc[sweep_indexes, "stim_amp"].values - lsq_features["rheobase_i"])
spike_data = np.array(lsq_features["spikes_set"])
for amp, swp_ind in zip(amps, sweep_indexes):
if (amp % amp_interval != 0) or (amp > max_above_rheo) or (amp < 0):
continue
amp_label = int(amp / amp_interval)
first_spike_lsq_sweep_features = run_feature_collection.first_spike_lsq(spike_data[swp_ind])
result.update({"ap_1_{:s}_{:d}_long_square".format(f, amp_label): v
for f, v in first_spike_lsq_sweep_features.items()})
mean_spike_lsq_sweep_features = run_feature_collection.mean_spike_lsq(spike_data[swp_ind])
result.update({"ap_mean_{:s}_{:d}_long_square".format(f, amp_label): v
for f, v in mean_spike_lsq_sweep_features.items()})
sweep_feature_list = [
"first_isi",
"avg_rate",
"isi_cv",
"latency",
"median_isi",
"adapt",
]
result.update({"{:s}_{:d}_long_square".format(f, amp_label): sweep_table.at[swp_ind, f]
for f in sweep_feature_list})
result["stimulus_amplitude_{:d}_long_square".format(amp_label)] = int(amp + lsq_features["rheobase_i"])
rates = sweep_table.loc[sweep_indexes, "avg_rate"].values
result.update(run_feature_collection.fi_curve_fit(amps, rates))
#we should record the name of the stimuli used and the sweeps used
except Exception as e:
print("error with specimen_id: ", specimen_id)
print(e)
plt.close()
return result
plt.close()
return result
def find_time_index(t, t_0):
""" Find the index value of a given time (t_0) in a time series (t).
Parameters
----------
t : time array
t_0 : time point to find an index
Returns
-------
idx: index of t closest to t_0
"""
    # Unlike the upstream ipfx version, a time outside the range (t[0], t[-1]) is clamped below
    # instead of raising an assertion error.
if t_0 < t[0]:
t_0 = t[0]
if t_0 > t[-1]:
t_0 = t[-1]
idx = np.argmin(abs(t - t_0))
return idx
tsu.find_time_index = find_time_index
#stimulus protocol analysis functions, here we will guess what stimulus protocol was used, and affix that to the stimulus ontology later
def get_stimulus_protocols(files, ext="nwb", method='random'):
#this function is going to take a folder and a file extension, and return a list of stimulus protocols, then guess what type of stimulus protocol was used
#method can be random, first, or all
#random will choose 10 random files and try to guess the stimulus protocol
if method == 'random':
files = np.random.choice(files, min(100, len(files)))
elif method == 'first':
files = files[0]
elif method == 'all':
pass
#here we are gonna set the GLOBAL_STIM_NAMES filter to blank, so that we can get all the stimulus names
GLOBAL_STIM_NAMES.stim_inc = ['']
GLOBAL_STIM_NAMES.stim_exc = []
classifier = sc.stimClassifier()
stim_to_use = []
for i, f in enumerate(files):
_, _, _, _, data_set = loadNWB(f, return_obj=True)
#
#[plt.plot(x) for x in data_set.dataY]
#plt.show()
for j in np.arange(len(data_set.dataY)):
sweep_meta = data_set.sweepMetadata[j]
i = data_set.dataC[j]
t = data_set.dataX[j]
#stim_protocol = match_protocol(i, t) #stimulus protocol is the matching protocol
stim_protocol = classifier.predict(i)
#reference mapped to the allen protocol names
if stim_protocol is not None:
#add stim_protocol to ontology
stim_name_1 = sweep_meta['description']
stim_name_2 = sweep_meta['stimulus_description']
for stim_name in [stim_name_1, stim_name_2]:
if stim_name not in GLOBAL_STIM_NAMES.stim_inc:
if stim_name != '' and stim_name != 'N//A' and stim_name != 'NA' and stim_name != 'N/A':
stim_to_use.append(stim_name)
GLOBAL_STIM_NAMES.stim_inc = stim_to_use
return copy.deepcopy(GLOBAL_STIM_NAMES)
def match_protocol(i, t, test_pulse=True, start_epoch=None, end_epoch=None, test_pulse_length=0.1):
#this function will take a stimulus and return the stimulus protocol at least it will try
#first we will try to match the stimulus protocol to a known protocol
classifier = sc.stimClassifier()
start_time, duration, amplitude, start_idx, end_idx = get_stim_characteristics(i, t, test_pulse=test_pulse, start_epoch=start_epoch, end_epoch=end_epoch, test_pulse_length=test_pulse_length)
pred = classifier.decode(classifier.predict(i.reshape(1, -1)))[0]
if pred=="long_square":
return "Long Square"
if start_time is None:
#if we can't find the start time, then we can't identify the stimulus protocol
return None
if duration > 0.25:
#if the stimulus is longer than 500ms, then it is probably a long square
return match_long_square_protocol(i, t, start_idx, end_idx)
elif duration < 0.1:
#if the stimulus is less than 100ms, then it is probably a short square
return match_short_square_protocol(i, t)
else:
#check if ramp
return match_ramp_protocol(i, t)
def match_long_square_protocol(i, t, start_idx, end_idx):
#here we will do some analysis to determine if the stimulus is a long square, and if so, what the parameters are
fs = 1/(t[1] - t[0])
di = np.diff(i)
di_idx = np.flatnonzero(di) # != 0
if len(di_idx) == 0:
#if there are no up/down transitions, then this is not a long square
return None
if len(di_idx) == 1:
#if there is only one up/down transition, then this is not a long square
return None
#if len(di_idx) > 6:
#if there are more than 6 up/down transitions, then this is (probably) not a long square
# return
#check if its a long square by fitting a line to the dataset,
#and checking if the slope is 0
#if the slope is 0, then it is a long square
#if the slope is not 0, then it is not a long square
if len(di_idx) > 6:
y_data = i[start_idx: end_idx]
x_data = t[start_idx: end_idx]
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x_data, y_data)
if slope < 0.1 and p_value < 0.05 and r_value > 0.6:
return 'Long Square'
elif slope > 0.1 and p_value > 0.05:
return 'Long Square'
else:
return None
#ensure that the stim starts at 0, and ends at 0
if i[0] != 0:
return None
if i[-1] != 0:
return None
return "Long Square"
def match_short_square_protocol(stimulus_protocol, ontology):
#TODO: implement this function
pass
def match_ramp_protocol(stimulus_protocol, ontology):
#TODO: implement this function
pass
def match_unit(unit, ontology=_UNIT_ONTOLOGY):
#this function will take a unit and return the unit ontology
for unit_name in ontology:
check = [unit.upper() in x.upper() for x in ontology[unit_name]]
if np.any(check):
return unit_name
return None
def QC_voltage_data(t,v,i, zero_threshold=0.2, noise_threshold=10):
#this function will take a voltage trace and return a QC score
#Sometimes the voltage trace is not a voltage trace, but rather a current trace
    #or with IGOR / MIES generated NWB files, the sweep was aborted halfway through, and there is a large jump in the voltage trace, and a bunch of zeros
#this function will check for these things and return a QC score
#if the QC score is 0, then the sweep is bad
#if the QC score is 1, then the sweep is good
if v is None:
return 0
if i is None:
return 0
if len(v) == 0:
return 0
if np.any(v > 500) or np.any(v < -500): #membrane voltages are very very unlikely to be this large, this threshold could be lowered
return 0
#check for extended periods of 0
if np.sum(v == 0) > zero_threshold*len(v): #if more than 10% of the trace is 0, then it was probably aborted
#this is only a problem if the current is not 0
#check if while the voltage is 0, the current is 0
idx_zero = np.flatnonzero(np.isclose(v, 0))
if np.sum(i[idx_zero] != 0) > (zero_threshold/2)*len(idx_zero):
return 0
else:
return 1
#check for large jumps in the voltage trace
#dv = np.diff(v)
#if np.any(np.abs(dv) > 1e9):
#return 0
#todo, more qc checks
return 1
if __name__ == "__main__":
freeze_support()
#call main
main()
|
smestern/pyAPisolation
|
pyAPisolation/web_viz/build_database.py
|
build_database.py
|
py
| 22,652
|
python
|
en
|
code
| 1
|
github-code
|
6
|
75114038906
|
from timeit import default_timer as timer
import re
start = timer()
file = open('input.txt')
# exponential growth, every 7 days, after 0
# unsynchronized
# +2 day before first cycle
class LanternFish:
def __init__(self, initial_clock, spawn_clock, cycle):
self.clock = initial_clock
self.spawn = spawn_clock
self.cycle = cycle
def __str__(self):
return "%d" % (self.clock)
def __repr__(self):
return "%d" % (self.clock)
# returns baby if clock is up
def tick(self):
self.clock -= 1
if self.clock < 0:
self.clock = self.cycle
return LanternFish(self.spawn, self.spawn, self.cycle)
spawn = 8
cycle = 6
fishes = []
for initial_clock in file.readlines()[0].split(","):
fishes.append(LanternFish(int(initial_clock), spawn, cycle))
days = 80
for day in range(0, days):
bebes = []
for fish in fishes:
newb = fish.tick()
if newb is not None:
bebes.append(newb)
fishes.extend(bebes)
result = len(fishes)
print("Completed in %fms" % ((timer() - start) * 1000))
print("%d is the result" % result)
|
kmckenna525/advent-of-code
|
2021/day06/part1.py
|
part1.py
|
py
| 1,034
|
python
|
en
|
code
| 2
|
github-code
|
6
|
10933573696
|
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
utils,
)
from ansible_collections.alliedtelesis.awplus.plugins.module_utils.network.awplus.argspec.banner.banner import BannerArgs
class BannerFacts(object):
""" The awplus banner fact class
"""
def __init__(self, module, subspec='config', options='options'):
self._module = module
self.argument_spec = BannerArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def get_run_conf(self, connection):
return connection.get('show running-config | include banner')
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for banner
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
        if not data:
            # Populate from the current device configuration (banner lines only)
            data = self.get_run_conf(connection)
# split the config into instances of the resource
resources = data.split('\n')
objs = []
for resource in resources:
if resource:
obj = self.render_config(self.generated_spec, resource)
if obj:
objs.append(obj)
ansible_facts['ansible_network_resources'].pop('banner', None)
facts = {}
if objs:
params = utils.validate_config(self.argument_spec, {'config': objs})
facts['banner'] = [utils.remove_empties(cfg) for cfg in params['config']]
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
def render_config(self, spec, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
config = deepcopy(spec)
motd = utils.parse_conf_arg(conf, 'banner motd')
if motd:
config['banner'] = 'motd'
config['text'] = motd
exec_text = utils.parse_conf_arg(conf, 'banner exec')
if exec_text:
config['banner'] = 'exec'
config['text'] = exec_text
return utils.remove_empties(config)
|
alliedtelesis/ansible_awplus
|
plugins/module_utils/network/awplus/facts/banner/banner.py
|
banner.py
|
py
| 2,915
|
python
|
en
|
code
| 7
|
github-code
|
6
|
27260781556
|
#Script for the first experiment of the multi-channel DART paper
#In this experiment, the performance of MC-DART is investigated for different number of channels and materials in the phantom,
# all averaged over 100 runs.
#Author,
# Mathé Zeegers,
# Centrum Wiskunde & Informatica, Amsterdam (m.t.zeegers@cwi.nl)
import astra
import numpy as np
import random
import pylab
import sys
import scipy.io
from ObjectAssembler import *
import MCDART
from HelperFunctions import *
from matplotlib.colors import ListedColormap
#Set random seed given by 'run' argument
if(len(sys.argv)>1):
np.random.seed(int(sys.argv[1]))
random.seed(int(sys.argv[1]))
#Path to folder to save the results
RESPATHPREFIX = "../results/MCDARTMaterialChannelExp"
def main():
NAngles = 32 #Number of projection angles
ARM = 'SIRT_CUDA' #Name of Algebraic Reconstruction Method to use
Startit = 10 #Iterations of the start reconstruction algorithm
MCDARTit = 10 #Number of MCDART iterations
ARMit = 10 #Iterations of the reconstruction algorithm in each MCDART iteration for each channel
FixProb = 0.99 #Fix probability - probability of 1-p of becoming a free pixel
diskOnly = True #Only let pixels inside the disk contribute to pixel error
smoothing = False #Use of smoothing in MCDART
r = 1 #Smoothing radius
b = 0.2 #Smoothing intensity
saveSpectrum = True #Save the material spectra
saveResults = True #Save the results
DetRed = True #Determines whether reduction of materials in phantom should be deterministic or random (set to False for exact paper results reproduction)
#Print settings
print("NAngles: ", NAngles, "\nStartit: ", Startit,"\nMCDARTit: ", MCDARTit,"\nARMit: ", ARMit,"\nFixProb: ", FixProb,"\ndiskOnly: ")
#Set (and create) specific saving folder
if(saveResults == True):
RESPATH = RESPATHPREFIX + "/ExpNAngles" + str(NAngles) + "ARM" + ARM + "Start" + str(Startit) + "MCDART" + str(MCDARTit) + "ARM" + str(ARMit) + "Fix" + str(FixProb)
if not os.path.exists(RESPATH):
os.makedirs(RESPATH)
#Set ranges for channels and materials (to reduce the phantom to)
minNoMaterials = 2
maxNoMaterials = 10
maxChannels = 10
#Supporting arrays for copying attenuation values of existing materials when another one is added to the phantom
temp = np.zeros((1,1))
temp2 = np.zeros((1,1))
#All pixel errors for this run
AllPixelErrors = np.zeros((maxNoMaterials-minNoMaterials+1, maxChannels))
if(saveSpectrum == True):
if not os.path.exists(RESPATH + "/MaterialSpectra"):
os.makedirs(RESPATH + "/MaterialSpectra")
#Loop over all materials and channels
for noMaterials in range(minNoMaterials, maxNoMaterials+1):
for noChannels in range(1,maxChannels+1):
print("Run", sys.argv[1], "#Materials:", noMaterials, ", #Channels:", noChannels)
#Load the phantom
if(len(sys.argv)>1):
TPh = Phantom("Nx128Nclass50Nchan1run" + str(sys.argv[1]) + ".tiff")
else:
TPh = Phantom("Nx128Nclass50Nchan1run1.tiff")
loadPhantomFile(TPh)
#Compute region of interest for pixel error
ROI = np.copy(TPh.MatArr)
if(diskOnly):
ROI[ROI > 1] = 1
else:
ROI.fill(1)
#Reduce the number of materials in the phantom (deterministically or randomly)
TPh.MatArr = reduceMaterials(TPh.MatArr, noMaterials, DetRed)
#Save reduced phantoms for a few configurations (run 1 and 2 or 10 materials)
if(saveResults and int(sys.argv[1]) == 1):
if(noMaterials == 2):
cmap = ListedColormap(['red', 'blue', 'yellow'], 'indexed')
FILEOUT = '../results/plots'
filename = 'Nx128Nclass50Nchan1run1CONVERTEDmat2'
pylab.imsave(FILEOUT + '/' + filename + '.png', TPh.MatArr, dpi=600, cmap=cmap)
pylab.imsave(FILEOUT + '/' + filename + '.eps', TPh.MatArr, cmap=cmap)
elif(noMaterials == 10):
cmap = ListedColormap(['red', 'blue', 'yellow', 'green', 'orange', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan', 'white'], 'indexed')
FILEOUT = '../results/plots'
filename = 'Nx128Nclass50Nchan1run1CONVERTEDmat10'
pylab.imsave(FILEOUT + '/' + filename + '.png', TPh.MatArr, dpi=600, cmap=cmap)
pylab.imsave(FILEOUT + '/' + filename + '.eps', TPh.MatArr, cmap=cmap)
#Define channels (1 to #noChannels)
channels = np.arange(1,noChannels+1)
#Get number of materials in the reduced phantom
materials = np.unique(TPh.MatArr)
nomaterials = len(materials)
#Get number of channels and create random spectra
Channels = len(channels)
DiscMaterialSpectra = makeRandomDiscMaterialSpectra(nomaterials, Channels)
#Copy spectra of previously used materials
DiscMaterialSpectra[0:temp2.shape[0],:] = temp2[:,0:DiscMaterialSpectra.shape[1]]
DiscMaterialSpectra[0:temp.shape[0],0:temp.shape[1]] = temp
#Save the material spectra defined above
if(saveSpectrum and noMaterials == maxNoMaterials and noChannels == maxChannels):
                if(len(sys.argv) > 1):
np.savetxt(RESPATH + "/MaterialSpectra/materialSpectraRun" + str(sys.argv[1]) + ".txt", DiscMaterialSpectra, fmt='%1.3f')
else:
np.savetxt(RESPATH + "/MaterialSpectra/materialSpectra.txt", DiscMaterialSpectra, fmt='%1.3f')
#Make material labels and attenuation spectra
del TPh.Labels[:]
for mat in materials:
TPh.Labels.append((mat, mat))
TPh.Labels.sort(key = operator.itemgetter(0))
for mat in TPh.Labels:
if(mat[0] != 0 and mat[1] != 0): #Exclude background
AtNo = mat[1]
if (AtNo > 0):
if AtNo not in [i[0] for i in TPh.AttenuationSpectra]: #Check if material is not already there
x, y = channels, DiscMaterialSpectra[AtNo][:]
if(noChannels > 1):
spectrum = scipy.interpolate.interp1d(x, y)
else:
spectrum = scipy.poly1d([y[0]])
attData = (x, y, spectrum)
TPh.AttenuationSpectra.append((AtNo,)+(mat[1],) + attData)
TPh.AttenuationSpectra.sort(key = operator.itemgetter(0)) #Keep sorted on number
#Run the MC-DART algorithm
pixelerror, seg = MCDART.MCDART(TPh, r, b, NAngles, ARM, Startit, MCDARTit, ARMit, FixProb, channels, materials, DiscMaterialSpectra, ROI = ROI, Smoothing = smoothing)
#Save the final segmentation
if(saveResults == True):
if not os.path.exists(RESPATH + "/Reconstructions"):
os.makedirs(RESPATH + "/Reconstructions")
colors = ['red', 'blue', 'yellow', 'green', 'orange', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan', 'white']
cmap = ListedColormap(colors[0:nomaterials], 'indexed')
pylab.imsave(RESPATH + "/Reconstructions/FinalSegRun" + str(sys.argv[1]) + "NoMat" + str(noMaterials) + "noChannels" + str(noChannels) + ".png", seg, dpi=600, cmap=cmap)
pylab.imsave(RESPATH + "/Reconstructions/FinalSegRun" + str(sys.argv[1]) + "NoMat" + str(noMaterials) + "noChannels" + str(noChannels) + ".eps", seg, cmap=cmap)
#Update the array with pixel errors
AllPixelErrors[noMaterials-minNoMaterials][noChannels-1] = pixelerror
#Saves the material attenuations for the next (channel) iteration
temp = DiscMaterialSpectra
#Saves the material attenuations for the next (material) iteration
temp2 = DiscMaterialSpectra
temp = np.zeros((1,1))
#Pixel error for all material-channel combinations
print("Pixel errors for all material-channel combinations\n", AllPixelErrors)
#Save pixel error results
if(saveResults == True):
if not os.path.exists(RESPATH + "/PixelErrors"):
os.makedirs(RESPATH + "/PixelErrors")
np.savetxt(RESPATH + "/PixelErrors/pixelErrorRun" + str(sys.argv[1]) + ".txt", AllPixelErrors, fmt='%i')
if __name__ == "__main__":
main()
|
mzeegers/MC-DART
|
scripts/MCDARTExp1.py
|
MCDARTExp1.py
|
py
| 8,833
|
python
|
en
|
code
| 0
|
github-code
|
6
|
75140441147
|
import numpy as np
class GradientDescent():
def __init__(self, X, y, w, loss, batch_size = None, reg_lambda = 0, update_X = False, seed = None):
        '''input:
            X: (n, m)
            y: (n, 1)
            w: (m, 1) initial weights
            loss: instance of a class with at least two methods: "compute_cost" and "derivative"
        '''
np.random.seed(seed)
self.X = X
self.y = y.reshape(-1,1)
self.w = w
self.loss = loss
self.batch_size = batch_size if batch_size is not None else self.X.shape[0]
self.reg_lambda = reg_lambda
self.loss.compute_cost(self.X, self.w, self.y, self.reg_lambda)
self.history = []
def jacobian(self, x_batch, y_batch):
self.gradient = self.loss.derivative(x_batch, self.w, y_batch, self.reg_lambda).reshape(x_batch.shape[1],1) # shape: mx1
def weight_update(self, x_batch, y_batch, learning_rate):
self.jacobian(x_batch, y_batch)
self.w -= learning_rate * self.gradient
def split_data(self):
data = np.concatenate((self.X, self.y), axis=1)
np.random.shuffle(data)
self.X = data[:, :-1]
self.y = data[:, -1].reshape(-1, 1)
q = self.X.shape[0] // self.batch_size
block_end = q * self.batch_size
X_batches = np.split(self.X[:block_end], q)
y_batches = np.split(self.y[:block_end], q)
if self.X.shape[0] % self.batch_size != 0:
X_batches = X_batches + [self.X[block_end:]]
y_batches = y_batches + [self.y[block_end:]]
return X_batches, y_batches
def gradient_descent(self, learning_rate = 0.1, max_iter = 100, threshold = 1e-3, debug = False):
X_batches, y_batches = self.split_data()
i = 0
if debug:
print('n batches {}\ninitial weights {}'.format(len(X_batches), self.w))
while(i < max_iter and self.loss.cost > threshold):
for x_batch, y_batch in zip(X_batches, y_batches):
self.weight_update(x_batch, y_batch, learning_rate)
self.loss.compute_cost(self.X, self.w, self.y, self.reg_lambda)
self.history.append(self.loss.cost)
if debug:
print('\n****iter {}'.format(i+1))
print('gradient {}'.format(self.gradient))
print('weights {}'.format(self.w))
print('cost {}'.format(self.history[-1]))
i += 1
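# Usage sketch (not from the original file): `SquaredLoss` is a hypothetical helper
# that satisfies the interface GradientDescent expects (a `compute_cost` that sets
# .cost and a `derivative` that returns the gradient); it relies on the numpy import
# already at the top of this module.
if __name__ == "__main__":
    class SquaredLoss:
        def compute_cost(self, X, w, y, reg_lambda):
            # mean squared error plus an L2 penalty; .cost is read by gradient_descent()
            residuals = X @ w - y
            self.cost = float(np.mean(residuals ** 2) + reg_lambda * np.sum(w ** 2))
            return self.cost
        def derivative(self, X, w, y, reg_lambda):
            # gradient of the cost above with respect to w, shape (m, 1)
            return 2 * (X.T @ (X @ w - y) / X.shape[0] + reg_lambda * w)
    X = np.random.rand(100, 3)
    y = X @ np.array([[1.0], [2.0], [3.0]])
    gd = GradientDescent(X, y, w=np.zeros((3, 1)), loss=SquaredLoss(), batch_size=16, seed=0)
    gd.gradient_descent(learning_rate=0.1, max_iter=200)
    print("learned weights:", gd.w.ravel(), "final cost:", gd.history[-1])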
|
Enrico4444/AlgosFromScratch
|
utils/gradient_descent.py
|
gradient_descent.py
|
py
| 2,442
|
python
|
en
|
code
| 0
|
github-code
|
6
|
30060227014
|
#! /usr/bin/env python3
'''
invocation: ram_gen.py -width 10 -depth 20 [-name myram] [-help] [-mon] [-addr Addr] [-din Din] [-dout Dout] [-wr wr | ~wr] [-cs cs | ~cs] [-clk Clk]
-mon will add $display lines to the verilog to help keep track of writes/reads.
-din -dout -addr -cs -wr -clk : all these enable renaming of pins.
-cs and -wr have the option of a leading ~, which makes them active low.
'''
import os,sys,string,types
def main():
Params = parse_args()
if 'help' in Params:
print(__doc__)
return
create_defaults(Params)
create_ram(Params)
def create_defaults(Params):
if 'name' not in Params:
Wid = paramx(Params,'width')
Dep = paramx(Params,'depth')
Name = 'ram%s_%s'%(Wid,Dep)
Params['name']=Name
for Pin in ['cs','wr','din','dout','addr','clk']:
if Pin not in Params:
Params[Pin]=Pin
if 'wmask' not in Params:
Params['wmask']=False
if Params['cs'][0]=='~':
Params['acs']='~'+Params['cs']
else:
Params['acs']=Params['cs']
if Params['wr'][0]=='~':
Params['awr']='~'+Params['wr']
Params['negawr']=Params['wr']
else:
Params['awr']=Params['wr']
Params['negawr']='~'+Params['wr']
ReplaceList = ['wmask','acs','negawr','awr','clk','addr','cs','wr','din','dout']
def createInstanceExample(File,Params,Wid,Dep,Name,Awid):
if Params['wmask']:
Stringx = InstMaskString
else:
Stringx = InstString
Stringx = Stringx.replace('WID',str(Wid-1))
Stringx = Stringx.replace('WAD',str(Awid-1))
Stringx = Stringx.replace('NAME',Name)
for Key in ReplaceList:
KEY = Key.upper()
Repl = paramx(Params,Key)
if Repl:
Stringx = Stringx.replace(KEY,Repl)
File.write(Stringx)
def create_ram(Params):
Wid = paramx(Params,'width')
Dep = paramx(Params,'depth')
Name = paramx(Params,'name')
Awid = bits_needed(Dep)
Fname = '%s.v'%(Name)
print('>>>>>',Fname,Params,Name)
File = open(Fname,'w')
if Params['wmask']:
Stringx = String0Mask
else:
Stringx = String0
Stringx = Stringx.replace('WID',str(Wid-1))
Stringx = Stringx.replace('WAD',str(Awid-1))
Stringx = Stringx.replace('NAME',Name)
for Key in ReplaceList:
KEY = Key.upper()
Repl = paramx(Params,Key)
if Repl:
Stringx = Stringx.replace(KEY,Repl)
File.write(Stringx)
File.write('reg [%d:0] mem [0:%d];\n'%(Wid-1,Dep-1))
File.write('reg [%d:0] tmp;\n'%(Wid-1))
if Dep<(1<<Awid):
StrAddr = StringAddrPart
else:
StrAddr = StringAddrFull
StrAddr = StrAddr.replace('ADDR',paramx(Params,'addr'))
StrAddr = StrAddr.replace('DEPTH',str(paramx(Params,'depth')))
File.write(StrAddr)
if Params['wmask']:
Stringx = String1Mask
else:
Stringx = String1
Stringx = Stringx.replace('WID',str(Wid))
for Key in ReplaceList:
KEY = Key.upper()
Repl = paramx(Params,Key)
if Repl:
Stringx = Stringx.replace(KEY,Repl)
File.write(Stringx)
if 'mon' in Params:
Stringx = String2.replace('WID',str(Wid))
for Key in ReplaceList:
KEY = Key.upper()
Repl = paramx(Params,Key)
if Repl:
Stringx = Stringx.replace(KEY,Repl)
File.write(Stringx)
File.write('endmodule\n')
createInstanceExample(File,Params,Wid,Dep,Name,Awid)
File.close()
InstString = '''
// example instance
// NAME NAME (.clk(CLK),.CS(CS),.WR(WR),.DIN(DIN),.DOUT(DOUT),.ADDR(ADDR));
'''
InstMaskString = '''
// example instance
// NAME NAME (.clk(CLK),.CS(CS),.WR(WR),.DIN(DIN),.DOUT(DOUT),.ADDR(ADDR),.WMASK(WMASK));
'''
String0 = '''
module NAME (input CLK,input CS,input WR
,input [WID:0] DIN
,output [WID:0] DOUT
,input [WAD:0] ADDR
);
'''
String0Mask = '''
module NAME (input CLK,input CS,input WR
,input [WID:0] DIN
,input [WID:0] WMASK
,output [WID:0] DOUT
,input [WAD:0] ADDR
);
'''
StringAddrPart = '''
wire addr_ok = ((^ADDR)!==1'bx)&&(ADDR<DEPTH);
'''
StringAddrFull = '''
wire addr_ok = ((^ADDR)!==1'bx);
'''
String1 = '''
wire inps_ok = (^{CS,WR}!==1'bx)&&addr_ok;
assign #1 DOUT = tmp;
always @(posedge CLK) if (inps_ok && ACS && AWR) begin
mem[ADDR]<=DIN;
end
always @(posedge CLK) begin
if (inps_ok && ACS && NEGAWR) tmp <= mem[ADDR];
else tmp <= WID'bx;
end
'''
String1Mask = '''
wire inps_ok = ((^{WMASK,CS,WR})!==1'bx)&&addr_ok;
assign #1 DOUT = tmp;
always @(posedge CLK) if (inps_ok && ACS && AWR) begin
mem[ADDR]<=(WMASK & DIN)|(mem[ADDR] & ~WMASK);
end
always @(posedge CLK) begin
if (inps_ok && ACS && NEGAWR) tmp <= mem[ADDR];
else tmp <= WID'bx;
end
'''
String2 = '''
always @(posedge clk) begin
if (inps_ok) begin
if (ACS && AWR) begin
$display(" @%d: ram %m write [%h] <= %h",$stime,ADDR,DIN);
end
if (ACS && NEGAWR) begin
$display(" @%d: ram %m read %h <= [%h] ",$stime,mem[ADDR],ADDR);
end
end else if ($stime>1000) begin
$display(" @%d: ram %m bad controls ADDR=%h CS=%h WR=%h",$stime,ADDR,CS,WR);
end
end
'''
def parse_args():
params={}
fnames=[]
state='idle'
for X in sys.argv[1:]:
if (state=='idle'):
if (X[0]=='-'):
state='param'
Param = X[1:]
else:
fnames += [X]
elif (state=='param'):
if (X[0]=='-'):
params[Param]='yes'
state='param'
Param = X[1:]
else:
state='idle'
params[Param]=mint(X)
if (state=='param'):
params[Param]='yes'
if fnames!=[]:
params['fnames']=fnames
return params
def mint(X):
try:
return int(X)
except:
return X
def paramx(Params,Prm,Default=None):
if Prm in Params:
X = Params[Prm]
if (type(X)is str)and(X[0]=='~'):
return X[1:]
try:
return eval(X)
except:
return X
if Default!=None:
return Default
print('ERROR! params was asked to get param=%s, not found and no default'%Prm)
print('given params %s'%str(Params))
print(__doc__)
sys.exit()
def bits_needed(Int):
Bin = bin(Int-1)[2:]
# if (Bin[0]=='1')and('1' not in Bin[1:]):
# return len(Bin)
return len(Bin)
main()
|
greenblat/vlsistuff
|
pybin3/ram_gen.py
|
ram_gen.py
|
py
| 6,540
|
python
|
en
|
code
| 41
|
github-code
|
6
|
1173013676
|
from hand import Hand
from deck import Deck
class Play:
def play(self):
wins = 0
losses = 0
games_played = 0
cont = True
print("\n---------------------------------------------------------------------------------\n")
print("\n Welcome to Blackjack! \n")
print("\n---------------------------------------------------------------------------------\n")
while(cont):
games_played+=1
deck = Deck()
deck.shuffle()
player = Hand()
dealer = Hand()
print("\nPlayer is dealt: ")
print(player.hit(deck.deal()))
dealer.hit(deck.deal())
print("\nPlayer is dealt: ")
print(player.hit(deck.deal()))
print("\nDealer is dealt: ")
print(dealer.hit(deck.deal()))
print("\nPlayer's hand: ", player.cards[0], player.cards[1])
print("\nPlayer's hand value: ", player.value)
if player.check_bj() == 2:
if self.check_win(player, dealer) == 1:
wins +=1
else:
print("\nDealer's hand: ", dealer.cards[1], " *hidden card*\n")
self.make_choice(deck, player)
if player.value > 21:
self.bust(player)
losses += 1
else:
if(dealer.value >= 17):
result = self.check_win(player, dealer)
if result == 1:
wins +=1
elif result == 2:
losses += 1
else:
print("\nPlayer value = ", player.value)
print("\nDealer's hand: ", dealer.cards[0], dealer.cards[1])
while(dealer.value < 17):
print("\nDealer value = ", dealer.value)
print("\nDealer is dealt: ")
print(dealer.hit(deck.deal()))
result = self.check_win(player, dealer)
if result == 1:
wins +=1
elif result == 2:
losses += 1
print("\n Total Wins: ", wins)
print("\n Total Losses: ", losses)
print("\n Total Games Played: ", games_played)
good = True
while good:
again = input("\nPlay Again? (Enter Y/N): ")
if again.upper() == "N":
cont = False
good = False
elif again.upper() == "Y":
cont = True
good = False
else:
good = True
print("\nPlease input either Y for Yes or N for No.")
def make_choice(self, deck, hand):
valid = True
while(valid):
choice = input("\nWould you like to hit or stand? (Enter H to hit, S to stand): ")
if choice.upper() == "H":
print("\nPlayer is dealt: ")
print(hand.hit(deck.deal()))
if hand.value >= 21:
valid = False
else:
print("\nPlayer value = ", hand.value)
elif choice.upper() == "S":
valid = False
else:
print("\nPlease input either H for Hit or S for Stand.")
def check_win(self, player, dealer):
print("\nPlayer value = ", player.value)
print("\nDealer value = ", dealer.value)
if player.value > dealer.value:
print("\nPlayer wins!!!")
return 1
elif player.value == dealer.value:
print("\nThey are equal! It's a push!")
return 3
elif dealer.value > 21:
print("\nDealer Busted! Player Wins!")
return 1
else:
print("Oh No! Dealer Wins!")
return 2
def bust(self, player):
print("\n Player value: ", player.value)
print("\n You Busted! Unlucky!")
|
IamFyrus/Blackjack
|
Blackjack/play.py
|
play.py
|
py
| 4,509
|
python
|
en
|
code
| 0
|
github-code
|
6
|
33917530992
|
from dateutil.parser import parse as parse_date
from flask import current_app
from inspire_dojson import record2marcxml
from inspire_utils.record import get_value
from lxml import etree
def dumps_etree(pid, record, **kwargs):
"""Dump MARC21 compatible record.
:param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
instance.
:param record: The :class:`invenio_records.api.Record` instance.
:returns: A LXML Element instance.
"""
r = record['_source']
# adding legacy version (controlfield 005)
acquisition_date = parse_date(r['acquisition_source']['date'])
r['legacy_version'] = acquisition_date.strftime("%Y%m%d%H%M%S.0")
# adding number of pages (datafield 300)
page_nr = get_value(r, 'page_nr[0]')
if page_nr:
r['number_of_pages'] = page_nr
# create and add download url
if 'urls' not in r and '_files' in r:
files = []
for f in r['_files']:
url = 'http://%s/api/files/%s/%s' % (current_app.config.get('SERVER_NAME'), f['bucket'], f['key'])
files.append({
'value': url,
'description': f.get('filetype', '')
})
r['urls'] = files
return etree.fromstring(record2marcxml(r))
|
SCOAP3/scoap3-next
|
scoap3/modules/records/oai_serializer.py
|
oai_serializer.py
|
py
| 1,264
|
python
|
en
|
code
| 2
|
github-code
|
6
|
33036426825
|
"""Config flow for UniFi."""
import socket
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_BLOCK_CLIENT,
CONF_CONTROLLER,
CONF_DETECTION_TIME,
CONF_IGNORE_WIRED_BUG,
CONF_POE_CLIENTS,
CONF_SITE_ID,
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
CONTROLLER_ID,
DEFAULT_POE_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
LOGGER,
)
from .controller import get_controller
from .errors import AlreadyConfigured, AuthenticationRequired, CannotConnect
DEFAULT_PORT = 8443
DEFAULT_SITE_ID = "default"
DEFAULT_VERIFY_SSL = False
@callback
def get_controller_id_from_config_entry(config_entry):
"""Return controller with a matching bridge id."""
return CONTROLLER_ID.format(
host=config_entry.data[CONF_CONTROLLER][CONF_HOST],
site=config_entry.data[CONF_CONTROLLER][CONF_SITE_ID],
)
class UnifiFlowHandler(config_entries.ConfigFlow, domain=UNIFI_DOMAIN):
"""Handle a UniFi config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return UnifiOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the UniFi flow."""
self.config = None
self.desc = None
self.sites = None
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
try:
self.config = {
CONF_HOST: user_input[CONF_HOST],
CONF_USERNAME: user_input[CONF_USERNAME],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_PORT: user_input.get(CONF_PORT),
CONF_VERIFY_SSL: user_input.get(CONF_VERIFY_SSL),
CONF_SITE_ID: DEFAULT_SITE_ID,
}
controller = await get_controller(self.hass, **self.config)
self.sites = await controller.sites()
return await self.async_step_site()
except AuthenticationRequired:
errors["base"] = "faulty_credentials"
except CannotConnect:
errors["base"] = "service_unavailable"
except Exception: # pylint: disable=broad-except
LOGGER.error(
"Unknown error connecting with UniFi Controller at %s",
user_input[CONF_HOST],
)
return self.async_abort(reason="unknown")
host = ""
if await async_discover_unifi(self.hass):
host = "unifi"
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=host): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): bool,
}
),
errors=errors,
)
async def async_step_site(self, user_input=None):
"""Select site to control."""
errors = {}
if user_input is not None:
try:
desc = user_input.get(CONF_SITE_ID, self.desc)
for site in self.sites.values():
if desc == site["desc"]:
self.config[CONF_SITE_ID] = site["name"]
break
for entry in self._async_current_entries():
controller = entry.data[CONF_CONTROLLER]
if (
controller[CONF_HOST] == self.config[CONF_HOST]
and controller[CONF_SITE_ID] == self.config[CONF_SITE_ID]
):
raise AlreadyConfigured
data = {CONF_CONTROLLER: self.config}
return self.async_create_entry(title=desc, data=data)
except AlreadyConfigured:
return self.async_abort(reason="already_configured")
if len(self.sites) == 1:
self.desc = next(iter(self.sites.values()))["desc"]
return await self.async_step_site(user_input={})
sites = []
for site in self.sites.values():
sites.append(site["desc"])
return self.async_show_form(
step_id="site",
data_schema=vol.Schema({vol.Required(CONF_SITE_ID): vol.In(sites)}),
errors=errors,
)
class UnifiOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Unifi options."""
def __init__(self, config_entry):
"""Initialize UniFi options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
self.controller = None
async def async_step_init(self, user_input=None):
"""Manage the UniFi options."""
self.controller = self.hass.data[UNIFI_DOMAIN][self.config_entry.entry_id]
self.options[CONF_BLOCK_CLIENT] = self.controller.option_block_clients
if self.show_advanced_options:
return await self.async_step_device_tracker()
return await self.async_step_simple_options()
async def async_step_simple_options(self, user_input=None):
"""For simple Jack."""
if user_input is not None:
self.options.update(user_input)
return await self._update_options()
clients_to_block = {}
for client in self.controller.api.clients.values():
clients_to_block[
client.mac
] = f"{client.name or client.hostname} ({client.mac})"
return self.async_show_form(
step_id="simple_options",
data_schema=vol.Schema(
{
vol.Optional(
CONF_TRACK_CLIENTS,
default=self.controller.option_track_clients,
): bool,
vol.Optional(
CONF_TRACK_DEVICES,
default=self.controller.option_track_devices,
): bool,
vol.Optional(
CONF_BLOCK_CLIENT, default=self.options[CONF_BLOCK_CLIENT]
): cv.multi_select(clients_to_block),
}
),
)
async def async_step_device_tracker(self, user_input=None):
"""Manage the device tracker options."""
if user_input is not None:
self.options.update(user_input)
return await self.async_step_client_control()
ssids = (
set(self.controller.api.wlans)
| {
f"{wlan.name}{wlan.name_combine_suffix}"
for wlan in self.controller.api.wlans.values()
if not wlan.name_combine_enabled
}
| {
wlan["name"]
for ap in self.controller.api.devices.values()
for wlan in ap.wlan_overrides
if "name" in wlan
}
)
ssid_filter = {ssid: ssid for ssid in sorted(list(ssids))}
return self.async_show_form(
step_id="device_tracker",
data_schema=vol.Schema(
{
vol.Optional(
CONF_TRACK_CLIENTS,
default=self.controller.option_track_clients,
): bool,
vol.Optional(
CONF_TRACK_WIRED_CLIENTS,
default=self.controller.option_track_wired_clients,
): bool,
vol.Optional(
CONF_TRACK_DEVICES,
default=self.controller.option_track_devices,
): bool,
vol.Optional(
CONF_SSID_FILTER, default=self.controller.option_ssid_filter
): cv.multi_select(ssid_filter),
vol.Optional(
CONF_DETECTION_TIME,
default=int(
self.controller.option_detection_time.total_seconds()
),
): int,
vol.Optional(
CONF_IGNORE_WIRED_BUG,
default=self.controller.option_ignore_wired_bug,
): bool,
}
),
)
async def async_step_client_control(self, user_input=None):
"""Manage configuration of network access controlled clients."""
errors = {}
if user_input is not None:
self.options.update(user_input)
return await self.async_step_statistics_sensors()
clients_to_block = {}
for client in self.controller.api.clients.values():
clients_to_block[
client.mac
] = f"{client.name or client.hostname} ({client.mac})"
return self.async_show_form(
step_id="client_control",
data_schema=vol.Schema(
{
vol.Optional(
CONF_BLOCK_CLIENT, default=self.options[CONF_BLOCK_CLIENT]
): cv.multi_select(clients_to_block),
vol.Optional(
CONF_POE_CLIENTS,
default=self.options.get(CONF_POE_CLIENTS, DEFAULT_POE_CLIENTS),
): bool,
}
),
errors=errors,
)
async def async_step_statistics_sensors(self, user_input=None):
"""Manage the statistics sensors options."""
if user_input is not None:
self.options.update(user_input)
return await self._update_options()
return self.async_show_form(
step_id="statistics_sensors",
data_schema=vol.Schema(
{
vol.Optional(
CONF_ALLOW_BANDWIDTH_SENSORS,
default=self.controller.option_allow_bandwidth_sensors,
): bool
}
),
)
async def _update_options(self):
"""Update config entry options."""
return self.async_create_entry(title="", data=self.options)
async def async_discover_unifi(hass):
"""Discover UniFi address."""
try:
return await hass.async_add_executor_job(socket.gethostbyname, "unifi")
except socket.gaierror:
return None
|
84KaliPleXon3/home-assistant-core
|
homeassistant/components/unifi/config_flow.py
|
config_flow.py
|
py
| 11,066
|
python
|
en
|
code
| 1
|
github-code
|
6
|
3439919651
|
from sortedcontainers import SortedDict
class Node:
def __init__(self, val=None):
self.val = val
self.next = None
self.last = None
class MaxStack:
def __init__(self):
self.dic = SortedDict()
self.root = Node()
self.root.last, self.root.next = self.root, self.root
def push(self, x: int) -> None:
if x not in self.dic:
self.dic[x] = []
node = Node(x)
self.root.next.last, self.root.next, node.last, node.next = node, node, self.root, self.root.next
self.dic[x].append(node)
def pop(self) -> int:
node = self.root.next
node.next.last, node.last.next = node.last, node.next
node_lst = self.dic[node.val]
node_lst.pop()
if len(node_lst) == 0:
self.dic.pop(node.val)
return node.val
def top(self) -> int:
return self.root.next.val
def peekMax(self) -> int:
return self.dic.peekitem()[0]
def popMax(self) -> int:
val, node_lst = self.dic.peekitem()
node = node_lst.pop()
if len(node_lst) == 0:
self.dic.pop(val)
node.next.last, node.last.next = node.last, node.next
return val
# Your MaxStack object will be instantiated and called as such:
# obj = MaxStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.peekMax()
# param_5 = obj.popMax()
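# Worked trace (not from the original file), following the classic LeetCode 716
# example, to show how pop and popMax interact:
#   s = MaxStack(); s.push(5); s.push(1); s.push(5)
#   s.top()     -> 5   (topmost element)
#   s.popMax()  -> 5   (removes the max closest to the top)
#   s.top()     -> 1
#   s.peekMax() -> 5   (the 5 at the bottom is still there)
#   s.pop()     -> 1
#   s.top()     -> 5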
|
cuiy0006/Algorithms
|
leetcode/716. Max Stack.py
|
716. Max Stack.py
|
py
| 1,463
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74492711866
|
num1 = int(input("Number 1: "))
num2 = int(input("Number 2: "))
factors1, factors2 = [], []
def factoriser(arr, num):
for i in range(num):
if num%(i+1) == 0 and i+1 != num:
arr.append(i+1)
factoriser(factors1, num1)
factoriser(factors2, num2)
if (sum(factors1) == num2) and (sum(factors2) == num1) and num1 != num2:
print("Amicable numbers.")
else:
print("Not amicable.")
"""
Difficulty: E
This one was fairly easy to do; the only real challenge here is making a
function to find the factors of each input, the rest is elementary.
"""
|
Pararcana/British-Informatics-Olympiad-Python
|
1996/Q1 - Amicable Numbers [E] .py
|
Q1 - Amicable Numbers [E] .py
|
py
| 555
|
python
|
en
|
code
| 1
|
github-code
|
6
|
13041202153
|
import falcon
import json
import logging
logger = logging.getLogger(__name__)
class Correlation:
def __init__(self, store):
self.__store = store
def on_get(self, req, resp):
params = req.params
logger.info('request: {}'.format(params))
if 'series1' not in params or 'series2' not in params:
resp.status = falcon.HTTP_400
resp.body = json.dumps({'error message': 'bad series parameters'})
else:
resp.status = falcon.HTTP_200
resp.body = json.dumps({'corr': [[1, 2], [2, 3]]})
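# Sketch (not in the original file) of how this resource might be mounted on a
# Falcon application; the route path is an assumption and `store` is whatever
# data-access object the project normally injects.
if __name__ == "__main__":
    app = falcon.App() if hasattr(falcon, "App") else falcon.API()  # Falcon 3.x vs 2.x
    app.add_route("/data/corr", Correlation(store=None))
    # GET /data/corr?series1=a&series2=b -> 200 with {"corr": [[1, 2], [2, 3]]}
    # GET /data/corr                     -> 400 with {"error message": "bad series parameters"}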
|
Qinode/final-visual-api
|
src/resources/data/corr.py
|
corr.py
|
py
| 576
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73076321467
|
from __future__ import annotations
import os
from typing import Callable, TYPE_CHECKING
if TYPE_CHECKING:
from bot.translator import Translator
app_name = "TTMediaBot"
app_version = "2.3.1"
client_name = app_name + "-V" + app_version
about_text: Callable[[Translator], str] = lambda translator: translator.translate(
"""\
A media streaming bot for TeamTalk.
Authors: Amir Gumerov, Vladislav Kopylov, Beqa Gozalishvili, Kirill Belousov.
Home page: https://github.com/gumerov-amir/TTMediaBot
License: MIT License\
"""
)
fallback_service = "yt"
loop_timeout = 0.01
max_message_length = 256
recents_max_lenth = 32
tt_event_timeout = 2
directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
gumerov-amir/TTMediaBot
|
bot/app_vars.py
|
app_vars.py
|
py
| 715
|
python
|
en
|
code
| 52
|
github-code
|
6
|
43959470416
|
import datetime
import os
import random
import sys
from itertools import islice
from typing import List, Generator, Iterator
folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'ch03-mem-and-variables'))
sys.path.insert(0, folder)
import size_util
random.seed(42)
def main():
# Took 83 MB in naive mode
start_mem = report("Starting")
t0 = datetime.datetime.now()
original = load_data(); report("Load")
filtered = filter_data(original); report("filtered")
scaled = scale_data(filtered, 2.718); report("scaled")
# Need to work with it over and over and index it?
# scaled = list(scaled)
print("Head", list(islice(scaled, 0, 10)))
tail = []
for n in scaled:
tail.append(n)
if len(tail) > 10:
tail.pop(0)
print("Tail", tail)
final_mem = report("done")
dt = datetime.datetime.now() - t0
print(f"Done, mem usage: {final_mem-start_mem:,.0f} MB, in {dt.total_seconds():.2f} sec")
def report(step_name: str):
print(f"{step_name}:", end=' ')
return size_util.report_process_mem()
def load_data() -> Iterator[int]:
return (random.randint(1_000, 10_000) for _ in range(1, 1_000_000))
def filter_data(data: Iterator[int]) -> Iterator[int]:
for n in data:
if n % 5 != 0:
yield n
def scale_data(data: Iterator[int], factor: float) -> Iterator[float]:
return (
n * factor
for n in data
)
if __name__ == '__main__':
main()
|
talkpython/python-memory-management-course
|
code/ch07-mem-and-functions/app_one_at_a_time.py
|
app_one_at_a_time.py
|
py
| 1,496
|
python
|
en
|
code
| 39
|
github-code
|
6
|
71442915067
|
import sys
import os
import logging
from datetime import datetime
from logging.handlers import TimedRotatingFileHandler
from logging import StreamHandler
from panda3d.core import (
loadPrcFile,
Filename,
ConfigVariableBool,
)
def setup_log(editor_name, log_to_console=False, log_level=logging.DEBUG):
# check if we have a config file
home = os.path.expanduser("~")
basePath = os.path.join(home, f".{editor_name}")
if not os.path.exists(basePath):
os.makedirs(basePath)
logPath = os.path.join(basePath, "logs")
if not os.path.exists(logPath):
os.makedirs(logPath)
# Remove log files older than 30 days
for f in os.listdir(logPath):
fParts = f.split(".")
fDate = datetime.now()
try:
fDate = datetime.strptime(fParts[-1], "%Y-%m-%d_%H")
delta = datetime.now() - fDate
if delta.days > 30:
#print(f"remove {os.path.join(logPath, f)}")
os.remove(os.path.join(logPath, f))
except Exception:
# this file does not have a date ending
pass
log_file = os.path.join(logPath, f"{editor_name}.log")
handler = TimedRotatingFileHandler(log_file)
logHandlers = [handler]
if log_to_console:
consoleHandler = StreamHandler()
logHandlers.append(consoleHandler)
logging.basicConfig(
level=log_level,
handlers=logHandlers)
for root, dirs, files in os.walk(basePath):
for f in files:
if not f.endswith(".prc"):
continue
config_file = os.path.join(root, f)
loadPrcFile(config_file)
config_file = os.path.join(basePath, f".{editor_name}.prc")
if os.path.exists(config_file):
loadPrcFile(Filename.fromOsSpecific(config_file))
else:
with open(config_file, "w") as prcFile:
prcFile.write("skip-ask-for-quit #f\n")
prcFile.write("frame-enable-scene-editor #t\n")
prcFile.write("frame-enable-gui-editor #t\n")
return log_file, config_file
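# Usage sketch (not part of the original module); the editor name is arbitrary.
# setup_log() creates ~/.<editor_name>/logs/<editor_name>.log, prunes logs older
# than 30 days, loads any *.prc files found under ~/.<editor_name>, and writes a
# default .<editor_name>.prc on first run.
if __name__ == "__main__":
    log_file, config_file = setup_log("FRAME", log_to_console=True, log_level=logging.INFO)
    logging.info("logging to %s, config at %s", log_file, config_file)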
|
fireclawthefox/FRAME
|
panda3d_frame/editorLogHandler.py
|
editorLogHandler.py
|
py
| 2,086
|
python
|
en
|
code
| 12
|
github-code
|
6
|
70488681147
|
# accepted on coderun
import random
import sys
import time
length: int # = 98
arr: list[int] # = [1, 1, 1, 2, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 7]
s_tree_l: int # = 4 * length
max_tree: list[tuple[int, int]] # = [(0, 0) for _ in range(s_tree_l)]
postponed_update: list[tuple[int, int]]
p: int
def process_queries():
global length, arr, s_tree_l, max_tree, postponed_update, p
n, m, q, chunks_on_servers, queries = get_pars()
# print(f'chunks_on_servers: {chunks_on_servers}')
length = n
p = 1
while p < length:
p *= 2
s_tree_l = 2 * p
arr = chunks_on_servers
max_tree = [(0, 0) for _ in range(s_tree_l)]
postponed_update = [(0, 0) for _ in range(s_tree_l)]
build()
# print(f'max_tree: {max_tree}')
# main cycle:
for s_out, s_in, left, right in queries:
# max queries:
max_, max_q = get_max(left - 1, right - 1) # 36.6 98
# print(f'max_, max_q: {max_, max_q}')
if max_ == s_out and max_q == right - left + 1:
# updates:
range_update(left - 1, right - 1, s_in)
print(f'1')
else:
print(f'0')
def get_pars():
n, m, q = map(int, input().split(' '))
chunks_on_servers = list(map(int, input().split(' ')))
queries = [map(int, input().split(' ')) for _ in range(q)]
return n, m, q, chunks_on_servers, queries
def combine(t1: tuple[int, int], t2: tuple[int, int]) -> tuple[int, int]:
if t1[0] > t2[0]:
return t1
if t2[0] > t1[0]:
return t2
return t1[0], t1[1] + t2[1]
def update(vert_ind: int):
global postponed_update, max_tree
if postponed_update[vert_ind][0] != 0 and vert_ind < p:
        # print(f'postponed update: {vert_ind}')  # debug only; would mix with the '1'/'0' answer stream if enabled
left_vert_ind = vert_ind << 1
right_vert_ind = left_vert_ind + 1
max_, max_q_ = postponed_update[vert_ind]
max_tree[left_vert_ind] = max_, (max_q_ + 1) // 2
max_tree[right_vert_ind] = max_, max_q_ // 2
postponed_update[left_vert_ind] = max_, (max_q_ + 1) // 2
postponed_update[right_vert_ind] = max_, max_q_ // 2
postponed_update[vert_ind] = (0, 0)
def build():
def build_(vert_ind: int, left_: int, right_: int) -> None:
# border case:
if left_ == right_:
max_tree[vert_ind] = arr[left_], 1
# recurrent relation:
else:
middle = (left_ + right_) // 2
i_ = vert_ind << 1
build_(i_, left_, middle)
build_(i_ + 1, middle + 1, right_)
max_tree[vert_ind] = combine(max_tree[i_], max_tree[i_ + 1])
build_(1, 0, length - 1)
def get_max(ql: int, qr: int) -> tuple[int, int]:
def get_max_(vert_ind: int, left_: int, right_: int, ql_: int, qr_: int) -> tuple[int, int]:
# print(f'left_, right_: {left_, right_}, ql_, qr_: {ql_, qr_}')
# border cases:
if ql_ > qr_:
return -1, 0
if (left_, right_) == (ql_, qr_):
return max_tree[vert_ind]
update(vert_ind)
# recurrent relation:
middle = (left_ + right_) // 2
i_ = vert_ind << 1
return combine(
get_max_(i_, left_, middle, ql_, min(qr_, middle)),
get_max_(i_ + 1, middle + 1, right_, max(ql_, middle + 1), qr_)
)
return get_max_(1, 0, length - 1, ql, qr)
def range_update(ql: int, qr: int, new_val: int):
def range_update_(vert_ind: int, left_: int, right_: int, ql_: int, qr_: int) -> None:
# print(f'{counter}. left_, right_: {left_, right_}, ql_, qr_: {ql_, qr_}')
# border cases:
if ql_ > qr_:
return
if (left_, right_) == (ql_, qr_):
max_tree[vert_ind] = postponed_update[vert_ind] = new_val, right_ - left_ + 1 # 36.6 98
return
update(vert_ind)
# recurrent relation:
middle = (left_ + right_) // 2
i_ = vert_ind << 1
range_update_(i_, left_, middle, ql_, min(qr_, middle))
range_update_(i_ + 1, middle + 1, right_, max(middle + 1, ql_), qr_)
max_tree[vert_ind] = combine(max_tree[i_], max_tree[i_ + 1])
range_update_(1, 0, length - 1, ql, qr)
process_queries()
|
LocusLontrime/Python
|
Yandex_fast_recruit_days/Hard/chunks_moving.py
|
chunks_moving.py
|
py
| 4,264
|
python
|
en
|
code
| 1
|
github-code
|
6
|
70788138747
|
import sys
import numpy as np
import util
from regression.linreg import NormalEquationLinearRegressor, GradientDescentLinearRegressor
from preprocess import reader, scaling
from validation import CrossValidator
def main():
if len(sys.argv) < 2:
print("Usage:\n\t{} [housing-data]".format(sys.argv[0]))
sys.exit(1)
dataset = reader.read(sys.argv[1], delim=' ')
    # Expand features with nonlinear functions
    # Need to put features and labels back together afterwards (util.fljoin below)
features, labels = util.fldivide(dataset)
features, scale = scaling.unit_scale(features)
features = util.basis_expand(features, lambda x: x ** 2, lambda x: x ** 3)
features = np.hstack([features, np.ones((len(features), 1))])
dataset = util.fljoin(features, labels)
reg = NormalEquationLinearRegressor(regularization=1e-8)
cv = CrossValidator(reg)
feat_indices, feat_errors = cv.best_3features_topN(dataset, n=5)
for indices, err in zip(feat_indices, feat_errors):
bestfeats = np.dstack([features[:, i] for i in indices]).squeeze()
data = util.fljoin(bestfeats, labels)
reg.train(data)
print(reg.w)
print("indices = {}, err = {}".format(indices, err))
main()
|
get9/ml-test
|
houselinreg.py
|
houselinreg.py
|
py
| 1,227
|
python
|
en
|
code
| 1
|
github-code
|
6
|
72729149627
|
import cv2
from skimage.metrics import structural_similarity as ssim
import numpy as np
from PIL import Image, ImageChops
import matplotlib.pyplot as plt
################################################################
########### USING PIXEL COMPARISON #########################
############## IMPORTANT TO READ !! #########################
#1 for maps with real data where Gmapping gives maps with #fefefe instead of #ffffff, and grey with #D8D8D8
# 2 for maps with matching colors, usually simulated data
# 3 for comparing maps with error, or maps where there are many tonalities of white
val = 1
#Path to the map generated by our algorithm
img1= Image.open('C:/Users/jc/Documents/GitHub/saut_ogm/image-comparison/ALL_occ-0.8_free-0.25_cell-0.05_wall-0.05.png')
#Path to the reference map
img2= Image.open('C:/Users/jc/Documents/GitHub/saut_ogm/image-comparison/mape2.png')
#Convert to RGB
img1 = img1.convert("RGB")
img2 = img2.convert("RGB")
#Resize images
width = max(img1.width, img2.width)
height = max(img1.height, img2.height)
img1 = img1.resize((width, height))
img2 = img2.resize((width, height))
# Convert the images to NumPy arrays
pixels1 = np.array(img1)
pixels2 = np.array(img2)
# Find white pixels in img1 and img2
white_pixels_1 = np.all(pixels1 == [255, 255, 255], axis=2)
#Maps that come from Gmapping are not 100% white, they are #fefefe
if val == 1:
white_pixels_2 = np.all(pixels2 == [254, 254, 254], axis=2)
elif val ==3:
min_value = [180, 180, 180]
max_value = [255, 255, 255]
white_pixels_2 = np.all((pixels2 >= min_value) & (pixels2 <= max_value), axis=2)
else:
white_pixels_2 = np.all(pixels2 == [255, 255, 255], axis=2)
# Initialize a counter for the different white pixels
count_white1 = np.sum(white_pixels_1)
count_white2 = np.sum(white_pixels_2)
#Interception
intersecting_pixels = np.sum(white_pixels_1 & white_pixels_2)
total_white_pixels = count_white1 + count_white2 - intersecting_pixels
#total_white_pixels = np.sum(white_pixels_1)
percentage_diff_white = (intersecting_pixels / total_white_pixels) * 100
#Print results
print("Using Only White Comparison:");
print("white 1: " + str(np.sum(white_pixels_1)))
print("white 2: " + str(np.sum(white_pixels_2)))
print("size: " + str(height*width))
print("Number of Intersecting Pixels: " + str(intersecting_pixels))
print("Percentage of equal pixels: " + str(percentage_diff_white))
diff = np.sum(pixels1!=pixels2)
# Create a copy of img1
highlighted_img1 = np.array(img1)
highlighted_img2 = np.array(img2)
diff_pixels = np.any(pixels1 != pixels2, axis=-1)
if val == 1:
map_pixels = np.logical_or(
np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2 == [254, 254, 254], axis=-1),
np.all(pixels1 == [0, 0, 0], axis=-1) & np.all(pixels2 == [0, 0, 0], axis=-1),
)
equal_pixels = np.logical_or(
np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2 == [254, 254, 254], axis=-1),
np.all(pixels1 == [149, 149, 149], axis=-1) & np.all(pixels2 == [216, 216, 216], axis=-1)
)
diff_pixels = np.logical_and(diff_pixels, np.logical_not(equal_pixels))
elif val == 3:
pixels2_range = (pixels2 >= min_value) & (pixels2 <= max_value)
map_pixels = np.logical_or(
np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2_range, axis=-1),
np.all(pixels1 == [0, 0, 0], axis=-1) & np.all(pixels2 == [0, 0, 0], axis=-1),
)
white = np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2_range, axis=-1)
else:
map_pixels = np.logical_or(
np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2 == [255, 255, 255], axis=-1),
np.all(pixels1 == [0, 0, 0], axis=-1) & np.all(pixels2 == [0, 0, 0], axis=-1),
)
diff_img = np.array(img1)
#In red the different pixels
diff_img[diff_pixels] = [255, 0, 0]
#In blue the white pixels of both images
diff_img[white_pixels_1] = [0,0,255]
diff_img[white_pixels_2] = [0, 0, 255]
#In green the map pixels (white and black) that appear simultaneously in both images
diff_img[map_pixels] = [0,255,0]
plt.imshow(diff_img)
plt.title('Differences between Image 1 and Image 2')
plt.show()
|
joaofgois/saut_ogm
|
scripts/MapComparisonMetric.py
|
MapComparisonMetric.py
|
py
| 4,169
|
python
|
en
|
code
| 0
|
github-code
|
6
|
13126975486
|
# Dijkstra's algorithm practice
# Programmers problem: shared taxi fare
import sys
import heapq
# Dijkstra's algorithm
def dijkstra(s, e):
global graph, length
    # Initialize the distance to every node to the maximum value
visit = [sys.maxsize]*(length+1)
    # The distance to the start node is 0
visit[s] = 0
    # Push [cost, node] pairs onto the priority queue (min-heap)
pq = [[0, s]]
heapq.heapify(pq)
    # Run the search (BFS-style traversal ordered by cost)
while pq:
cost, node = heapq.heappop(pq)
        # Skip this entry if its cost is larger than the best known cost for the node
if cost > visit[node]:
continue
        # Explore the nodes reachable from the current node
for i in graph[node]:
new_node, new_cost = i[0], i[1]
            # New cost = current cost + edge cost
new_cost += cost
            # Only proceed if the new cost is smaller than the best known cost for that node
if new_cost < visit[new_node]:
                # Update the best known cost for the node
visit[new_node] = new_cost
                # Push it onto the heap and continue
heapq.heappush(pq, [new_cost, new_node])
return visit[e]
def solution(n, s, a, b, fares):
global graph, length
answer = sys.maxsize
graph = [[] for _ in range(n+1)]
length = n
for i, j, cost in fares:
graph[i].append([j, cost])
graph[j].append([i, cost])
for i in range(1, n+1):
answer = min(answer, dijkstra(s, i) + dijkstra(i, a) + dijkstra(i, b))
return answer
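# Sanity check (not part of the original solution). With 3 nodes, start 1,
# destinations 2 and 3, and fares 1-2:4, 1-3:5, 2-3:1, the cheapest plan is to
# share the ride to node 2 (cost 4) and let the second passenger continue
# 2->3 (cost 1), so the answer is 5.
if __name__ == "__main__":
    assert solution(3, 1, 2, 3, [[1, 2, 4], [1, 3, 5], [2, 3, 1]]) == 5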
|
39world/Today-Algorithm-Study-
|
old_test/al_th_02.py
|
al_th_02.py
|
py
| 1,747
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
6690596464
|
from json import load
with open('config.json', 'r') as file:
params = load(file)
BOT_TOKEN = params['BOT_TOKEN']
PARAMS = params['PARAMS']
SEARCH_URL = params['SEARCH_URL']
HOST = params['HOST']
PORT = params['PORT']
DB_NAME = params['DB_NAME']
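# Example config.json layout (all values are placeholders, not taken from the
# original repository); only the keys read above are required.
#
# {
#     "BOT_TOKEN": "123456:ABC-DEF...",
#     "PARAMS": {"key": "value"},
#     "SEARCH_URL": "https://example.com/search",
#     "HOST": "localhost",
#     "PORT": 27017,
#     "DB_NAME": "telegram_bot"
# }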
|
YusupovAI/TelegramBot
|
config.py
|
config.py
|
py
| 274
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22021057101
|
from fractions import Fraction
from typing import Generic, TypeVar
import funcy
# generic `NamedTuple`s were only introduced in Python 3.11 - until then we need to
# import from `typing_extensions`
from typing_extensions import NamedTuple
from boiling_learning.io.dataclasses import dataclass
_T = TypeVar('_T')
class DatasetTriplet(NamedTuple, Generic[_T]):
train: _T
val: _T
test: _T
@dataclass(frozen=True)
class DatasetSplits:
train: Fraction | None = None
test: Fraction | None = None
val: Fraction | None = Fraction(0)
def __post_init__(self) -> None:
splits = (self.train, self.val, self.test)
n_nones = splits.count(None)
if n_nones > 1:
raise ValueError(
'at most one of *train*, *val* and *test* can be inferred (by passing `None`)'
)
if n_nones == 1:
names = ('train', 'val', 'test')
dct = funcy.zipdict(names, splits)
for name, split in dct.items():
if split is None:
others = funcy.omit(dct, [name])
others_sum = sum(others.values())
if not 0 < others_sum <= 1:
raise ValueError(
'it is required that 0 < '
+ ' + '.join(f'*{other}*' for other in others.keys())
+ ' <= 1'
)
split = 1 - others_sum
object.__setattr__(self, name, split)
splits = (self.train, self.val, self.test)
break
if sum(splits) != 1:
raise ValueError('*train* + *val* + *test* must equal 1')
if not (0 < self.train < 1 and 0 <= self.val < 1 and 0 < self.test < 1):
raise ValueError('it is required that 0 < (*train*, *test*) < 1 and 0 <= *val* < 1')
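# Usage sketch (not part of the original module); it assumes the project's dataclass
# wrapper behaves like the standard-library one for construction. Passing `None` for
# exactly one field lets __post_init__ infer it so the three fractions sum to 1.
if __name__ == "__main__":
    splits = DatasetSplits(train=Fraction(7, 10), val=Fraction(1, 10), test=None)
    assert splits.test == Fraction(1, 5)  # inferred as 1 - 7/10 - 1/10
    triplet = DatasetTriplet(train="train_ds", val="val_ds", test="test_ds")
    assert triplet.val == "val_ds"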
|
ruancomelli/boiling-learning
|
boiling_learning/datasets/splits.py
|
splits.py
|
py
| 1,911
|
python
|
en
|
code
| 7
|
github-code
|
6
|
39254020126
|
import os
import rasterio
import geopandas as gpd
import shapely
from shapely.geometry import box
from tqdm import tqdm
def parse_txt(txt_dir):
"""
Read txt file.
bbox format - xmin, ymin, xmax, ymax (unnormalized).
Params:
txt_dir (str): path to text file containing bboxes.
Returns:
example - [[xmin, ymin, xmax, ymax],
[xmin, ymin, xmax, ymax]]
"""
with open(txt_dir, 'r') as file:
content = [i.strip().split(',') for i in file.readlines()]
bboxes = [list(map(float, i)) for i in content]
return bboxes
def parse_geojson(geojson_dir):
"""
Read geojson file.
Params:
geojson_dir (str): path to geosjon file containing coordinates and crs system. For geo-referencing.
Returns:
image_id (str)
src_crs (source crs)
left (float)
top (float)
right (float)
bottom (float)
"""
# read geojson file
geo_df = gpd.read_file(geojson_dir)
image_id = str(geo_df.iloc[0]['id'].item())
left = geo_df.iloc[0]['left'].item()
top = geo_df.iloc[0]['top'].item()
right = geo_df.iloc[0]['right'].item()
bottom = geo_df.iloc[0]['bottom'].item()
src_crs = geo_df.crs
return image_id, src_crs, left, top, right, bottom
def produce_geo_files(model_output_folder, geojson_folder, output_folder):
"""
Geo-reference bounding boxes(model predictions) from text files and produce geojson files.
Params:
model_output_folder (str): folder containing model prediction text files
geojson_folder (str): folder containing geojson files to be used for geo-referencing
output_folder (str): folder where final geojson files containing geo-referenced model predictions will be produced.
Returns:
None
"""
txt_file_list = os.listdir(model_output_folder)
filename_list = [os.path.splitext(i)[0] for i in txt_file_list]
os.makedirs(output_folder, exist_ok = True)
# for each text file
for filename in filename_list:
# w, h assumed to be 1000x1000
image_width, image_height = 1000, 1000
# file dirs
geojson_dir = os.path.join(geojson_folder, filename + '.geojson')
txt_dir = os.path.join(model_output_folder, filename + '.txt')
# get bounding box list from txt file
bboxes = parse_txt(txt_dir)
# get geo-information for current png image tile
image_id, src_crs, left, top, right, bottom = parse_geojson(geojson_dir)
# used for mapping image pixel values to geo-coordinates
affine_tfm = rasterio.transform.from_bounds(west = left, south = bottom, east = right, north = top,
width = image_width, height = image_height)
bbox_geom_list, centroid_geom_list = [], []
# for each bbox in current txt file
for bbox in bboxes:
xmin, ymin, xmax, ymax = bbox
print('box coords:', xmin, ymin, xmax, ymax)
# geo-reference bounding box
bbox_geom = pix_to_geo_coords(affine_tfm, xmin, ymin, xmax, ymax)
# centroid of bounding box
bbox_centroid = bbox_geom.centroid
# append geo-registered bounding box and centroid
bbox_geom_list.append([bbox_geom])
centroid_geom_list.append([bbox_centroid])
# create 2 dataframes - one for bbox and one for centroid
bbox_geo_df = gpd.GeoDataFrame(bbox_geom_list, columns=['geometry'], crs=src_crs)
centroid_geo_df = gpd.GeoDataFrame(centroid_geom_list, columns=['geometry'], crs=src_crs)
# save dirs for 2 dataframes
bbox_gdf_save_dir = os.path.join(output_folder, filename + '_box' + '.geojson')
centroid_gdf_save_dir = os.path.join(output_folder, filename + '_centroid' + '.geojson')
# save 2 dataframes
bbox_geo_df.to_file(bbox_gdf_save_dir, driver='GeoJSON')
centroid_geo_df.to_file(centroid_gdf_save_dir, driver='GeoJSON')
def split_geojsons(geojson_dir, output_folder):
"""
Splitting the original geojson file 'sudan_grid.geojson' (file size around 2.4 Gb).
The geojson file contains geo-information (e.g. top, left, bottom, right geo-coordinates) for all png tiles.
After splitting, each geojson file will contain geo-information for only a single png tile.
Params:
geojson_dir (str): path to the original geojson file 'sudan_grid.geojson'
output_folder (str): folder where geojson files for each png tile will be produced.
Returns:
None
"""
os.makedirs(output_folder, exist_ok = True)
data = gpd.read_file(geojson_dir)
total_rows = len(data)
crs = data.crs
for idx in tqdm(range(total_rows)):
row = list(data.loc[idx])
file_id = str(row[0])
save_dir = os.path.join(output_folder, file_id + '.geojson')
gdf = gpd.GeoDataFrame([row], columns=['id', 'left', 'top', 'right', 'bottom', 'geometry'], crs=crs)
gdf.to_file(save_dir, driver='GeoJSON')
print(save_dir, ' --> Done.')
def pix_to_geo_coords(affine_tfm, xmin, ymin, xmax, ymax):
"""
Geo-reference a bounding box.
Params:
affine_tfm (affine.Affine): used for affine transformation
xmin (float): x min value of bounding box
ymin (float): y min value of bounding box
xmax (float): x max value of bounding box
ymax (float): y max value of bounding box
Returns:
geo_box (shapely.geometry.polygon.Polygon)
"""
shapely_box = box(xmin, ymin, xmax, ymax)
geo_box = shapely.affinity.affine_transform(shapely_box,
[affine_tfm.a,
affine_tfm.b,
affine_tfm.d,
affine_tfm.e,
affine_tfm.xoff,
affine_tfm.yoff])
return geo_box
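# End-to-end sketch (not part of the original module); every path below is
# hypothetical. Per the docstrings, the intended workflow is to split the big grid
# geojson into per-tile files first, then geo-reference the model prediction text
# files against those tiles.
if __name__ == "__main__":
    split_geojsons("data/grid.geojson", "data/tile_geojsons")
    produce_geo_files(
        model_output_folder="data/model_predictions",  # one .txt of bboxes per tile
        geojson_folder="data/tile_geojsons",           # produced by split_geojsons()
        output_folder="data/georeferenced_predictions",
    )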
|
unicef/Mongolia-school-mapping-AI-models
|
codes/geo_utils.py
|
geo_utils.py
|
py
| 6,240
|
python
|
en
|
code
| 2
|
github-code
|
6
|
35253535585
|
"""
A list of utility functions for creating test and training datasets from labelled hyperspectral data. Note that we avoid implementing
specific supervised classification algorithms, as scikit-learn already does an excellent job of this. Hence, the following
functions are simply designed to easily extract features and labels that are compatible with scikit-learn.
"""
import numpy as np
from hylite import HyData
def get_feature_vectors(data, labels, ignore=[]):
"""
Returns a feature vector and associated labels from a HyData instance.
Args:
data: the dataset (HyData instance) to extract features from.
labels: a list of boolean point or pixel masks where True values should be associated with
that label. Generated label indices will range from 0 to len(labels). Alternatively, if labels is
            a HyData instance (e.g. a classification image), then labels will be extracted from this.
ignore: a list of labels to ignore (if labels is a HyData instance). E.g. [ 0 ] will ignore pixels labelled as background.
Returns:
A tuple containing:
- F = a list containing a feature array for each class in labels.
- c = a list of the number of features for each class.
"""
# build boolean masks from HyData instance if necessary
if isinstance(labels, HyData):
# extract unique labels
ll = np.unique(labels.data)
# remove ignored labels
for n in ignore:
ll = np.delete(ll, np.where(ll == n))
# sort increasing
ll = np.sort(ll)
# build masks
masks = [labels.data[..., 0] == n for n in ll]
# return features
return get_feature_vectors(data, masks)
# check labels do not overlap...
assert np.max(np.sum(labels, axis=0)) == 1, "Error - class labels overlap..."
# reshape image data
data = data.get_raveled()
# get features
F = []
c = []
for i, lab in enumerate(labels):
mask = lab.reshape(data.shape[0]).astype(bool)
F.append(data[mask])
c.append(np.sum(mask))
return F, c
def balance( F, n=1.0):
"""
Samples a balanced feature vector from a list of features, as returned by
get_feature_vectors( ... ).
Args:
F: a list containing an array of features for each class.
        n: the number of features to extract. Default is 1.0 (extract as many features as possible).
If a float between 0 and 1 is passed then it is treated as a fraction of the maximum number of features.
If an integer is passed then this number of features will be extracted (or max(counts)).
Returns:
A tuple containing:
            - X = a balanced feature vector with dimensions N_samples x M_features.
- y = an array of length N_samples containing labels for each feature (ranging from 0 - n_classes).
"""
c = [f.shape[0] for f in F]
if n > 0 and n <= 1:
n = int(n * np.min(c))
else:
n = int(n)
    assert n < np.min(c), "Error - insufficient training data (%d < %d)" % (n, np.min(c))
# balance dataset
X = []
y = []
for i, f in enumerate(F):
idx = np.random.choice(f.shape[0], n, replace=False)
X.append(f[idx])
y.append(np.full((n), i))
# shuffle (just because)
X = np.vstack(X)
y = np.hstack(y)
idx = np.random.choice(y.shape[0], y.shape[0], replace=False)
return X[idx, :], y[idx]
def split(X, y, frac=0.5):
"""
Randomly split a labeled feature set into testing and training sets.
Args:
X: the feature set to split.
y: the label set to split.
frac: the fraction of train vs test datasets. Default is 0.5 (50%).
Returns:
train_X, train_y, test_X, test_y = training and testing features and labels.
"""
# extract training dataset
n_train = int(X.shape[0] * frac)
mask = np.full(X.shape[0], False)
mask[np.random.choice(X.shape[0], n_train, replace=False)] = True
return X[mask, :], y[mask], X[np.logical_not(mask), :], y[np.logical_not(mask)]
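# Sketch of the intended workflow (not part of the original module). `image` and
# `labels` stand for HyData instances (e.g. an image and a matching classification
# map) that the caller must load beforehand; the classifier choice is arbitrary.
#
# from sklearn.ensemble import RandomForestClassifier
# F, counts = get_feature_vectors(image, labels, ignore=[0])   # 0 = background
# X, y = balance(F, n=0.8)                                     # balanced features and labels
# train_X, train_y, test_X, test_y = split(X, y, frac=0.7)
# clf = RandomForestClassifier().fit(train_X, train_y)
# print("accuracy:", clf.score(test_X, test_y))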
|
hifexplo/hylite
|
hylite/analyse/supervised.py
|
supervised.py
|
py
| 4,132
|
python
|
en
|
code
| 24
|
github-code
|
6
|
13703423658
|
# pyedit
# created on 2015/6/14
# author: qianqians
from tools import argv_instance
from pyelement import pyelement
class pyedit(pyelement):
# edit input type
text = "text"
password = "password"
textarea = "textarea"
#event
oninput = "oninput"
onkeydown = "onkeydown"
def __init__(self, cname, type, layout, praframe):
self.type = type
super(pyedit, self).__init__(cname, layout, praframe)
def sub(self, id = None):
js = " var table_" + self.id + " = document.createElement(\"input\");\n"
js += " table_" + self.id + ".type = \"" + self.type + "\";\n"
js += super(pyedit, self).sub()
if id:
js += " " + id + ".appendChild(table_" + self.id + ");\n"
else:
js += " table_pop.appendChild(table_" + self.id + ");\n"
return js
def flush(self):
# if img is not none, use img for button,
# if img is none, use text for button,
        # handle onclick in js and send a request to the service
# codegen css in page
shtml = ""
if self.html is not None:
shtml = self.html
else:
if self.type == pyedit.textarea:
shtml += "<div id=\"" + self.id + "_1\"><textarea id=\"" + self.id + "\""
else:
shtml += "<div id=\"" + self.id + "_1\"><input id=\"" + self.id + "\" type=\"" + self.type + "\""
for event, onevent in self.uievent.iteritems():
shtml += event + "=" + self.id + event + "(this)"
if self.width > 0 and self.height > 0:
shtml += " style=\"height:" + str(self.height) + "px;width:" + str(self.width) + "px\""
if self.type == pyedit.textarea:
shtml += "></textarea></div>"
else:
shtml += "></div>"
js = ""
if self.js is not None:
js = self.js
else:
for event, onevent in self.uievent.iteritems():
js += "function " + self.id + event + "(id){"
for f in onevent:
js += f
js += "}\n"
return shtml, js
def client_get_input_text(self):
return "document.getElementById(\"" + self.id + "\").value"
def client_set_input_text(self, text):
return "id.value=\"" + text + "\";\n"
def server_get_input_text(self, text_key):
return "document.getElementById(\"" + self.id + "\").value=value[\"" + text_key + "\"];\n"
def pop_get_input_text(self):
return " table_" + self.id + ".value\n"
|
theDarkForce/plask
|
plask/pyedit.py
|
pyedit.py
|
py
| 2,205
|
python
|
en
|
code
| 2
|
github-code
|
6
|
74977717307
|
import csv
import os
from datetime import datetime
import logging
import re
from dipper.sources.PostgreSQLSource import PostgreSQLSource
from dipper.models.assoc.Association import Assoc
from dipper.models.assoc.G2PAssoc import G2PAssoc
from dipper.models.Genotype import Genotype
from dipper.models.Reference import Reference
from dipper.models.Model import Model
from dipper import config
from dipper.models.GenomicFeature import Feature, makeChromID
LOG = logging.getLogger(__name__)
class MGI(PostgreSQLSource):
"""
This is the
[Mouse Genome Informatics](http://www.informatics.jax.org/) resource,
from which we process genotype and phenotype data about laboratory mice.
Genotypes leverage the GENO genotype model.
Here, we connect to their public database, and download a subset of
tables/views to get specifically at the geno-pheno data,
then iterate over the tables. We end up effectively performing joins
when adding nodes to the graph.
In order to use this parser, you will need to have user/password connection
details in your conf.yaml file, like:
dbauth : {'mgi' : {'user' : '<username>', 'password' : '<password>'}}
You can request access by contacting mgi-help@jax.org
"""
# CONSIDER IF WE NEED:
# mgi_organism_acc_view:
# Consider using this for the taxon mapping instead of
# the hashmap encoded below
# mgi_reference_allele_view:
# Don't believe this view is used in either
    # the genotype or phenotype view
# all_allele_cellline_view: When we want to start dealing with cell lines
# mgi_note_strain_view: prose descriptions of strains.
# prb_strain_summary_view:
# Don't believe this view is used in
    # either the genotype or phenotype view
# prb_strain_marker_view:
# eventually i think we want this because
# it has other relevant markers that are affected
resources = {
'query_map': [
{
'query': '../../resources/sql/mgi/mgi_dbinfo.sql',
'outfile': 'mgi_dbinfo',
'Force': True
},
{
'query': '../../resources/sql/mgi/gxd_genotype_view.sql',
'outfile': 'gxd_genotype_view'
},
{
'query': '../../resources/sql/mgi/gxd_genotype_summary_view.sql',
'outfile': 'gxd_genotype_summary_view'
},
{
'query': '../../resources/sql/mgi/gxd_allelepair_view.sql',
'outfile': 'gxd_allelepair_view'
},
{
'query': '../../resources/sql/mgi/all_summary_view.sql',
'outfile': 'all_summary_view'
},
{
'query': '../../resources/sql/mgi/all_allele_view.sql',
'outfile': 'all_allele_view'
},
{
'query': '../../resources/sql/mgi/all_allele_mutation_view.sql',
'outfile': 'all_allele_mutation_view'
},
{
'query': '../../resources/sql/mgi/mrk_marker_view.sql',
'outfile': 'mrk_marker_view'
},
{
'query': '../../resources/sql/mgi/voc_annot_view.sql',
'outfile': 'voc_annot_view'
},
{
'query': '../../resources/sql/mgi/evidence.sql',
'outfile': 'evidence_view'
},
{
'query': '../../resources/sql/mgi/bib_acc_view.sql',
'outfile': 'bib_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_view.sql',
'outfile': 'prb_strain_view'
},
{
'query': '../../resources/sql/mgi/mrk_summary_view.sql',
'outfile': 'mrk_summary_view'
},
{
'query': '../../resources/sql/mgi/mrk_acc_view.sql',
'outfile': 'mrk_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_acc_view.sql',
'outfile': 'prb_strain_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_genotype_view.sql',
'outfile': 'prb_strain_genotype_view'
},
{
'query': '../../resources/sql/mgi/mgi_note_vocevidence_view.sql',
'outfile': 'mgi_note_vocevidence_view'
},
{
'query': '../../resources/sql/mgi/mgi_note_allele_view.sql',
'outfile': 'mgi_note_allele_view'
},
{
'query': '../../resources/sql/mgi/mrk_location_cache.sql',
'outfile': 'mrk_location_cache' # gene locations
}
],
'test_keys': '../../resources/mgi_test_keys.yaml'
}
# with an existing set of (fresh) files in the shell; we can get a head start with:
# for v in raw/mgi/*;do echo -e "\t\t'${v##*/}': \
# {\n\t\t\t'columns': [";head -1 $v|tr '\t' '\n'|sed "s/\(.*\)/\t\t\t\t'\1',/";done
tables = {
'all_allele_mutation_view': {
'columns': [
'_allele_key',
'mutation']},
'all_allele_view': {
'columns': [
'_allele_key',
'_marker_key',
'_strain_key',
'symbol',
'name',
'iswildtype']},
'all_summary_view': {
'columns': [
'_object_key',
'preferred',
'mgiid',
'description',
'short_description']},
'bib_acc_view': {
'columns': [
'accid',
'prefixpart',
'numericpart',
'_object_key',
'logicaldb',
'_logicaldb_key']},
'evidence_view': {
'columns': [
'_annotevidence_key',
'_annot_key',
'evidencecode',
'jnumid',
'term',
'value',
'annottype']},
'gxd_allelepair_view': {
'columns': [
'_allelepair_key',
'_genotype_key',
'_allele_key_1',
'_allele_key_2',
'allele1',
'allele2',
'allelestate']},
'gxd_genotype_summary_view': {
'columns': [
'_object_key',
'preferred',
'mgiid',
'subtype',
'short_description']},
'gxd_genotype_view': {
'columns': [
'_genotype_key',
'_strain_key',
'strain',
'mgiid']},
'mgi_note_allele_view': {
'columns': [
'_object_key',
'notetype',
'note',
'sequencenum']},
'mgi_note_vocevidence_view': {
'columns': [
'_object_key',
'note']},
'mgi_relationship_transgene_genes': {
'columns': [
'rel_key',
'object_1',
'allele_id',
'allele_label',
'category_key',
'category_name',
'property_key',
'property_name',
'property_value']},
'mrk_acc_view': {
'columns': [
'accid',
'prefixpart',
'_logicaldb_key',
'_object_key',
'preferred',
'_organism_key']},
'mrk_location_cache': {
'columns': [
'_marker_key',
'_organism_key',
'chromosome',
'startcoordinate',
'endcoordinate',
'strand',
'version']},
'mrk_marker_view': {
'columns': [
'_marker_key',
'_organism_key',
'_marker_status_key',
'symbol',
'name',
'latinname',
'markertype']},
'mrk_summary_view': {
'columns': [
'accid',
'_logicaldb_key',
'_object_key',
'preferred',
'mgiid',
'subtype',
'short_description']},
'prb_strain_acc_view': {
'columns': [
'accid',
'prefixpart',
'_logicaldb_key',
'_object_key',
'preferred']},
'prb_strain_genotype_view': {
'columns': [
'_strain_key',
'_genotype_key']},
'prb_strain_view': {
'columns': [
'_strain_key',
'strain',
'species']},
'voc_annot_view': {
'columns': [
'_annot_key',
'annottype',
'_object_key',
'_term_key',
'_qualifier_key',
'qualifier',
'term',
'accid']},
}
# For ambiguous/undefined taxa terms that will
# conflict with seq alt_type portion of local tt
unknown_taxa = [
'Not Applicable',
'Not Specified',
]
# for testing purposes, this is a list of internal db keys
# to match and select only portions of the source
def __init__(
self,
graph_type,
are_bnodes_skolemized,
data_release_version=None
):
super().__init__(
graph_type=graph_type,
are_bnodes_skolemized=are_bnodes_skolemized,
data_release_version=data_release_version,
name='mgi',
ingest_title='Mouse Genome Informatics',
ingest_url='http://www.informatics.jax.org/',
ingest_logo="source-mgi.png",
license_url=None,
data_rights='http://www.informatics.jax.org/mgihome/other/copyright.shtml',
file_handle=None)
# so that we don't have to deal with BNodes,
# we will create hash lookups
# for the internal identifiers the hash will hold
# the type-specific-object-keys to MGI public identifiers.
# then, subsequent views of the table will lookup the identifiers
# in the hash. this allows us to do the 'joining' on the fly
self.idhash = {
'allele': {}, 'marker': {}, 'publication': {}, 'strain': {},
'genotype': {}, 'annot': {}, 'notes': {}, 'seqalt': {}}
# to store if a marker is a class or indiv
self.markers = {
'classes': [], 'indiv': []}
# use this to store internally generated labels for various features
self.label_hash = {}
# use this to store the genotype strain ids
# for building genotype labels
self.geno_bkgd = {}
self.strain_to_genotype_map = {}
self.wildtype_alleles = set()
# also add the gene ids from the test_ids
# in order to capture transgenes of the test set
if 'gene' in self.all_test_ids:
self.test_ids = self.all_test_ids['gene']
else:
LOG.warning("not configured with gene test ids.")
self.test_ids = []
self.test_keys = self.open_and_parse_yaml(self.resources['test_keys'])
def fetch(self, is_dl_forced=False):
"""
For the MGI resource, we connect to the remote database,
and pull the tables into local files.
We'll check the local table versions against the remote version
:return:
"""
# check if config exists; if it doesn't, error out and let user know
        if 'dbauth' not in config.get_config() or 'mgi' \
                not in config.get_config()['dbauth']:
            LOG.error("not configured with PG user/password.")
# create the connection details for MGI
cxn = config.get_config()['dbauth']['mgi']
pg_iri = ''.join((
'jdbc:postgresql://', cxn['host'], ':', str(cxn['port']), '/',
cxn['database']))
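        # pg_iri ends up looking like (values are placeholders):
        # jdbc:postgresql://<host>:<port>/<database>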
self.dataset.set_ingest_source(pg_iri)
self.dataset.set_ingest_source_file_version_retrieved_on(
pg_iri,
datetime.today().strftime('%Y-%m-%d'))
# process the tables
# self.fetch_from_pgdb(self.tables, cxn, 100) # for testing only
# self.fetch_from_pgdb(self.tables, cxn, None, is_dl_forced)
for query_map in self.resources['query_map']:
query_fh = open(os.path.join(
os.path.dirname(__file__), query_map['query']), 'r')
query = query_fh.read()
# force = False
# if 'Force' in query_map: # unused
# force = query_map['Force']
self.fetch_query_from_pgdb(
query_map['outfile'], query, None, cxn)
        # always get this - it has the version info
self.fetch_transgene_genes_from_db(cxn)
datestamp = ver = None
# get the resource version information from
# table mgi_dbinfo, already fetched above
outfile = '/'.join((self.rawdir, 'mgi_dbinfo'))
if os.path.exists(outfile):
with open(outfile, 'r') as reader:
reader.readline() # read the header row; skip
info = reader.readline()
cols = info.split('\t')
ver = cols[0] # col 0 is public_version
ver = ver.replace('MGI ', '') # MGI 5.20 --> 5.20
# MGI has a datestamp for the data within the database;
# use it instead of the download date
# datestamp in the table: 2014-12-23 00:14:20[.12345]
# modification date without micro seconds
dat = cols[1].strip().split('.')[0]
datestamp = datetime.strptime(
dat, "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d")
self.dataset.set_ingest_source_file_version_num(pg_iri, ver)
self.dataset.set_ingest_source_file_version_date(pg_iri, datestamp)
def parse(self, limit=None):
"""
We process each of the postgres tables in turn.
The order of processing is important here, as we build
        up a hashmap of internal vs external identifiers
(unique keys by type to MGI id). These include allele, marker (gene),
publication, strain, genotype, annotation (association),
and descriptive notes.
:param limit: Only parse this many rows in each table
:return:
"""
if limit is not None:
LOG.info("Only parsing first %d rows of each file", limit)
LOG.info("Parsing files...")
if self.test_only:
self.test_mode = True
# the following will provide us the hash-lookups
# These must be processed in a specific order
self._process_prb_strain_acc_view(limit)
self._process_mrk_acc_view()
self._process_all_summary_view(limit)
self._process_bib_acc_view(limit)
self._process_gxd_genotype_summary_view(limit)
# The following will use the hash populated above
# to lookup the ids when filling in the graph
self._process_prb_strain_view(limit)
# self._process_prb_strain_genotype_view(limit)
self._process_gxd_genotype_view(limit)
self._process_mrk_marker_view(limit)
self._process_mrk_acc_view_for_equiv(limit)
self._process_mrk_summary_view(limit)
self._process_all_allele_view(limit)
self._process_all_allele_mutation_view(limit)
self._process_gxd_allele_pair_view(limit)
self._process_voc_annot_view(limit)
self._process_evidence_view(limit)
self._process_mgi_note_vocevidence_view(limit)
self._process_mrk_location_cache(limit)
self.process_mgi_relationship_transgene_genes(limit)
self.process_mgi_note_allele_view(limit)
LOG.info("Finished parsing.")
LOG.info("Loaded %d nodes", len(self.graph))
def fetch_transgene_genes_from_db(self, cxn):
"""
This is a custom query to fetch the non-mouse genes that
are part of transgene alleles.
:param cxn:
:return:
"""
query = '''
SELECT r._relationship_key as rel_key,
r._object_key_1 as object_1,
a.accid as allele_id,
alabel.label as allele_label,
rc._category_key as category_key,
rc.name as category_name,
t._term_key as property_key,
t.term as property_name,
rp.value as property_value
FROM mgi_relationship r
JOIN mgi_relationship_category rc ON r._category_key = rc._category_key
JOIN acc_accession a ON r._object_key_1 = a._object_key
AND rc._mgitype_key_1 = a._mgitype_key
AND a._logicaldb_key = 1
JOIN all_label alabel ON a._object_key = alabel._allele_key
AND alabel._label_status_key = 1
AND alabel.priority = 1
JOIN mgi_relationship_property rp ON r._relationship_key = rp._relationship_key
AND rp._propertyname_key = 12948292
JOIN voc_term t ON rp._propertyname_key = t._term_key
WHERE r._category_key = 1004
'''
self.fetch_query_from_pgdb(
'mgi_relationship_transgene_genes', query, None, cxn)
def _process_gxd_genotype_view(self, limit=None):
"""
This table indicates the relationship between a genotype
        and its background strain. It leverages the Genotype class methods
to do this.
Makes these triples:
<MGI:genotypeid> GENO:has_reference_part <MGI:strainid>
<MGI:strainid> a GENO:genomic_background
If the genotype id isn't in the hashmap, it adds it here
(but this shouldn't happen):
<MGI:genotypeid> a GENO:genotype
If the strain isn't in the hashmap, it also adds it here with a
monarchized identifier using the unique key of the strain,
formatted like: :_mgistrainkey12345
:param limit:
:return:
"""
src_key = 'gxd_genotype_view'
line_num = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
col = self.tables[src_key]['columns']
raw = '/'.join((self.rawdir, src_key))
LOG.info("getting genotypes and their backgrounds")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
row = line.split('\t')
line_num += 1
genotype_key = row[col.index('_genotype_key')].strip()
strain_key = row[col.index('_strain_key')].strip()
                strain = row[col.index('strain')].strip()
mgiid = row[col.index('mgiid')].strip()
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
if self.idhash['genotype'].get(genotype_key) is None:
# just in case we haven't seen it before,
# catch and add the id mapping here
self.idhash['genotype'][genotype_key] = mgiid
geno.addGenotype(mgiid, None)
# the label is elsewhere...
# need to add the MGI label as a synonym
# if it's in the hash,
# assume that the individual was created elsewhere
strain_id = self.idhash['strain'].get(strain_key)
background_type = self.globaltt['genomic_background']
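                # strains without a public id get an internal (bnode-style) id;
                # negative strain keys are unidentified/unknown backgrounds and
                # get a per-genotype placeholder marked as unspecified below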
if strain_id is None or int(strain_key) < 0:
if strain_id is None:
# some of the strains don't have public identifiers!
# so we make one up, and add it to the hash
strain_id = self._make_internal_identifier('strain', strain_key)
self.idhash['strain'].update({strain_key: strain_id})
model.addComment(strain_id, "strain_key:" + strain_key)
elif int(strain_key) < 0:
# these are ones that are unidentified/unknown.
# so add instances of each.
strain_id = self._make_internal_identifier(
'strain', re.sub(r':', '', str(strain_id)))
strain_id += re.sub(r':', '', str(mgiid))
strain_id = re.sub(r'^_', '_:', strain_id)
strain_id = re.sub(r'::', ':', strain_id)
model.addDescription(
strain_id,
"This genomic background is unknown. " +
"This is a placeholder background for " +
mgiid + "."
)
background_type = self.globaltt[
'unspecified_genomic_background']
# add it back to the idhash
LOG.info(
"adding background as internal id: %s %s: %s",
strain_key, strain, strain_id)
geno.addGenomicBackgroundToGenotype(
strain_id, mgiid, background_type)
self.label_hash[strain_id] = strain
# add BG to a hash so we can build the genotype label later
self.geno_bkgd[mgiid] = strain_id
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_gxd_genotype_summary_view(self, limit=None):
"""
Add the genotype internal id to mgiid mapping to the idhashmap.
Also, add them as individuals to the graph.
We re-format the label to put the background strain in brackets
after the gvc.
We must pass through the file once to get the ids and
aggregate the vslcs into a hashmap into the genotype
Triples created:
<genotype id> a GENO:intrinsic_genotype
<genotype id> rdfs:label "<gvc> [bkgd]"
:param limit:
:return:
"""
src_key = 'gxd_genotype_summary_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
geno_hash = {}
raw = '/'.join((self.rawdir, src_key))
LOG.info("building labels for genotypes")
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
mgiid = row[col.index('mgiid')].strip()
subtype = row[col.index('subtype')].strip()
short_description = row[col.index('short_description')].strip()
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('genotype'):
continue
# add the internal genotype to mgi mapping
self.idhash['genotype'][object_key] = mgiid
if preferred == '1':
d = re.sub(r'\,', '/', short_description.strip())
if mgiid not in geno_hash:
geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype,
'key': object_key}
else:
vslcs = geno_hash[mgiid].get('vslcs')
vslcs.append(d)
else:
pass
# TODO what to do with != preferred
if not self.test_mode and limit is not None and line_num > limit:
break
# now, loop through the hash and add the genotypes as individuals
# we add the mgi genotype as a label
# (we generate our own label later and add as a synonym)
geno = Genotype(graph)
for gt in geno_hash:
genotype = geno_hash.get(gt)
gvc = sorted(genotype.get('vslcs'))
label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']'
model.addComment(
gt, self._make_internal_identifier(
'genotype', genotype.get('key')
)
)
geno.addGenotype(gt, label.strip())
def _process_all_summary_view(self, limit):
"""
Here, we get the allele definitions: id, label, description, type
We also add the id to this source's global idhash for lookup later
<alleleid> a OWL:NamedIndividual
rdfs:label "allele symbol"
dc:description "long allele name"
:param limit:
:return:
"""
src_key = 'all_summary_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
col_len = len(col)
LOG.info(
"alleles with labels and descriptions from all_summary_view")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
# head -1 workspace/build-mgi-ttl/dipper/raw/mgi/all_summary_view|\
# tr '\t' '\n' | grep -n . | \
# awk -F':' '{col=$1;$1="";print $0,",\t #" col}'
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
# bail if the row is malformed
if col_len != len(row):
LOG.warning('Expected %i columns.', col_len)
LOG.warning('Received %i columns.', len(row))
                    LOG.warning(line)
continue
# no stray tab in the description column
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
mgiid = row[col.index('mgiid')].strip()
description = row[col.index('description')].strip()
short_description = row[col.index('short_description')].strip()
# NOTE: May want to filter alleles based on the preferred field
# (preferred = 1) or will get duplicates
# (24288, to be exact...
# Reduced to 480 if filtered on preferred = 1)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('allele'):
continue
# we are setting the allele type to None,
# so that we can add the type later
# since we don't actually know
# if it's a reference or altered allele
# altype = None # temporary; we'll assign the type later
                # set type to a parent term in case a more specific term is not found
altype = self.globaltt['allele']
# If we want to filter on preferred:
if preferred == '1':
# add the allele key to the hash for later lookup
self.idhash['allele'][object_key] = mgiid
# TODO consider not adding the individuals in this one
model.addIndividualToGraph(
mgiid, short_description.strip(), altype, description.strip()
)
self.label_hash[mgiid] = short_description.strip()
# TODO deal with non-preferreds, are these deprecated?
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_all_allele_view(self, limit):
"""
Add the allele as a variant locus (or reference locus if wild-type).
If the marker is specified, we add the link to the marker.
We assume that the MGI ids are available in the idhash,
added in all_summary_view.
We add the sequence alteration as a BNode here, if there is a marker.
Otherwise, the allele itself is a sequence alteration.
Triples:
<MGI:allele_id> a GENO:variant_locus
OR GENO:reference_locus
OR GENO:sequence_alteration IF no marker_id specified.
[GENO:has_variant_part OR GENO:has_reference_part] <MGI:marker_id>
GENO:derived_from <MGI:strain_id>
GENO:has_variant_part <_seq_alt_id>
<_seq_alt_id> a GENO:sequence_alteration
derives_from <strain_id>
:param limit:
:return:
"""
src_key = 'all_allele_view'
# transmission_key -> inheritance? Need to locate related table.
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
LOG.info(
"adding alleles, mapping to markers, extracting their "
"sequence alterations from all_allele_view")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
col_len = len(col)
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
# bail if the row is malformed
if col_len != len(row):
LOG.warning('Expected %i columns.', col_len)
LOG.warning('Received %i columns.', len(row))
                    LOG.warning(line)
continue
allele_key = row[col.index('_allele_key')].strip()
marker_key = row[col.index('_marker_key')].strip()
strain_key = row[col.index('_strain_key')].strip()
symbol = row[col.index('symbol')].strip()
name = row[col.index('name')].strip()
iswildtype = row[col.index('iswildtype')].strip()
# TODO update processing to use this view better
# including jnums!
if self.test_mode is True and \
int(allele_key) not in self.test_keys.get('allele'):
continue
# so are allele_key ints or not? -TEC
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is None:
LOG.error(
"what to do! can't find allele_id. skipping %s %s",
allele_key, symbol)
continue
marker_id = None
if marker_key is not None and marker_key != '':
# we make the assumption here that the markers
# have already been added to the table
marker_id = self.idhash['marker'].get(marker_key)
if marker_id is None:
LOG.error(
"what to do! can't find marker_id. skipping %s %s",
marker_key, symbol)
continue
iseqalt_id = self._make_internal_identifier('seqalt', allele_key)
# for non-wild type alleles:
if iswildtype == '0':
locus_type = self.globaltt['variant_locus']
locus_rel = self.globaltt['is_allele_of']
# for wild type alleles:
elif iswildtype == '1':
locus_type = self.globaltt['reference_locus']
locus_rel = self.globaltt['is_reference_allele_of']
# add the allele to the wildtype set for lookup later
self.wildtype_alleles.add(allele_id)
else:
locus_rel = None
locus_type = None
model.addIndividualToGraph(allele_id, symbol, locus_type)
model.makeLeader(allele_id)
self.label_hash[allele_id] = symbol
self.idhash['seqalt'][allele_key] = iseqalt_id
# HACK - if the label of the allele == marker,
# then make the thing a seq alt
allele_label = self.label_hash.get(allele_id)
marker_label = self.label_hash.get(marker_id)
if allele_label is not None and allele_label == marker_label:
# model.addSameIndividual(allele_id, marker_id)
# this causes disjoint category violations, see
# https://github.com/monarch-initiative/dipper/issues/519
self.idhash['seqalt'][allele_key] = allele_id
model.addComment(
allele_id,
self._make_internal_identifier('allele', allele_key)
)
if marker_id is not None:
# marker_id will be none if the allele
# is not linked to a marker
# (as in, it's not mapped to a locus)
geno.addAlleleOfGene(allele_id, marker_id, locus_rel)
# sequence alteration in strain
if iswildtype == '0':
sa_label = symbol
sa_id = iseqalt_id
if marker_key is not None \
and allele_label != marker_label and marker_key != '':
# sequence alteration has label reformatted(symbol)
if re.match(r".*<.*>.*", symbol):
sa_label = re.sub(r".*<", "<", symbol)
elif re.match(r"\+", symbol):
# TODO: Check to see if this is the proper handling
# as while symbol is just +,
# marker symbol has entries without any <+>.
sa_label = '<+>'
geno.addSequenceAlterationToVariantLocus(iseqalt_id,
allele_id)
else:
# make the sequence alteration == allele
sa_id = allele_id
                    # this will end up adding the non-located transgenes
                    # as sequence alterations; also remove the < and > from the label
sa_label = re.sub(r'[\<\>]', '', sa_label)
geno.addSequenceAlteration(sa_id, sa_label, None, name)
self.label_hash[sa_id] = sa_label
strain_id = self.idhash['strain'].get(strain_key)
# scrub out if the strain is "not specified"
if strain_id is not None and \
strain_id not in ['MGI:4867032', 'MGI:5649511']:
geno.addSequenceDerivesFrom(allele_id, strain_id)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_gxd_allele_pair_view(self, limit):
"""
This assumes that the genotype and alleles
have already been added to the id hashmap.
We use the Genotype methods to add all the parts we need.
Triples added:
<genotype_id> has_part <vslc>
<vslc> has_part <allele1>
<vslc> has_part <allele2>
<vslc> has_zygosity <zygosity>
:param limit:
:return:
"""
src_key = 'gxd_allelepair_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("processing allele pairs (VSLCs) for genotypes")
geno_hash = {}
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
allelepair_key = row[col.index('_allelepair_key')].strip()
genotype_key = row[col.index('_genotype_key')].strip()
allele_key_1 = row[col.index('_allele_key_1')].strip()
allele_key_2 = row[col.index('_allele_key_2')].strip()
allele1 = row[col.index('allele1')].strip()
allele2 = row[col.index('allele2')].strip()
allelestate = row[col.index('allelestate')].strip()
# NOTE: symbol = gene/marker,
# allele1 + allele2 = VSLC,
# allele1/allele2 = variant locus,
# allelestate = zygosity
# FIXME Need to handle alleles not in the *<*> format,
# incl gene traps, induced mut, & transgenics
if self.test_mode is True:
if int(genotype_key) not in self.test_keys.get('genotype'):
continue
genotype_id = self.idhash['genotype'].get(genotype_key)
if genotype_id not in geno_hash:
geno_hash[genotype_id] = set()
if genotype_id is None:
LOG.error(
"genotype_id not found for key %s; skipping", genotype_key)
continue
allele1_id = self.idhash['allele'].get(allele_key_1)
allele2_id = self.idhash['allele'].get(allele_key_2)
# Need to map the allelestate to a zygosity term
zygosity_id = self.resolve(allelestate.strip())
ivslc_id = self._make_internal_identifier('vslc', allelepair_key)
geno_hash[genotype_id].add(ivslc_id)
# TODO: VSLC label likely needs processing similar to
# the processing in the all_allele_view
# FIXME: handle null alleles
vslc_label = allele1 + '/'
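                # when the second allele is missing, choose a placeholder
                # symbol for the VSLC label based on zygosity:
                # hemizygous -> '0', heterozygous -> '+', otherwise '?'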
if allele2_id is None:
if zygosity_id in [
self.globaltt['hemizygous insertion-linked'],
self.globaltt['hemizygous-x'],
self.globaltt['hemizygous-y'],
self.globaltt['hemizygous'],
]:
vslc_label += '0'
elif zygosity_id == self.globaltt['heterozygous']:
vslc_label += '+'
elif zygosity_id == self.globaltt['indeterminate']:
vslc_label += '?'
elif zygosity_id == self.globaltt['heteroplasmic']:
vslc_label += '?' # todo is there anything else to add here?
elif zygosity_id == self.globaltt['homoplasmic']:
vslc_label += '?' # todo is there anything else to add here?
elif zygosity_id == self.globaltt['homozygous']:
# we shouldn't get here, but for testing this is handy
vslc_label += allele1
else:
                        LOG.info(
                            "Unhandled zygosity type: %s",
                            self.globaltcid[zygosity_id])
vslc_label += '?'
else:
vslc_label += allele2
model.addIndividualToGraph(
ivslc_id,
vslc_label,
self.globaltt['variant single locus complement']
)
self.label_hash[ivslc_id] = vslc_label
rel1 = rel2 = self.globaltt['has_variant_part']
if allele1_id in self.wildtype_alleles:
rel1 = self.globaltt['has_reference_part']
if allele2_id in self.wildtype_alleles:
rel2 = self.globaltt['has_reference_part']
geno.addPartsToVSLC(
ivslc_id, allele1_id, allele2_id, zygosity_id, rel1, rel2
)
# if genotype_id not in geno_hash:
# geno_hash[genotype_id] = [vslc_label]
# else:
# geno_hash[genotype_id] += [vslc_label]
if not self.test_mode and limit is not None and line_num > limit:
break
# build the gvc and the genotype label
for gt in geno_hash.keys():
if gt is None: # not sure why, but sometimes this is the case
continue
vslcs = sorted(list(geno_hash[gt]))
gvc_label = None
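            # more than one VSLC: mint a genomic variation complement (GVC)
            # id by concatenating and re-hashing the member VSLC ids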
if len(vslcs) > 1:
gvc_id = re.sub(r'_', '', ('-'.join(vslcs)))
gvc_id = re.sub(r':', '', gvc_id)
gvc_id = self.make_id(gvc_id, '_')
vslc_labels = []
for v in vslcs:
vslc_labels.append(self.label_hash[v])
gvc_label = '; '.join(vslc_labels)
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement'])
self.label_hash[gvc_id] = gvc_label
for v in vslcs:
geno.addParts(v, gvc_id, self.globaltt['has_variant_part'])
geno.addVSLCtoParent(v, gvc_id)
geno.addParts(gvc_id, gt, self.globaltt['has_variant_part'])
elif len(vslcs) == 1:
gvc_id = vslcs[0]
gvc_label = self.label_hash[gvc_id]
# type the VSLC as also a GVC
model.addIndividualToGraph(
gvc_id, gvc_label, self.globaltt['genomic_variation_complement']
)
geno.addVSLCtoParent(gvc_id, gt)
else:
LOG.info("No VSLCs for %s", gt)
# make the genotype label = gvc + background
bkgd_id = self.geno_bkgd.get(gt)
if bkgd_id is not None:
bkgd_label = self.label_hash.get(bkgd_id)
if bkgd_label is None:
bkgd_label = bkgd_id # just in case
else:
bkgd_label = 'unspecified background'
if gvc_label is not None:
genotype_label = gvc_label + ' [' + bkgd_label + ']'
else:
genotype_label = '[' + bkgd_label + ']'
self.label_hash[gt] = genotype_label
def _process_all_allele_mutation_view(self, limit):
"""
This fetches the mutation type for the alleles,
and maps them to the sequence alteration.
Note that we create a BNode for the sequence alteration because
it isn't publicly identified.
<sequence alteration id> a <SO:mutation_type>
:param limit:
:return:
"""
src_key = 'all_allele_mutation_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("getting mutation types for sequence alterations")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
allele_key = row[col.index('_allele_key')].strip()
mutation = row[col.index('mutation')].strip()
iseqalt_id = self.idhash['seqalt'].get(allele_key)
if iseqalt_id is None:
continue
# nothing will ever connect w/these 350k bnode "individuals"
# iseqalt_id = self._make_internal_identifier('seqalt', allele_key)
if self.test_mode and int(allele_key) \
not in self.test_keys.get('allele'):
continue
# TODO we might need to map the seq alteration to the MGI id
# for unlocated things; need to use hashmap
# map the sequence_alteration_type
seq_alt_type_id = self.resolve(mutation, mandatory=False)
if seq_alt_type_id == mutation:
LOG.error("No mappjng found for seq alt '%s'", mutation)
LOG.info("Defaulting to 'sequence_alteration'")
seq_alt_type_id = self.globaltt['sequence_alteration']
# HACK - if the seq alteration is a transgene,
# then make sure it is a transgenic insertion
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is not None:
allele_label = self.label_hash.get(allele_id)
if allele_label is not None and re.search(r'Tg\(', allele_label):
LOG.info(
"Found a transgenic insertion for %s", allele_label)
# transgenic_insertion, instead of plain old insertion
seq_alt_type_id = self.globaltt["transgenic_insertion"]
model.addIndividualToGraph(iseqalt_id, None, seq_alt_type_id)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_voc_annot_view(self, limit):
"""
This MGI table represents associations between things.
We add the internal annotation id to the idhashmap.
It is expected that the genotypes have already been added to the idhash
:param limit:
:return:
"""
# TODO also get Strain/Attributes (annottypekey = 1000)
# TODO what is Phenotype (Derived) vs
# non-derived? (annottypekey = 1015)
# TODO is evidence in this table? what is the evidence vocab key?
src_key = 'voc_annot_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
LOG.info("getting G2P associations")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip('\n').split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
row = line.rstrip('\n').split('\t')
annot_key = row[col.index('_annot_key')]
annot_type = row[col.index('annottype')]
object_key = row[col.index('_object_key')]
term_key = row[col.index('_term_key')]
qualifier_key = row[col.index('_qualifier_key')]
qualifier = row[col.index('qualifier')]
# term,
accid = row[col.index('accid')]
if self.test_mode is True:
if int(annot_key) not in self.test_keys.get('annot'):
continue
# qualifier of "norm" means the phenotype was measured but
# was normal, since we don't have negation or normal phenotypes
# modelled just yet, skip the row
if qualifier == 'norm':
continue
# iassoc_id = self._make_internal_identifier('annot', annot_key)
# assoc_id = self.make_id(iassoc_id)
assoc_id = None
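                # annotation types handled below:
                #   Mammalian Phenotype/Genotype -> genotype-to-phenotype assoc
                #   DO/Genotype -> genotype modeled as a disease model
                #   MCV/Marker  -> marker category (type) assignment
                #   DO/Allele   -> allele modeled as a disease model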
# Mammalian Phenotype/Genotype are curated G2P assoc
if annot_type == 'Mammalian Phenotype/Genotype':
line_num += 1
# We expect the label for the phenotype
# to be taken care of elsewhere
model.addClassToGraph(accid, None)
genotype_id = self.idhash['genotype'].get(object_key)
if genotype_id is None:
LOG.error(
"can't find genotype id for %s", object_key)
else:
# add the association
assoc = G2PAssoc(graph, self.name, genotype_id, accid)
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
                # DO/Genotype annotations are disease models
elif annot_type == 'DO/Genotype':
# skip NOT annotations for now FIXME
if qualifier_key == '1614157':
continue
genotype_id = self.idhash['genotype'].get(object_key)
if genotype_id is None:
LOG.error("can't find genotype id for %s", object_key)
else:
# add the association
assoc = Assoc(graph, self.name)
# TODO PYLINT
# Redefinition of assoc type from
# dipper.models.assoc.G2PAssoc.G2PAssoc to
# dipper.models.assoc.Association.Assoc
assoc.set_subject(genotype_id)
assoc.set_object(accid)
assoc.set_relationship(self.globaltt['is model of'])
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
elif annot_type == 'MCV/Marker':
# marker category == type
marker_id = self.idhash['marker'].get(object_key)
if str(term_key).strip() in self.localtt:
# check "Not Applicable": "reference_locus"
term_id = self.resolve(str(term_key).strip())
else:
term_id = None
                        LOG.warning('No type mapping for: %s', term_key)
# note that the accid here is an internal mouse cv term,
# and we don't use it.
if term_id is not None and marker_id is not None:
# do something special for transgenics -
# make sure these are transgenic insertions
model.addType(marker_id, term_id)
elif annot_type == 'DO/Allele': # allele/Disease
allele_id = self.idhash['allele'].get(object_key)
if allele_id is None:
LOG.error("can't find genotype id for %s", object_key)
else:
# add the association
assoc = Assoc(graph, self.name)
assoc.set_subject(allele_id)
assoc.set_object(accid)
assoc.set_relationship(self.globaltt['is model of'])
assoc.add_association_to_graph()
assoc_id = assoc.get_association_id()
if assoc_id is not None:
# add the assoc to the hashmap (using the monarch id)
self.idhash['annot'][annot_key] = assoc_id
model.addComment(assoc_id, "annot_key:" + annot_key)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_evidence_view(self, limit):
"""
Here we fetch the evidence (code and publication) for the associations.
The evidence codes are mapped from the standard GO codes to ECO.
J numbers are added for publications.
We will only add the evidence if the annotation is in our idhash.
We also pull in evidence qualifiers, as of June 2018 they are
Data Interpretation Center (eg IMPC)
external ref (eg UniProtKB:Q9JHI2-3 for Proteoform/Marker assoc)
Phenotyping Center (eg WTSI)
Resource Name (eg MGP)
MP-Sex-Specificity (eg NA, M, F)
Triples:
<annot_id> dc:evidence <evidence_id>
<pub_id> a owl:NamedIndividual
<annot_id> dc:source <pub_id>
:param limit:
:return:
"""
src_key = 'evidence_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
line_num = 0
LOG.info("getting evidence and pubs for annotations")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
line = reader.readline()
line = line.rstrip("\n")
row = line.split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
row = line.split('\t')
line_num += 1
annot_evidence_key = row[col.index('_annotevidence_key')]
annot_key = row[col.index('_annot_key')]
evidence_code = row[col.index('evidencecode')]
jnumid = row[col.index('jnumid')]
qualifier = row[col.index('term')]
qualifier_value = row[col.index('value')]
# annotation_type = row[col.index('annottype')]
if self.test_mode and annot_key not in self.test_keys.get('annot'):
continue
# add the association id to map to the evidence key
# (to attach the right note to the right assn)
self.idhash['notes'][annot_evidence_key] = annot_key
assoc_id = self.idhash['annot'].get(annot_key)
if assoc_id is None:
# assume that we only want to add the evidence/source
# for annots that we have in our db
continue
evidence_id = self.resolve(evidence_code)
reference = Reference(graph, jnumid)
reference.addRefToGraph()
# add the ECO and citation information to the annot
model.addTriple(assoc_id, self.globaltt['has evidence'], evidence_id)
model.addTriple(assoc_id, self.globaltt['Source'], jnumid)
# For Mammalian Phenotype/Genotype annotation types
# MGI adds sex specificity qualifiers here
if qualifier == 'MP-Sex-Specificity' and qualifier_value in ('M', 'F'):
model._addSexSpecificity(assoc_id, self.resolve(qualifier_value))
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_bib_acc_view(self, limit):
"""
This traverses the table twice:
once to look up the internal key to J number mapping
for the id hashmap then again to make the equivalences.
All internal keys have both a J and MGI identifier.
This will make equivalences between the different pub ids
Triples:
<pub_id> a owl:NamedIndividual
<other_pub_id> a owl:NamedIndividual
<pub_id> owl:sameAs <other_pub_id>
:param limit:
:return:
"""
src_key = 'bib_acc_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
# firstpass, get the J number mapping, and add to the global hash
LOG.info('populating pub id hash')
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(filereader)
if not self.check_fileheader(col, row, src_key):
pass
for row in filereader:
accid = row[col.index('accid')]
prefixpart = row[col.index('prefixpart')]
# 'numericpart'
object_key = int(row[col.index('_object_key')]) # likely unstable
# logicaldb = row[col.index('logicaldb')]
# logicaldb_key = row[col.index('_logicaldb_key')]
if self.test_mode and object_key not in self.test_keys.get('pub'):
continue
# we use the J number here because
# it is the externally-accessible identifier
if prefixpart != 'J:':
continue
self.idhash['publication'][object_key] = accid
reference = Reference(graph, accid)
reference.addRefToGraph()
if not self.test_mode and limit is not None and \
filereader.line_num > limit:
break
# 2nd pass, look up the MGI identifier in the hash
LOG.info("getting pub equivalent ids")
with open(raw, 'r', encoding="utf8") as csvfile:
filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(filereader) # header already checked
for row in filereader:
accid = row[col.index('accid')]
prefixpart = row[col.index('prefixpart')]
# 'numericpart'
object_key = int(row[col.index('_object_key')])
logicaldb = row[col.index('logicaldb')].strip()
logicaldb_key = row[col.index('_logicaldb_key')]
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('pub'):
continue
jid = self.idhash['publication'].get(object_key)
pub_id = None
if logicaldb_key == '29': # pubmed
pub_id = 'PMID:' + accid
elif logicaldb_key == '1' and prefixpart[:4] == 'MGI:':
# don't get the J numbers,
                    # because we don't need to make the equiv to itself.
pub_id = accid
elif logicaldb == 'Journal Link':
# some DOIs seem to have spaces
# FIXME MGI needs to FIX THESE UPSTREAM!!!!
# we'll scrub them here for the time being
accid = re.sub(r'\s+', '', accid)
# some DOIs have un-urlencoded brackets <>
accid = re.sub(r'<', '%3C', accid)
accid = re.sub(r'>', '%3E', accid)
pub_id = 'DOI:' + accid
elif logicaldb_key == '1' and re.match(r'J:', prefixpart):
# we can skip the J numbers
continue
if pub_id is not None:
# only add these to the graph if
# it's mapped to something we understand
reference = Reference(graph, pub_id)
# make the assumption that if it is a PMID, it is a journal
if re.match(r'PMID', pub_id):
reference.setType(self.globaltt['journal article'])
model.makeLeader(pub_id)
reference.addRefToGraph()
model.addSameIndividual(jid, pub_id)
else:
LOG.warning(
"Publication from (%s) not mapped for %s",
logicaldb, object_key)
if not self.test_mode and limit is not None and \
filereader.line_num > limit:
break
def _process_prb_strain_view(self, limit):
"""
Process a table to get strains (with internal ids), and their labels.
These strains are created as instances of the species that they are.
Triples:
<strain id> a GENO:intrinsic_genotype
rdfs:label "strain label"
RO:in_taxon <NCBI taxon id>
:param limit:
:return:
"""
src_key = 'prb_strain_view'
# Only 9 strain types if we want to map them
        # recombinant congenic, inbred strain, NA,
# congenic, consomic, coisogenic,
# recombinant inbred, NS, conplastic
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("getting strains and adding their taxa")
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
strain_key = row[col.index('_strain_key')].strip()
strain = row[col.index('strain')].strip()
species = row[col.index('species')].strip()
if self.test_mode is True:
if int(strain_key) not in self.test_keys.get('strain'):
continue
strain_id = self.idhash['strain'].get(strain_key)
if strain_id is not None:
self.label_hash[strain_id] = strain
# add the species to the graph as a class
species = species.strip()
sp = self.resolve(species, False)
if sp == species:
LOG.error("No taxon mapping for " + species)
# they may tag a geo name on house mouse
if species[:17] == 'M. m. domesticus ':
LOG.warning("defaulting to Mus musculus")
sp = self.globaltt['Mus musculus']
else:
LOG.warning("defaulting to genus 'Mus'")
sp = self.globaltt['Mus']
elif species in MGI.unknown_taxa:
LOG.warning("defaulting to genus 'Mus'")
sp = self.globaltt['Mus']
model.addClassToGraph(sp, None)
geno.addTaxon(sp, strain_id)
model.addIndividualToGraph(strain_id, strain, sp)
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _process_mrk_marker_view(self, limit):
"""
This is the definition of markers
(as in genes, but other genomic loci types as well).
It looks up the identifiers in the hashmap
This includes their labels, specific class, and identifiers
TODO should we use the mrk_mouse_view instead?
Triples:
<marker_id> a owl:Class OR owl:NamedIndividual
GENO:marker_type
rdfs:label <symbol>
RO:in_taxon <NCBITaxon_id>
:param limit:
:return:
"""
src_key = 'mrk_marker_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
geno = Genotype(graph)
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
LOG.info("getting markers and assigning types")
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
(marker_key,
organism_key,
marker_status_key,
symbol,
name,
latin_name,
marker_type) = line.split('\t')
if self.test_mode is True:
if int(marker_key) not in self.test_keys.get('marker'):
continue
# use only non-withdrawn markers
if marker_status_key != '2':
marker_id = self.idhash['marker'].get(marker_key)
# only pull info for mouse genes for now
# other species should come from other dbs
if organism_key != '1':
continue
if marker_id is None:
LOG.error(
"can't find %s %s in the id hash", marker_key, symbol)
# check "Not Applicable" -> "reference_locus"
mapped_marker_type = self.resolve(marker_type.strip())
# if it's unlocated, or is not a gene,
# then don't add it as a class because
# it's not added as a gene.
# everything except for genes are modeled as individuals
if mapped_marker_type in [
self.globaltt['gene'],
self.globaltt['pseudogene']]:
model.addClassToGraph(
marker_id, symbol, mapped_marker_type, name
)
model.addSynonym(
marker_id, name, self.globaltt['has_exact_synonym']
)
self.markers['classes'].append(marker_id)
else:
model.addIndividualToGraph(
marker_id, symbol, mapped_marker_type, name
)
model.addSynonym(
marker_id, name, self.globaltt['has_exact_synonym']
)
self.markers['indiv'].append(marker_id)
self.label_hash[marker_id] = symbol
# add the taxon (default to Mus m.)
# latin_name is not always a proper binomial
if latin_name in MGI.unknown_taxa: # localtt conflict
latin_name = 'Mus'
taxon_id = self.resolve(
latin_name, default=self.globaltt['Mus musculus'])
geno.addTaxon(taxon_id, marker_id)
# make MGI the leader for mouse genes.
if taxon_id == self.globaltt['Mus musculus']:
model.makeLeader(marker_id)
if not self.test_mode and limit is not None \
and line_num > limit:
break
def _process_mrk_summary_view(self, limit):
"""
Here we pull the mgiid of the features, and make equivalent (or sameAs)
associations to referenced ids.
Only adding the ENSEMBL genes and NCBI gene ids.
Will wait on other ids later.
:param limit:
:return:
"""
src_key = 'mrk_summary_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("getting markers and equivalent ids from mrk_summary_view")
line_num = 0
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')].strip()
logicaldb_key = row[col.index('_logicaldb_key')].strip()
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
mgiid = row[col.index('mgiid')].strip()
subtype = row[col.index('subtype')].strip()
short_description = row[col.index('short_description')].strip()
if self.test_mode is True and \
int(object_key) not in self.test_keys.get('marker'):
continue
if preferred == '1':
if self.idhash['marker'].get(object_key) is None:
# can't find the marker in the hash; add it here:
self.idhash['marker'][object_key] = mgiid
LOG.error(
"this marker hasn't been seen before %s %s",
mgiid, short_description)
if accid == mgiid:
# don't need to make equivalences to itself
continue
mapped_id = None
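                    # logical db keys, as used here: 60 -> ENSEMBL,
                    # 55 -> NCBI Gene, 1 -> MGI itself (skipped)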
if logicaldb_key == '60':
mapped_id = 'ENSEMBL:' + accid
elif logicaldb_key == '1':
# don't need to add the equivalence to itself.
continue
elif logicaldb_key == '55':
mapped_id = 'NCBIGene:' + accid
if mapped_id is not None:
if mgiid in self.markers['classes'] \
or subtype in ['Gene', 'Pseudogene']:
model.addClassToGraph(mapped_id, None)
model.addEquivalentClass(mgiid, mapped_id)
elif mgiid in self.markers['indiv']:
model.addIndividualToGraph(mapped_id, None)
model.addSameIndividual(mgiid, mapped_id)
# could parse the "subtype" string
# to get the kind of thing the marker is
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_mrk_acc_view(self):
"""
Use this table to create the idmap between the internal marker id and
the public mgiid.
No triples are produced in this process
a second pass through the same file is made
:return:
"""
src_key = 'mrk_acc_view'
# make a pass through the table first,
# to create the mapping between the external and internal identifiers
line_num = 0
LOG.info("mapping markers to internal identifiers")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip('\n')
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')]
prefix_part = row[col.index('prefixpart')]
logicaldb_key = row[col.index('_logicaldb_key')]
object_key = row[col.index('_object_key')]
preferred = row[col.index('preferred')]
# = row[col.index('_organism_key')]
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('marker'):
continue
# get the hashmap of the identifiers
if logicaldb_key == '1' and prefix_part == 'MGI:' and preferred == '1':
self.idhash['marker'][object_key] = accid
def _process_mrk_acc_view_for_equiv(self, limit):
"""
Add the equivalences, either sameAs or equivalentClass,
depending on the nature of the marker.
We only process the ENSEMBL genes and NCBI gene ids.
:param limit:
:return:
"""
src_key = 'mrk_acc_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
# pass through the file again,
# and make the equivalence statements to a subset of the idspaces.
# TODO verify the difference between what the
# mrk_acc_view vs mrk_summary_view buys us here.
# if nothing, then we should remove one or the other.
LOG.info("mapping marker equivalent identifiers in mrk_acc_view")
line_num = 0
col = self.tables[src_key]['columns']
with open('/'.join((self.rawdir, src_key)), 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')]
prefix_part = row[col.index('prefixpart')]
logicaldb_key = row[col.index('_logicaldb_key')]
object_key = row[col.index('_object_key')]
preferred = row[col.index('preferred')]
organism_key = row[col.index('_organism_key')]
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('marker'):
continue
# right now not caring about other organisms
                if organism_key != '1':
continue
mgiid = self.idhash['marker'].get(object_key)
if mgiid is None:
# presumably we've already added the relevant MGI ids,
# so skip those that we can't find
LOG.debug("can't find mgiid for %s", object_key)
continue
marker_id = None
if preferred == '1': # TODO what does it mean if it's 0?
if logicaldb_key == '55': # entrez/ncbi
marker_id = 'NCBIGene:' + accid
elif logicaldb_key == '1' and prefix_part != 'MGI:':
marker_id = accid
elif logicaldb_key == '60':
marker_id = 'ENSEMBL:' + accid
# TODO get non-preferred ids==deprecated?
if marker_id is not None:
if mgiid in self.markers['classes']:
model.addClassToGraph(marker_id, None)
model.addEquivalentClass(mgiid, marker_id)
elif mgiid in self.markers['indiv']:
model.addIndividualToGraph(marker_id, None)
model.addSameIndividual(mgiid, marker_id)
else:
LOG.error("mgiid not in class or indiv hash %s", mgiid)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_prb_strain_acc_view(self, limit):
"""
Use this table to create the idmap between
the internal marker id and the public mgiid.
Also, add the equivalence statements between strains for MGI and JAX
Triples:
<strain_id> a GENO:intrinsic genotype
<other_strain_id> a GENO:intrinsic_genotype
<strain_id> owl:sameAs <other_strain_id>
:param limit:
:return:
"""
src_key = 'prb_strain_acc_view'
# make a pass through the table first,
# to create the mapping between the external and internal identifiers
line_num = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("mapping strains to internal identifiers")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
tax_id = self.globaltt["Mus musculus"]
with open(raw, 'r') as reader:
row = reader.readline().rstrip("\n").split('\t')
if not self.check_fileheader(col, row, src_key):
pass
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')].strip()
prefixpart = row[col.index('prefixpart')].strip()
logicaldb_key = row[col.index('_logicaldb_key')].strip()
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
# scrub out the backticks from accids
# TODO notify the source upstream
accid = re.sub(r'`', '', accid)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('strain'):
continue
# get the hashmap of the identifiers
if logicaldb_key == '1' and prefixpart == 'MGI:' and preferred == '1':
self.idhash['strain'][object_key] = accid
model.addIndividualToGraph(
accid, self.globaltt['intrinsic genotype'], tax_id)
# The following are the stock centers for the strains
# (asterisk indicates complete)
# *1 MGI Mouse Genome Informatics
# *22 JAX Registry (null)
# *37 EMMA European Mutant Mouse Archive
# *38 MMRRC Mutant Mouse Regional Resource Center
# 39 Harwell Mammalian Genome Unit Stock List
# *40 ORNL Oak Ridge National Lab mutant resource
# *54 NCIMR NCI Mouse Repository
# *56 NMICE Neuromice.org, a consortium of three NIH-sponsored
# mutagenesis projects designed to search for
# neurological mutations
# 57 CARD Center for Animal Resources and Development @ Kumamoto U
# *70 RIKEN BRC RIKEN BioResource Center
# *71 CMMR Canadian Mouse Mutant Resource
# 84 JPGA The Center for New Mouse Models of
# Heart, Lung, BLood and Sleep Disorders,
# JAX-PGA at The Jackson Laboratory
# *87 MUGEN Network of Excellence in Integrated Functional Genomics
# in Mutant Mouse Models as Tools to Investigate the
# Complexity of Human Immunological Disease
# *90 APB Australian Phenomics Bank
# ? 91 EMS Elizabeth M. Simpson
# ? 93 NIG National Institute of Genetics,
# Mammalian Genetics Laboratory, Japan
# 94 TAC Taconic
# 154 OBS Oriental BioService , Inc.
# 161 RMRC-NLAC National Applied Research Laboratories,Taiwan, R.O.C.
# pass through the file again,
# and make the equivalence statements to a subset of the idspaces
LOG.info("mapping strain equivalent identifiers")
line_num = 0
with open(raw, 'r') as reader:
reader.readline() # read the header row; skip
for line in reader:
line = line.rstrip("\n")
line_num += 1
row = line.split('\t')
accid = row[col.index('accid')].strip()
prefixpart = row[col.index('prefixpart')].strip()
logicaldb_key = row[col.index('_logicaldb_key')].strip()
object_key = row[col.index('_object_key')].strip()
preferred = row[col.index('preferred')].strip()
# scrub out the backticks from accids
# TODO notify the source upstream
accid = re.sub(r'`', '', accid)
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('strain'):
continue
mgiid = self.idhash['strain'].get(object_key)
if mgiid is None:
# presumably we've already added the relevant MGI ids,
# so skip those that we can't find
# LOG.info("can't find mgiid for %s",object_key)
continue
strain_id = None
deprecated = False
comment = None
if preferred == '1': # what does it mean if it's 0?
if logicaldb_key == '22': # JAX
# scrub out the backticks from accids
# TODO notify the source upstream
accid = re.sub(r'`', '', accid).strip()
strain_id = 'JAX:' + accid
elif logicaldb_key == '38': # MMRRC
strain_id = accid
if not re.match(r'MMRRC:', strain_id):
strain_id = 'MMRRC:' + strain_id
elif logicaldb_key == '37': # EMMA
# replace EM: prefix with EMMA:, or for accid's
# with bare digits (e.g. 06335) prepend 'EMMA:'
strain_id = re.sub(r'^(EM:)*', 'EMMA:', accid)
elif logicaldb_key == '90': # APB
strain_id = 'APB:' + accid # Check
elif logicaldb_key == '40': # ORNL
# ORNL is not in existence any more.
# these are deprecated, and we will prefix with JAX
strain_id = 'JAX:' + accid
comment = "Originally from ORNL."
deprecated = True
# add these as synonyms of the MGI mouse
model.addSynonym(mgiid, accid)
elif logicaldb_key == '54': # NCIMR
strain_id = 'NCIMR:' + accid
# CMMR not great - doesn't resolve well
# elif logicaldb_key == '71':
# strain_id = 'CMMR:'+accid
elif logicaldb_key == '56': # neuromice
# neuromice.org doesn't exist any more.
# but all these are actually MGI ids
strain_id = accid
elif logicaldb_key == '70': # RIKEN
# like
# http://www2.brc.riken.jp/lab/animal/detail.php?brc_no=RBRC00160
strain_id = 'RBRC:RBRC' + accid
elif logicaldb_key == '87':
strain_id = 'MUGEN:' + accid
# I can't figure out how to get to some of the strains
# TODO get non-preferred ids==deprecated?
# TODO make these strains, rather than instance of taxon?
if strain_id is not None:
model.addIndividualToGraph(strain_id, None, tax_id)
if deprecated:
model.addDeprecatedIndividual(strain_id, [mgiid])
model.addSynonym(mgiid, accid)
else:
model.addSameIndividual(mgiid, strain_id)
if re.match(r'MMRRC', strain_id):
model.makeLeader(strain_id)
if comment is not None:
model.addComment(strain_id, comment)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_mgi_note_vocevidence_view(self, limit):
"""
Here we fetch the free text descriptions of the phenotype associations.
Triples:
<annot_id> dc:description "description text"
:param limit:
:return:
"""
src_key = 'mgi_note_vocevidence_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("getting free text descriptions for annotations")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
object_key = row[col.index('_object_key')].strip()
note = row[col.index('note')].strip()
if self.test_mode is True:
if int(object_key) not in self.test_keys.get('notes'):
continue
# object_key == evidence._annotevidence_key
annotkey = self.idhash['notes'].get(object_key)
annot_id = self.idhash['annot'].get(annotkey)
# only add the description for the annotations
# we have captured through processing
if annot_id is not None:
model.addDescription(annot_id, note.strip())
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _process_mrk_location_cache(self, limit):
src_key = 'mrk_location_cache'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("getting marker locations")
raw = '/'.join((self.rawdir, src_key))
geno = Genotype(graph)
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
marker_key = row[col.index('_marker_key')].strip()
organism_key = row[col.index('_organism_key')].strip()
chromosome = row[col.index('chromosome')].strip()
startcoordinate = row[col.index('startcoordinate')].strip()
endcoordinate = row[col.index('endcoordinate')].strip()
strand = row[col.index('strand')].strip()
version = row[col.index('version')].strip()
# only get the location information for mouse
if str(organism_key) != '1' or str(chromosome) == 'UN':
continue
if self.test_mode is True:
if int(marker_key) not in self.test_keys.get('marker'):
continue
# make the chromosome, and the build-instance
chrom_id = makeChromID(chromosome, 'NCBITaxon:10090', 'CHR')
if version is not None and version != '' and version != '(null)':
# switch on maptype or mapkey
assembly = version
build_id = 'NCBIGenome:' + assembly
geno.addChromosomeInstance(
chromosome, build_id, assembly, chrom_id)
chrom_id = makeChromID(chromosome, build_id, 'MONARCH')
if marker_key in self.idhash['marker']:
gene_id = self.idhash['marker'][marker_key]
feature = Feature(graph, gene_id, None, None)
if strand == '(null)' or strand == '':
strand = None
if startcoordinate == '(null)' or startcoordinate == '':
startcoordinate = None
if endcoordinate == '(null)' or endcoordinate == '':
endcoordinate = None
if startcoordinate is not None:
feature.addFeatureStartLocation(
int(float(startcoordinate)), chrom_id, strand)
else:
feature.addFeatureStartLocation(
startcoordinate, chrom_id, strand,
[self.globaltt['FuzzyPosition']])
if endcoordinate is not None:
feature.addFeatureEndLocation(
int(float(endcoordinate)), chrom_id, strand)
# note we don't add the uncertain end coordinate,
# because we don't know what it is.
add_as_class = False
if gene_id in self.markers['classes']:
add_as_class = True
feature.addFeatureToGraph(True, None, add_as_class)
else:
LOG.warning('marker key %s not in idhash', str(marker_key))
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def process_mgi_relationship_transgene_genes(self, limit=None):
"""
Here, we have the relationship between MGI transgene alleles,
and the non-mouse gene ids that are part of them.
We augment the allele with the transgene parts.
:param limit:
:return:
"""
src_key = 'mgi_relationship_transgene_genes'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("getting transgene genes")
raw = '/'.join((self.rawdir, src_key))
geno = Genotype(graph)
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
# rel_key = row[col.index('rel_key')].strip()
allele_key = int(row[col.index('object_1')])
allele_id = row[col.index('allele_id')]
# allele_label = row[col.index('allele_label')].strip()
# category_key = row[col.index('category_key')].strip()
# category_name = row[col.index('category_name')].strip()
# property_key = row[col.index('property_key')].strip()
# property_name = row[col.index('property_name')].strip()
gene_num = int(row[col.index('property_value')])
if self.test_mode and allele_key not in self.test_keys.get('allele') \
and gene_num not in self.test_ids:
continue
gene_id = 'NCBIGene:' + str(gene_num)
# geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part'])
seqalt_id = self.idhash['seqalt'].get(allele_key)
if seqalt_id is None:
seqalt_id = allele_id
geno.addSequenceDerivesFrom(seqalt_id, gene_id)
if not self.test_mode and limit is not None and \
reader.line_num > limit:
break
def process_mgi_note_allele_view(self, limit=None):
"""
These are the descriptive notes about the alleles.
Note that these notes have embedded HTML -
should we do anything about that?
:param limit:
:return:
"""
src_key = 'mgi_note_allele_view'
line_num = 0
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
LOG.info("Assembling notes on alleles")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
notehash = {}
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
object_key = row[col.index('_object_key')].strip()
notetype = row[col.index('notetype')].strip()
note = row[col.index('note')].strip()
sequencenum = row[col.index('sequencenum')].strip()
# read all the notes into a hash to concatenate
if object_key not in notehash:
notehash[object_key] = {}
if notetype not in notehash[object_key]:
notehash[object_key][notetype] = []
if len(notehash[object_key][notetype]) < int(sequencenum):
for i in range(
len(notehash[object_key][notetype]),
int(sequencenum)
):
notehash[object_key][notetype].append('')
notehash[object_key][notetype][int(sequencenum) - 1] = note.strip()
# finish iteration over notes
line_num = 0
for allele_key in notehash:
line_num += 1
if self.test_mode is True:
if int(allele_key) not in self.test_keys.get('allele'):
continue
allele_id = self.idhash['allele'].get(allele_key)
if allele_id is None:
continue
for n in notehash[allele_key]:
# pretty chatty for expected behavior
# LOG.info(
# "found %d %s notes for %s",
# len(notehash[allele_key]), n, allele_id)
notes = ''.join(notehash[allele_key][n])
notes += ' [' + n + ']'
model.addDescription(allele_id, notes)
if not self.test_mode and limit is not None and line_num > limit:
break
def _process_prb_strain_genotype_view(self, limit=None):
"""
Here we fetch the free text descriptions of the phenotype associations.
Triples:
<annot_id> dc:description "description text"
:param limit:
:return:
"""
src_key = 'prb_strain_genotype_view'
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
LOG.info("Getting genotypes for strains")
raw = '/'.join((self.rawdir, src_key))
col = self.tables[src_key]['columns']
with open(raw, 'r', encoding="utf8") as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
row = next(reader)
if not self.check_fileheader(col, row, src_key):
pass
for row in reader:
strain_key = row[col.index('_strain_key')].strip()
genotype_key = row[col.index('_genotype_key')].strip()
if self.test_mode is True and \
int(genotype_key) not in self.test_keys.get('genotype') \
and int(strain_key) not in self.test_keys.get('strain'):
continue
strain_id = self.idhash['strain'].get(strain_key)
if strain_id is None:
strain_id = self._make_internal_identifier(
'strain', strain_key)
genotype_id = self.idhash['genotype'].get(genotype_key)
if genotype_id is None:
genotype_id = self._make_internal_identifier(
'genotype', genotype_key)
if strain_id is not None and genotype_id is not None:
self.strain_to_genotype_map[strain_id] = genotype_id
graph.addTriple(strain_id, self.globaltt['has_genotype'], genotype_id)
# TODO
# verify if this should be contingent on the exactness or not
# if qualifier == 'Exact':
# gu.addTriple(
# graph, strain_id,
# self.globaltt['has_genotype'],
# genotype_id)
# else:
# gu.addXref(graph, strain_id, genotype_id)
if not self.test_mode and limit is not None and reader.line_num > limit:
break
def _make_internal_identifier(self, prefix, key):
"""
This is a special MGI-to-MONARCH-ism.
MGI tables have unique keys that we use here, but don't want to
necessarily re-distribute those internal identifiers.
Therefore, we make them into keys in a consistent way here.
:param prefix: the object type to prefix the key with,
since the numbers themselves are not unique across tables
:param key: the number
:return:
"""
# these are just more blank node identifiers
iid = self.make_id('mgi' + prefix + 'key' + key, '_')
return iid
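    # Illustrative example (an added note, not in the original source): a call such as
    #   self._make_internal_identifier('allele', '12345')
    # feeds the string 'mgiallelekey12345' to self.make_id(..., '_'), producing a
    # deterministic blank-node style identifier so the raw MGI table key is not redistributed.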
# def _querysparql(self):
#
# #load the graph
# vg = Graph()
# vg.parse(self.outfile, format="turtle")
#
# qres = g.query(
# """SELECT DISTINCT ?aname ?bname
# WHERE {
# ?a foaf:knows ?b .
# ?a foaf:name ?aname .
# ?b foaf:name ?bname .
# }""")
#
# for row in qres:
# print("%s knows %s" % row)
|
monarch-initiative/dipper
|
dipper/sources/MGI.py
|
MGI.py
|
py
| 99,120
|
python
|
en
|
code
| 53
|
github-code
|
6
|
30086443751
|
import os
import pickle
import numpy as np
from .util import draw_roc
from .statistic import get_EER_states, get_HTER_at_thr
from sklearn.metrics import roc_auc_score
def eval_acer(results, is_print=False):
"""
:param results: np.array shape of (N, 2) [pred, label]
:param is_print: print eval score
:return: score
"""
ind_n = (results[:, 1] == 0)
ind_p = (results[:, 1] == 1)
fp = (results[ind_n, 0] == 1).sum()
fn = (results[ind_p, 0] == 0).sum()
apcer = fp / ind_n.sum() * 100
bpcer = fn / ind_p.sum() * 100
acer = (apcer + bpcer) / 2
if is_print:
print('***************************************')
print('APCER BPCER ACER')
print('{:.4f} {:.4f} {:.4f}'.format(apcer, bpcer, acer))
print('***************************************')
return 100 - acer
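# Worked example (added illustration, not part of the original module): if 10 rows have
# label 0 and 2 of them are predicted as 1, APCER = 2 / 10 * 100 = 20; if 10 rows have
# label 1 and 1 of them is predicted as 0, BPCER = 1 / 10 * 100 = 10; then
# ACER = (20 + 10) / 2 = 15 and eval_acer returns 100 - 15 = 85.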
def eval_hter(results, is_print=False):
"""
:param results: np.array shape of (N, 2) [pred, label]
:param is_print: print eval score
:return: score
"""
prob_list = results[:, 0]
label_list = results[:, 1]
cur_EER_valid, threshold, FRR_list, FAR_list = get_EER_states(prob_list, label_list)
auc_score = roc_auc_score(label_list, prob_list)
draw_roc(FRR_list, FAR_list, auc_score)
cur_HTER_valid = get_HTER_at_thr(prob_list, label_list, threshold)
if is_print:
print('***************************************')
print('EER HTER AUC Thr')
print('{:.4f} {:.4f} {:.4f} {:.4f}'.format(
cur_EER_valid * 100, cur_HTER_valid * 100, auc_score * 100, threshold))
print('***************************************')
return (1 - cur_HTER_valid) * 100
def eval_acc(results, is_print=False):
"""
:param results: np.array shape of (N, 2) [pred, label]
:param is_print: print eval score
:return: score
"""
acc = (results[:, 0] == results[:, 1]).sum() / results.shape[0] * 100
if is_print:
print('*****************')
print('ACC Pos')
print('{:.2f} {}'.format(acc, int(results[:, 0].sum())))
print('*****************')
return acc
def eval_metric(results, thr='auto', type='acc', res_dir=None):
"""
:param results: np.array shape of (N, 2) [pred, label]
:param type: acc acer or hter
:param res_dir: save eval results
:return: best score
"""
eval_tools = dict(
acc=eval_acc,
acer=eval_acer,
hter=eval_hter)
results = np.array(results)
if type not in ['acc', 'acer', 'hter']:
raise NotImplementedError
elif type == 'hter':
eval_score = eval_hter(results, is_print=True)
return eval_score
else:
eval_tool = eval_tools[type]
if isinstance(thr, float):
            results[:, 0] = (results[:, 0] > thr).astype(float)
            results = results.astype(int)
return eval_tool(results, is_print=True)
min_score = results[:, 0].min()
max_score = results[:, 0].max()
s_step = (max_score - min_score) / 1000
scores = []
thrs = []
for i in range(1000):
thre = min_score + i * s_step
thrs.append(thre)
result = results.copy()
            result[:, 0] = (results[:, 0] > thre).astype(float)
            result = result.astype(int)
score = eval_tool(result, is_print=False)
scores.append(score)
max_ind = np.argmax(np.array(scores))
if thr == 'mid':
sinds = np.argsort(results[:, 0])
best_thr = results[sinds[int(results.shape[0]/2)-1], 0]
else:
best_thr = thrs[max_ind]
print('Best Threshold: {:.4f}'.format(best_thr))
save_results = np.zeros((results.shape[0], 3))
save_results[:, 2] = results[:, 0]
        results[:, 0] = (results[:, 0] > best_thr).astype(float)
save_results[:, :2] = results[:, :2]
eval_score = eval_tool(results, is_print=True)
if res_dir is not None:
res_dir = os.path.join(res_dir, 'res_{}.pkl'.format(int(eval_score * 10)))
with open(res_dir, 'wb') as file:
pickle.dump(save_results, file)
return eval_score
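# Minimal usage sketch (added illustration; the values below are made up):
#   from utils.eval import eval_metric
#   results = [[0.91, 1], [0.12, 0], [0.75, 1], [0.33, 0]]   # [pred_score, label] rows
#   eval_metric(results, thr='auto', type='acc')   # searches 1000 thresholds, prints ACC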
|
VIS-VAR/LGSC-for-FAS
|
utils/eval.py
|
eval.py
|
py
| 4,112
|
python
|
en
|
code
| 223
|
github-code
|
6
|
24556414335
|
import json
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from flask_bcrypt import Bcrypt
from flask_redis import FlaskRedis
app = Flask(__name__)
app.config['SECRET_KEY'] = 'ghjrhhrohirorthrtohi'
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://root:root@localhost/quizapp"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
redis_cli = FlaskRedis(app)
bcrypt = Bcrypt(app)
@app.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'POST':
return {"status": "active", "message": "You are now in home page"}
@app.route('/register', methods=['POST'])
def register():
response = {"status": "False", "message": "Error occurred"}
try:
from models import User
data = request.get_json()
print(data)
hashed_password = bcrypt.generate_password_hash(data.get("password")).decode('utf-8')
register_data = User(
username=data.get('username'),
email=data.get('email'),
password=hashed_password,
contact_no=data.get('contact_no')
)
print(register_data)
db.session.add(register_data)
db.session.commit()
response = {"status": "True", "message": "data stored successfully"}
return response
except Exception as e1:
response["message"] = "Exception occurred", str(e1)
return response
@app.route('/login', methods=['POST'])
def login():
from models import User
return_response = {"status": False, "message": "Error occurred"}
try:
if request.method == "POST":
data = request.get_json()
print(data)
email = data.get('email')
user = User.query.filter_by(email=email).first()
if user and bcrypt.check_password_hash(user.password, data.get('password')):
import codecs
redis_cli.set('id', user.id)
redis_cli.set('username', user.username)
redis_un = codecs.decode(redis_cli.get('username'), 'UTF-8')
return_response = {"status": "True", "message": "Logged in successfully", "flag": "1",
"username": redis_un}
return return_response
else:
return_response = {"status": "False", "message": "Please enter valid input"}
return return_response
except Exception as e1:
return_response["message"] = "Exception occurred", str(e1)
return return_response
@app.route('/quiz', methods=['GET', 'POST'])
def quiz():
from models import QA
try:
data = request.get_json()
print(data)
if data.get("data") is not None:
import random
qes = db.session.query(QA).filter(QA.sub_name.in_(eval(data.get('data')))).limit(5).all()
qa_list = []
for qa in qes:
qa_data = {'id': qa.id, 'question': qa.question,
'options': json.loads(qa.options)}
qa_list.append(qa_data)
response = {"status": "True", "message": "data stored successfully"}
return jsonify({'response': response, "data": qa_list})
else:
import random
questions = QA.query.order_by(func.random()).limit(5).all()
qa_list = []
for qa in questions:
qa_data = {'id': qa.id, 'sub_name': qa.sub_name, 'question': qa.question,
'options': json.loads(qa.options)}
qa_list.append(qa_data)
response = {"status": "True", "message": "data stored successfully"}
return {'response': response, 'data': qa_list}
    except Exception:
        return {'error': 'invalid data'}
@app.route('/view_que', methods=['GET', 'POST'])
def view_que():
from models import QA
try:
questions = QA.query.all()
correct_options = json.dumps([x.correct_opt for x in questions])
print("correct_option", correct_options)
redis_cli.set('correct_opt', correct_options)
print("redis get", redis_cli.get('correct_opt'))
qa_list = []
for qa in questions:
qa_data = {'id': qa.id, 'sub_name': qa.sub_name, 'question': qa.question, 'options': json.loads(qa.options),
'correct_opt': qa.correct_opt}
qa_list.append(qa_data)
return jsonify({'status': True, 'data': qa_list})
except Exception as e:
return {"error": e}
@app.route("/delete", methods=['POST'])
def delete():
try:
from models import QA
data = request.get_json()
qa = QA.query.filter_by(id=data.get('id')).first()
local_object = db.session.merge(qa)
db.session.delete(local_object)
db.session.commit()
return jsonify({"Status": True, "data": "Data deleted successfully "})
except Exception as e:
return {"error": e}
@app.route("/edit", methods=['GET', 'POST'])
def edit():
try:
from models import QA
data = request.get_json()
qa = QA.query.filter_by(id=data.get('id')).first()
qa_list = []
qa_data = {'id': qa.id, 'sub_name': qa.sub_name, 'question': qa.question, 'options': json.loads(qa.options),
'correct_opt': qa.correct_opt}
qa_list.append(qa_data)
return jsonify({'status': True, 'data': qa_list})
except Exception as e:
return {"error": e}
@app.route("/add", methods=['POST'])
def add():
try:
response = {"status": "True", "message": "data added successfully"}
if request.method == 'POST':
from models import QA
data = request.get_json()
ques = data.get('question')
sub = data.get('sub_name')
data1 = {
"option1": data.get('option1'),
"option2": data.get('option2'),
"option3": data.get('option3'),
"option4": data.get('option4')
}
options = data1
correct_opt = data.get('correct_option')
qa = QA(question=ques, options=json.dumps(options), sub_name=sub, correct_opt=correct_opt)
db.session.add(qa)
db.session.commit()
return response
except Exception as e:
return {"error": e}
@app.route("/update", methods=['GET', 'POST'])
def update():
try:
from models import QA
data = request.get_json()
response = {"status": "True", "message": "data updated successfully"}
if request.method == 'POST':
ques = data.get('question')
sub = data.get('subject')
data1 = {
"option1": data.get('option1'),
"option2": data.get('option2'),
"option3": data.get('option3'),
"option4": data.get('option4')
}
options = data1
correct_opt = data.get('correct_opt')
qa = QA(question=ques, options=json.dumps(options), id=data.get('id'), correct_opt=correct_opt,
sub_name=sub)
local_object = db.session.merge(qa)
db.session.add(local_object)
db.session.commit()
return response
except Exception as e:
return {"error": e}
@app.route('/taken_quiz', methods=['POST'])
def taken_quiz():
try:
if request.method == "POST":
from models import User, QA
redis_id = int(redis_cli.get('id'))
print("redis stored id", redis_id)
import codecs
redis_corr = eval(codecs.decode(redis_cli.get('correct_opt'), 'UTF-8'))
data = request.get_json()
question1 = db.session.query(QA).filter(QA.id.in_(data.get('questions'))).all()
main_dict = {x.id: {'question': x.question, 'correct_opt': x.correct_opt} for x in question1}
user_result = {
"question": data.get('questions'),
"select_option": data.get('selected_option')
}
count = 0
for i in redis_corr:
if i in user_result["select_option"]:
count = count + 1
questions = data['questions']
sel_opt = data['selected_option']
for q in questions:
main_dict[int(q)].update({
'selected_option': sel_opt[questions.index(q)]
})
main_dict["score"] = count
user1 = User.query.filter_by(id=redis_id).first()
if user1.user_result in [None, ""]:
user1.user_result = json.dumps([main_dict])
user1.score = count
local_object = db.session.merge(user1)
db.session.add(local_object)
db.session.commit()
return {"status": True, "data": json.dumps(main_dict)}
else:
old_data = json.loads(user1.user_result)
old_data.append(main_dict)
user1.user_result = json.dumps(old_data)
local_object = db.session.merge(user1)
db.session.add(local_object)
db.session.commit()
return {"data": json.dumps(main_dict), "score": count}
except Exception as e:
return {"error": e}
@app.route('/result', methods=['POST', 'GET'])
def result():
try:
from models import User
redis_id = int(redis_cli.get('id'))
user1 = User.query.filter_by(id=redis_id).first()
if user1.user_result in [None, ""]:
return jsonify({"response": "No Quiz Taken Yet"})
user_result = json.loads(user1.user_result)
return {"response": user_result}
except Exception as e:
return {"error": e}
|
Ankita2802/Quiz_backend
|
routes.py
|
routes.py
|
py
| 9,890
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22688604701
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import Product
from apps.customers.models import Customer
from django.views.decorators.http import require_POST
from .cart import Cart
from .forms import CartAddProductForm
@login_required
def shop_index(request):
    # Count the number of products in the database
product_count = Product.objects.count()
# Count the number of customers in the database
customer_count = Customer.objects.count()
context = {
# 'products': products,
'product_count': product_count,
'customer_count': customer_count,
}
return render(request, 'shop/shop_index.html', context)
@login_required
def product_list(request):
products = Product.objects.all()
context = {
'products': products,
}
return render(request, 'shop/product_list.html', context)
@login_required
def product_detail(request, id):
product = get_object_or_404(Product, id=id)
cart_product_form = CartAddProductForm()
context = {
'product': product,
'cart_product_form': cart_product_form
}
return render(request, 'shop/product_detail.html', context)
@require_POST
def cart_add(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
form = CartAddProductForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(product=product, quantity=cd['quantity'], override_quantity=cd['override'])
return redirect('shop:cart_detail')
@require_POST
def cart_remove(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
cart.remove(product)
return redirect('shop:cart_detail')
def cart_detail(request):
cart = Cart(request)
for item in cart:
item['update_quantity_form'] = CartAddProductForm(initial={
'quantity': item['quantity'],
'override': True})
return render(request, 'shop/cart_detail.html', {'cart': cart})
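# A matching URL configuration might look like the sketch below (added illustration;
# the project's real apps/shop/urls.py is not shown here, so the patterns are assumptions
# beyond the 'shop:cart_detail' name the views redirect to):
#   from django.urls import path
#   from . import views
#
#   app_name = 'shop'
#   urlpatterns = [
#       path('', views.product_list, name='product_list'),
#       path('<int:id>/', views.product_detail, name='product_detail'),
#       path('cart/', views.cart_detail, name='cart_detail'),
#       path('cart/add/<int:product_id>/', views.cart_add, name='cart_add'),
#       path('cart/remove/<int:product_id>/', views.cart_remove, name='cart_remove'),
#   ]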
|
ordemdigitale/django-crm-v2
|
apps/shop/views.py
|
views.py
|
py
| 2,148
|
python
|
en
|
code
| 1
|
github-code
|
6
|
32509272843
|
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
import scipy.stats as sc
def myRho(T,n):
COV=0;
pq =0 ;
SY=0;
SX=0;
    EX=sum(T[0][0:])/float(len(T[0][0:]))  # expected value (mean) of x
    EY=sum(T[1][0:])/float(len(T[1][0:]))  # expected value (mean) of y
for i in range(n):
COV = COV + (T[0][i] - EX) * (T[1][i] - EY)
SX = SX + (T[0][i] - EX) ** 2
SY = SY + (T[1][i] - EY) ** 2
    pq = COV / sqrt(SX * SY)  # the correlation coefficient of x and y
    COV = COV / n  # the covariance of x and y
    t = pq / sqrt((1 - pq**2) / (n - 2))  # the t-statistic for the correlation of x and y
return (COV,pq,t)
def mysort(X) :
for b in range(0,2):
GR = 0
T = []
T=X[b][0:]
Tu = np.unique(T)
if b==0 :
GRAX = np.zeros(np.shape(T), dtype='float')
if b==1 :
GRAX1 = np.zeros(np.shape(T), dtype='float')
for i in Tu:
iNd = np.where(T == i)[0]
nx = len(iNd)
if b==0:
GRAX[iNd] = np.mean(np.arange(GR, GR + nx))
GR = GR + nx
if b==1:
GRAX1[iNd] = np.mean(np.arange(GR, GR + nx))
GR = GR + nx
return(GRAX,GRAX1)
X = np.random.uniform (0, 12, 500)
Y=np.exp(X)+np.random.normal(0,1,500)
M = np.zeros((500, 2));
A = np.random.randint(0, 15, 10)
B=np.random.randint(0, 15, 10)
C=list([X,Y])
D,R=mysort(C)
print(C)
print(D,R)
plt.figure(2)
plt.scatter(D,R, marker='+')
C=list([D,R])
a=myRho(C,500)
print(a)
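# Cross-check (added illustration): scipy's pearsonr on the same ranked data should
# agree with the correlation coefficient pq returned by myRho above.
print(sc.pearsonr(D, R))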
plt.show()
|
Varelafv/TD6.py
|
exo3.py
|
exo3.py
|
py
| 1,511
|
python
|
en
|
code
| 0
|
github-code
|
6
|
20678009932
|
from django.test import TestCase
from django.urls import reverse
from apps.articles.models import Tag
from apps.users.models import CustomUser
from .models import Tool
# Create your tests here.
test_tool = {
"name": "tool_name",
"slug": "tool_slug",
"description": "tool_description",
"img_link": "https://tool_img_link.com/tool.png",
"link": "https://tool_link.com",
}
test_tag = {
"tag_name": "test_tag",
"img_link": "https://test_tag.org/test.png",
"description": "long test description",
"slug": "test_tag",
"source_link": "https://test_tag.org/",
}
normal_user = {"username": "normal", "email": "normal@user.com", "password": "foo"}
class TestToolsModels(TestCase):
def setUp(self):
self.test_obj = test_tool
self.test_tag = test_tag
tool = Tool.objects.create(**self.test_obj)
tool.tags.add(Tag.objects.create(**self.test_tag))
tool.save()
def test_tool_created(self):
obj = Tool.objects.get(name=self.test_obj["name"])
self.assertEqual(obj.name, self.test_obj["name"])
self.assertEqual(obj.slug, self.test_obj["slug"])
self.assertEqual(obj.description, self.test_obj["description"])
self.assertEqual(obj.img_link, self.test_obj["img_link"])
self.assertEqual(obj.link, self.test_obj["link"])
self.assertEqual(obj.tags.all()[0].tag_name, self.test_tag["tag_name"])
class TestToolsListViews(TestCase):
def setUp(self):
self.test_obj = test_tool
self.test_tag = test_tag
self.test_user = normal_user
CustomUser.objects.create_user(**self.test_user)
tool = Tool.objects.create(**self.test_obj)
tool.tags.add(Tag.objects.create(**self.test_tag))
tool.save()
def test_tools_list_view(self):
obj = Tool.objects.all()
response = self.client.get(reverse("tools_list"))
self.assertQuerysetEqual(response.context["tools"], obj, transform=lambda x: x)
self.assertTemplateUsed(response, "tools/tools_list.html")
self.assertEqual(response.status_code, 200)
def test_main_author(self):
main_author = CustomUser.objects.get(username=self.test_user["username"])
main_author.main_user = True
main_author.save()
response = self.client.get(reverse("tools_list"))
self.assertEqual(response.context["main_author"], main_author)
class TestToolsDetailViews(TestCase):
def setUp(self):
self.test_obj = test_tool
self.test_tag = test_tag
self.test_user = normal_user
CustomUser.objects.create_user(**self.test_user)
tool = Tool.objects.create(**self.test_obj)
tool.tags.add(Tag.objects.create(**self.test_tag))
tool.save()
def test_tools_detail_view(self):
obj = Tool.objects.get(name=self.test_obj["name"])
response = self.client.get(
reverse("tool_detail", kwargs={"slug": self.test_obj["slug"]})
)
self.assertEqual(response.context["tool"], obj)
self.assertTemplateUsed(response, "tools/tool_detail.html")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["tool"].name, self.test_obj["name"])
self.assertEqual(response.context["tool"].slug, self.test_obj["slug"])
self.assertEqual(
response.context["tool"].description, self.test_obj["description"]
)
self.assertEqual(response.context["tool"].img_link, self.test_obj["img_link"])
self.assertEqual(response.context["tool"].link, self.test_obj["link"])
self.assertEqual(
response.context["tool"].tags.all()[0].tag_name, self.test_tag["tag_name"]
)
def test_main_author(self):
main_author = CustomUser.objects.get(username=self.test_user["username"])
main_author.main_user = True
main_author.save()
response = self.client.get(reverse("tools_list"))
self.assertEqual(response.context["main_author"], main_author)
|
akundev/akundotdev
|
apps/tools/tests.py
|
tests.py
|
py
| 4,039
|
python
|
en
|
code
| 0
|
github-code
|
6
|
39830056794
|
import sys
from collections import deque
sys.setrecursionlimit(10**7)
n = int(sys.stdin.readline().rstrip())
k = int(sys.stdin.readline().rstrip())
graph = [[0] * n for _ in range(n)]
direction = deque()
moves = [[0, 1], [1, 0], [0, -1], [-1, 0]]
snake = deque()
for i in range(k):
x, y = map(int, sys.stdin.readline().rstrip().split())
graph[x - 1][y - 1] = 1
l = int(sys.stdin.readline().rstrip())
for i in range(l):
direction.append(list(map(str, sys.stdin.readline().rstrip().split())))
dtime, dire = direction.popleft()
def moveSnake(x, y, time, d):
global dtime, dire
    # stop when the current position is a wall or the snake's own body
    if x < 0 or x >= n or y < 0 or y >= n or graph[x][y] == 2:
        return time
    # if the current cell has no apple and the snake queue is non-empty
    if graph[x][y] != 1 and snake:
        # pop the oldest coordinate (the tail) from the snake queue
        sx, sy = snake.popleft()
        # mark that coordinate as an empty cell again
        graph[sx][sy] = 0
    # mark the current coordinate on the grid as occupied by the snake
    graph[x][y] = 2
    # append the current coordinate to the snake's body
    snake.append([x, y])
    # when the current time matches the next direction-change time
    if time == int(dtime):
        # turn left
        if dire == 'L':
            d = d - 1 if d > 0 else 3
        # turn right
        else:
            d = d + 1 if d < 3 else 0
        # if more direction changes remain, pop the next one from the queue
        if direction:
            dtime, dire = direction.popleft()
return moveSnake(x + moves[d][0], y + moves[d][1], time + 1, d)
print(moveSnake(0, 0, 0, 0))
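# Expected input format, inferred from the parsing above (shown only as an illustration):
#   6        <- n, board size
#   3        <- k, number of apples
#   3 4      <- k lines of apple coordinates
#   2 5
#   5 3
#   3        <- l, number of direction changes
#   3 D      <- l lines of "<time> <L|D>"
#   15 L
#   17 D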
|
omg7152/CodingTestPractice
|
Etc/Snake_3190.py
|
Snake_3190.py
|
py
| 1,660
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
70926690428
|
"""
youtube_downloader.py notes:
- May occasionally have errors. Just re-run.
- Caches to prevent duplicate downloading of videos.
"""
from pytube import YouTube
def download_youtube(video_url, videoname='0'):
if check_cache(video_url):
print(f"youtube_downloader.py: Video already exists.")
return
else:
# print(f"youtube_downloader.py: Downloading \"{videoname}\".")
append_cache(video_url)
yt = YouTube(video_url)
yt.streams \
.filter(progressive=True, file_extension='mp4') \
.order_by('resolution')[-1] \
.download(output_path='videos',
filename=videoname)
# Cache prevents downloading of duplicate videos from similar search terms
def append_cache(text, cachefile="video_indexer/downloaded.txt"):
"""Append the text to a cache file"""
with open(cachefile, "a") as f:
f.write(text+'\n')
def read_cache(cachefile="video_indexer/downloaded.txt"):
"""Return the list of text from cache file"""
with open(cachefile, 'r') as f:
cache = [video_id.strip() for video_id in f]
return cache
def check_cache(text, cachefile="video_indexer/downloaded.txt"):
"""Check if cachefile contains given text"""
cache = read_cache(cachefile)
return text in cache
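# Example usage (added illustration; the URL and name below are placeholders):
#   download_youtube('https://www.youtube.com/watch?v=<video_id>', videoname='my_clip')
# This grabs the highest-resolution progressive mp4 into ./videos/ and records the URL in
# video_indexer/downloaded.txt so a second call with the same URL is skipped.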
|
jetnew/carelytics
|
video_indexer/youtube_downloader.py
|
youtube_downloader.py
|
py
| 1,291
|
python
|
en
|
code
| 3
|
github-code
|
6
|
40281678144
|
colors = {
'gray' :( 0.56862745, 0.56862745, 0.56862745, 1),
'orange':( 0.96470588, 0.34509804, 0.05882352, 1),
'green' :( 0.50196078, 0.91372549, 0.09019607, 1),
'white' :( 0.8 , 0.8, 0.8, 1),
'yellow' :( 1.0, 0.792156862745098, 0.0941176470588235, 0.3),
'mark' : (0.9058823529411765, 1.0, 0.4431372549019608, 0.5)
}
class tile:
mark = colors['mark']
FLOOR = 1
VOID = 0
EDGE = 2
TELEPORT = 3
def __init__(self, typ, obj, location,tpLocation = None):
self.type = typ
self.obj = obj
self.location = location
self.colors = colors['white']
self.setColor()
self.tpLocation = tpLocation
def setColor(self):
if self.type == 2 or self.type == 1:
self.colors = colors['gray']
if self.type == 3:
self.colors = colors['orange']
if self.obj != None:
if '$' in str(self.obj.symbol):
self.colors = colors['yellow']
def checkTile(self, box):
for child in box.location:
if child == self.location:
# void
if self.type == tile.VOID:
if self.obj != None:
if self.obj.symbol == "$":
return True
else: return False
else: return False
# floor
else:
return True
return False
def getLocation(self):
return self.location
def setObj(self, obj):
self.obj = obj
self.setColor()
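if __name__ == "__main__":
    # Minimal usage sketch (added illustration, not part of the original module):
    # a floor tile with no object is colored gray, a teleport tile orange.
    floor_tile = tile(tile.FLOOR, None, (3, 4))
    teleport_tile = tile(tile.TELEPORT, None, (0, 0), tpLocation=(5, 5))
    print(floor_tile.colors, teleport_tile.colors)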
|
NunoSilvaa/AI_project
|
model/tiles.py
|
tiles.py
|
py
| 1,742
|
python
|
en
|
code
| 0
|
github-code
|
6
|
29214477520
|
import pytest
from datetime import datetime
from ..forms import PostForm, CategoryForm, CommentForm
from accounts.models import Profile, User
from ..models import Post
@pytest.fixture
def create_test_user():
data = {"email": "test@test.com", "password": "a/1234567"}
return User.objects.create_user(**data, is_verify=True)
@pytest.fixture
def user_profile(create_test_user):
user = create_test_user
return Profile.objects.get(user=user)
@pytest.fixture
def create_post(user_profile):
data = {
"author": user_profile,
"title": "test title",
"content": "test content",
"published_date": datetime.now(),
}
return Post.objects.create(**data)
class TestPostForm:
def test_post_form_valid_data(self):
data = {
"title": "test title",
"content": "test content",
"published_date": datetime.now(),
"captcha": 1,
}
post = PostForm(data=data)
# post is not valid because of captcha field
assert not post.is_valid()
assert len(post.errors) == 1
assert post.has_error("captcha")
def test_post_form_missing_field_data(self):
data = {
"title": "test title",
"content": "test content",
"captcha": 1,
}
post = PostForm(data=data)
# post is not valid because of captcha field and
# missing published_date field
assert not post.is_valid()
assert len(post.errors) == 2
assert post.has_error("captcha")
assert post.has_error("published_date")
def test_post_form_no_data(self):
post = PostForm()
assert not post.is_valid()
@pytest.mark.django_db
class TestCommentForm:
def test_comment_form_valid_data(self, create_post):
data = {
"post": create_post.id,
"name": "test",
"email": "test@test.com",
"message": "test message",
}
comment = CommentForm(data=data)
assert comment.is_valid()
assert len(comment.errors) == 0
def test_comment_form_invalid_data(self, create_post):
data = {
"post": create_post.id,
"name": "test",
"email": "invalid email format",
"message": "test message",
}
comment = CommentForm(data=data)
assert not comment.is_valid()
assert len(comment.errors) == 1
def test_comment_form_invalid_data_missing_post_field(self):
data = {
"name": "test",
"email": "test@test.com",
"message": "test message",
}
comment = CommentForm(data=data)
assert not comment.is_valid()
assert len(comment.errors) == 1
class TestCategoryForm:
def test_category_form_valid_data(self):
data = {"name": "test", "captcha": 1}
cat = CategoryForm(data=data)
# category is not valid because of captcha field
assert not cat.is_valid()
assert len(cat.errors) == 1
assert cat.has_error("captcha")
def test_category_form_invalid_data(self):
cat = CategoryForm()
assert not cat.is_valid()
|
smz6990/DRF-Blog
|
core/blog/tests/test_forms.py
|
test_forms.py
|
py
| 3,203
|
python
|
en
|
code
| 2
|
github-code
|
6
|
36164982911
|
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1500,200)
MainWindow.setStyleSheet("background-color: #282828")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.search = QtWidgets.QLineEdit(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(32)
self.search.setFont(font)
self.search.setToolTipDuration(-3)
self.search.setStyleSheet("background-color: #161a1e; color: white; border: 3px solid #161a1e")
self.search.setObjectName("search")
self.gridLayout.addWidget(self.search, 0, 0, 1, 4)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(32)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("background-color: #161a1e; color: white;")
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 0, 4, 1, 1)
self.line_1 = QtWidgets.QFrame(self.centralwidget)
self.line_1.setStyleSheet("")
self.line_1.setObjectName("line_1")
self.line_1.setFrameShape(QtWidgets.QFrame.Shape.HLine)
self.line_1.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
self.gridLayout.addWidget(self.line_1, 1, 0, 1, 5)
self.marketLabel_1 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.marketLabel_1.setFont(font)
self.marketLabel_1.setStyleSheet("color: white")
self.marketLabel_1.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.marketLabel_1.setFixedHeight(40)
self.marketLabel_1.setObjectName("marketLabel_1")
self.gridLayout.addWidget(self.marketLabel_1, 2, 0, 1, 1)
self.marketLabel_2 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.marketLabel_2.setFont(font)
self.marketLabel_2.setStyleSheet("color: white")
self.marketLabel_2.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.marketLabel_2.setFixedHeight(40)
self.marketLabel_2.setObjectName("marketLabel_2")
self.gridLayout.addWidget(self.marketLabel_2, 2, 1, 1, 1)
self.marketLabel_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.marketLabel_3.setFont(font)
self.marketLabel_3.setStyleSheet("color: white")
self.marketLabel_3.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.marketLabel_3.setFixedHeight(40)
self.marketLabel_3.setObjectName("marketLabel_3")
self.gridLayout.addWidget(self.marketLabel_3, 2, 2, 1, 1)
self.marketLabel_4 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.marketLabel_4.setFont(font)
self.marketLabel_4.setStyleSheet("color: white")
self.marketLabel_4.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.marketLabel_4.setFixedHeight(40)
self.marketLabel_4.setObjectName("marketLabel_4")
self.gridLayout.addWidget(self.marketLabel_4, 2, 3, 1, 1)
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setStyleSheet("")
self.line_2.setObjectName("line_2")
self.line_2.setFrameShape(QtWidgets.QFrame.Shape.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
self.gridLayout.addWidget(self.line_2, 3, 0, 1, 5)
self.tradingPairs = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(26)
self.tradingPairs.setFont(font)
self.tradingPairs.setStyleSheet("color: white;")
self.tradingPairs.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.tradingPairs.setFixedHeight(40)
self.tradingPairs.setObjectName("tradingPairs")
self.gridLayout.addWidget(self.tradingPairs, 4, 0, 1, 1)
self.lastTradedPrice = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(26)
self.lastTradedPrice.setFont(font)
self.lastTradedPrice.setStyleSheet("color: white;")
self.lastTradedPrice.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.lastTradedPrice.setObjectName("lastTradedPrice")
self.gridLayout.addWidget(self.lastTradedPrice, 4, 1, 1, 1)
self.percentageChange = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(26)
self.percentageChange.setFont(font)
self.percentageChange.setStyleSheet("color: white;")
self.percentageChange.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.percentageChange.setObjectName("percentageChange")
self.gridLayout.addWidget(self.percentageChange, 4, 2, 1, 1)
self.turnover = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(26)
self.turnover.setFont(font)
self.turnover.setStyleSheet("color: white;")
self.turnover.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.turnover.setObjectName("label_5")
self.gridLayout.addWidget(self.turnover, 4, 3, 1, 1)
self.line_3 = QtWidgets.QFrame(self.centralwidget)
self.line_3.setStyleSheet("")
self.line_3.setObjectName("line_3")
self.line_3.setFrameShape(QtWidgets.QFrame.Shape.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
self.gridLayout.addWidget(self.line_3, 5, 0, 1, 5)
self.notificationsLabel = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel.setFont(font)
self.notificationsLabel.setToolTipDuration(-3)
self.notificationsLabel.setStyleSheet("color: white")
self.notificationsLabel.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel, 6, 0, 1, 5)
self.line_4 = QtWidgets.QFrame(self.centralwidget)
self.line_4.setStyleSheet("")
self.line_4.setObjectName("line_5")
self.line_4.setFrameShape(QtWidgets.QFrame.Shape.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
self.gridLayout.addWidget(self.line_4, 7, 0, 1, 5)
self.notificationsLabel_1 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel_1.setFont(font)
self.notificationsLabel_1.setToolTipDuration(-3)
self.notificationsLabel_1.setStyleSheet("color: white")
self.notificationsLabel_1.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel_1.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel_1, 8, 0, 1, 1)
self.notificationsLabel_2 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel_2.setFont(font)
self.notificationsLabel_2.setToolTipDuration(-3)
self.notificationsLabel_2.setStyleSheet("color: white")
self.notificationsLabel_2.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel_2.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel_2, 8, 1, 1, 1)
self.notificationsLabel_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel_3.setFont(font)
self.notificationsLabel_3.setToolTipDuration(-3)
self.notificationsLabel_3.setStyleSheet("color: white")
self.notificationsLabel_3.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel_3.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel_3, 8, 2, 1, 1)
self.notificationsLabel_4 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel_4.setFont(font)
self.notificationsLabel_4.setToolTipDuration(-3)
self.notificationsLabel_4.setStyleSheet("color: white")
self.notificationsLabel_4.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel_4.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel_4, 8, 3, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "найти"))
self.marketLabel_1.setText(_translate("MainWindow", "Торговая пара"))
self.marketLabel_2.setText(_translate("MainWindow", "Цена"))
self.marketLabel_3.setText(_translate("MainWindow", "Изменения, %"))
self.marketLabel_4.setText(_translate("MainWindow", "Объемы"))
self.notificationsLabel.setText(_translate("MainWindow", "Уведомления"))
self.notificationsLabel_1.setText(_translate("MainWindow", "Торговая пара"))
self.notificationsLabel_2.setText(_translate("MainWindow", "Цена"))
self.notificationsLabel_3.setText(_translate("MainWindow", "Изменения, %"))
self.notificationsLabel_4.setText(_translate("MainWindow", "Дата и время"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec())
|
Framon64/CryptoProgram
|
programWindow.py
|
programWindow.py
|
py
| 11,677
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35816996435
|
"""
chemreac.util.pyutil
--------------------
Utility functions used throughout chemreac.
"""
from __future__ import (absolute_import, division, print_function)
import sys
import numpy as np
import time
def monotonic(arr, positive=0, strict=False):
"""
    Check monotonicity of a series
Parameters
----------
arr: array_like
Array to be checked for monotonicity
positive: -1, 0 or 1 (default: 0)
-1: negative, 1: positive, 0: either
strict: bool (default: False)
Disallow zero difference between neighboring instances
Examples
--------
>>> monotonic([0, 0, -1, -2])
True
>>> monotonic([0, 0, 1, 2], strict=True)
False
>>> monotonic([1, 2, 3], -1)
False
Returns
-------
bool
"""
if positive not in (-1, 0, 1):
raise ValueError("positive should be either -1, 0 or 1")
delta = np.diff(arr)
if positive in (0, 1):
if strict:
if np.all(delta > 0):
return True
else:
if np.all(delta >= 0):
return True
if positive in (0, -1):
if strict:
if np.all(delta < 0):
return True
else:
if np.all(delta <= 0):
return True
return False
def set_dict_defaults_inplace(dct, *args):
"""
Modifies a dictionary in-place by populating key/value pairs present in the
default dictionaries which have no key in original dictionary `dct`. Useful
for passing along keyword argument dictionaries between functions.
Parameters
----------
dct: dict
*args: dictionaries
Returns
-------
dct: (possibly modified) input dictionary
Examples
--------
>>> d = {1: None}
>>> set_dict_defaults_inplace(d, {2: []})
>>> d == {1: None, 2: []}
True
>>> f = {'a': 1, 'b': 3}
>>> g = {'a': 1}
>>> set_dict_defaults_inplace(g, {'b': 2, 'a': 7}, {'b': 3})
>>> f == g
True
>>> h = {42: True, 'b': 3}
>>> i = {}
>>> set_dict_defaults_inplace(i, {42: True, 'b': 2}, {'b': 3})
>>> h == i
True
"""
ori_dct_keys = dct.keys()
new_dct = {}
for defaults in args:
for k, v in defaults.items():
if k not in ori_dct_keys:
new_dct[k] = v
dct.update(new_dct)
class progress(object):
""" Print a progress bar of dots
Parameters
----------
iterable: iterable
must have :attr:`__len__`
output: fileobject
default: sys.stdout
proc_time: bool
show process time (in seconds) passed at end of iteration.
Examples
--------
>>> vals = list(range(7))
>>> squares = []
>>> for val in progress(vals, proc_time=False):
... squares.append(val**2)
...
7: .......
>>> squares
[0, 1, 4, 9, 16, 25, 36]
"""
def __init__(self, iterable, output=None, proc_time=True):
if proc_time is True:
try:
proc_time = time.process_time # Py 3
except AttributeError:
proc_time = time.clock # Py 2
self._proc_time = proc_time
self.iterable = iterable
self.output = output
self._cur_pos = 0
self._len = len(iterable)
if proc_time:
self._t0 = self._proc_time()
def __iter__(self):
print("%d: " % self._len, file=self.output, end='')
return self
def next(self):
if self._cur_pos >= self._len:
print(' (%.3f s)\n' % (self._proc_time() - self._t0)
if self._proc_time else '\n', file=self.output, end='')
raise StopIteration
else:
self._cur_pos += 1
try:
print('.', file=self.output, end='', flush=True)
except TypeError:
print('.', file=self.output, end='')
if self.output is None or self.output is sys.stdout:
sys.stdout.flush()
return self.iterable[self._cur_pos - 1]
__next__ = next
|
chemreac/chemreac
|
chemreac/util/pyutil.py
|
pyutil.py
|
py
| 4,068
|
python
|
en
|
code
| 14
|
github-code
|
6
|
15415931528
|
import turtle
angles = [60, -120, 60, 0]
size_of_snowflake = 300
def get_input_depth():
    message = "Please provide the depth: "
    value = input(message)
    while not value.isnumeric():
        print("Input must be a positive integer!")
        value = input(message)
return int(value)
def setup_screen():
print("Setup screen")
turtle.title("koch Snowflake")
turtle.setup(640, 600)
turtle.hideturtle()
turtle.penup()
turtle.backward(240)
turtle.tracer(800)
turtle.bgcolor('white')
def draw_koch(size, depth):
if depth> 0:
for angle in angles:
draw_koch(size/3, depth-1)
turtle.left(angle)
else:
turtle.forward(size)
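# Added note: at recursion depth d each side is drawn as 4**d segments of length
# size_of_snowflake / 3**d, so the finished snowflake consists of 3 * 4**d strokes.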
depth = get_input_depth()
setup_screen()
turtle.color("sky blue")
turtle.penup()
turtle.setposition(-180, 0)
turtle.left(30)
turtle.pendown()
for _ in range(3):
draw_koch(size_of_snowflake, depth)
turtle.right(120)
turtle.update()
print("Done")
turtle.done()
|
singh-hemant/python-turtle-examples
|
koch_snowflake.py
|
koch_snowflake.py
|
py
| 1,025
|
python
|
en
|
code
| 0
|
github-code
|
6
|
10114743152
|
from __future__ import annotations
from typing import Tuple
import stage.tile_types as tile_types
from stage.game_map import GameMap
class Room:
"""Klass för att representera ett rektangulärt rum"""
def __init__(self, x: int, y: int, width: int, height: int) -> None:
self.x1 = x
self.y1 = y
self.x2 = x + width
self.y2 = y + height
self.width = width
self.height = height
    # @property lets us access the method like a plain attribute rather than a method call,
    # e.g. we can write my_room.center instead of my_room.center()
@property
def center(self) -> Tuple[int, int]:
"""Återvänder koordinaterna till centrum av rummet"""
center_x = int((self.x1 + self.x2) / 2)
center_y = int((self.y1 + self.y2) / 2)
return center_x, center_y
@property
def inner(self) -> Tuple[slice, slice]:
"""Återvänder den inre arean av det givna rummet"""
# slice() kommer att återge de givna argumenten
return slice(self.x1 + 1, self.x2), slice(self.y1 + 1, self.y2)
def intersects(self, other: Room) -> bool:
"""Återvänder sant om den här instansen av rummet överlappar med ett annat rum"""
return (
self.x1 <= other.x2
and self.x2 >= other.x1
and self.y1 <= other.y2
and self.y2 >= other.y1
)
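# Minimal usage sketch (added illustration, not part of the original module):
#   a = Room(0, 0, 6, 4)
#   b = Room(3, 2, 5, 5)
#   a.center          # -> (3, 2)
#   a.intersects(b)   # -> True, the rectangles overlap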
|
programmerare93/Dungeons_of_Kwargs
|
src/stage/rooms.py
|
rooms.py
|
py
| 1,435
|
python
|
sv
|
code
| 4
|
github-code
|
6
|
17944469658
|
import tensorflow as tf
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Dense, Activation, Flatten
# import tensorflow as tf
# tf.python.control_flow_ops = tf # some hack to get tf running with Dropout
# 224x224
def alex_net_keras(x, num_classes=2, keep_prob=0.5):
x = Conv2D(92, kernel_size=(11, 11), strides=(4, 4), padding='same')(x) # conv 1
# x = BatchNormalization()(x)
x = Activation('relu')(x)
# LRN is missing here - Caffe.
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x) # pool 1
x = Conv2D(256, kernel_size=(5, 5), padding='same')(x) # miss group and pad param # conv 2
# x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x) # pool 2
x = Conv2D(384, kernel_size=(3, 3), padding='same')(x) # conv 3
# x = BatchNormalization()(x)
x = Activation('relu')(x)
# x = MaxPooling2D(pool_size=(3, 3))(x)
x = Conv2D(384, kernel_size=(3, 3), padding='same')(x) # conv 4
# x = BatchNormalization()(x)
x = Activation('relu')(x)
# x = MaxPooling2D(pool_size=(3, 3))(x)
x = Conv2D(256, kernel_size=(3, 3), padding='same')(x) # conv 5
# x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Flatten()(x)
x = Dense(4096, kernel_initializer='normal')(x) # fc6
# dropout 0.5
# x = tf.nn.dropout(x, keep_prob=keep_prob)
# x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dense(4096, kernel_initializer='normal')(x) # fc7
# dropout 0.5
    # no overfitting observed for now, so dropout is left disabled
# x = tf.nn.dropout(x, keep_prob=keep_prob)
# x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dense(num_classes)(x)
# x = BatchNormalization()(x)
# x = Activation('softmax')(x)
return x
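if __name__ == '__main__':
    # Minimal usage sketch (assumed, not part of the original file): wrap the function in a
    # Keras functional-API model; the 224x224x3 input shape follows the "224x224" comment above.
    from keras.layers import Input
    from keras.models import Model
    inputs = Input(shape=(224, 224, 3))
    outputs = alex_net_keras(inputs, num_classes=2)
    model = Model(inputs=inputs, outputs=outputs)
    model.summary()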
|
CharlesLoo/stockPrediction_CNN
|
alexnet_keras.py
|
alexnet_keras.py
|
py
| 1,831
|
python
|
en
|
code
| 13
|
github-code
|
6
|
6727933661
|
### In this script I combined the raw features of colon and human bone marrow to run with Height GWAS summary statistics
# importing the important modules
import pandas as pd
import numpy as np
import os
from pathlib import Path
arr = os.listdir('combine')
out_dir = Path("combine")
# NOTE: 'prefix' is never defined in the original script; a placeholder value is assumed here
prefix = "combined_"
for file in arr:
    new_name = prefix + file
# First rename the files
os.rename(out_dir / file, out_dir / new_name)
# Read in the data
df = pd.read_csv(out_dir / new_name, sep="\t", header=0)
# Get the name without extension
base_name = Path(out_dir / new_name).stem
print(base_name)
# Add the file name to the columns
new_cols = [base_name + "." + column for column in df.columns[1:].values]
df.columns.values[1:] = new_cols
# Overwrite the existing files with the new data frame
df.to_csv(out_dir / new_name, sep="\t", index=False, mode="w+")
|
molgenis/benchmark-gwas-prio
|
prioritization_methods/PoPS/Combine hbm_colon_rawfeatures.py
|
Combine hbm_colon_rawfeatures.py
|
py
| 926
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38813070065
|
from glob import glob
from math import fabs
sum = 0
count = 0
abs = 0
for file in glob('data/*'):
for line in open(file):
(date, time, symbol, price, qty, eott) = line.strip().split(' ')
price = float(price)
qty = int(qty)
if date < '20170601' or date > '20180201': continue
if time < '10:00:00.000000' or time > '11:59:59.999999': continue
sum += price * qty
count += qty
abs += fabs(price * qty)
print(sum, count, abs)
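# Each data line is assumed to look like (inferred from the split above):
#   20170612 10:15:30.123456 AAPL 145.30 200 E
# i.e. date, time, symbol, price, quantity and a trailing field the script does not use.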
|
KaedeTai/exercise1
|
exercise1.py
|
exercise1.py
|
py
| 493
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73400038269
|
#!/usr/bin/env python2
import sys
sys.path.insert(0, '/root/jhbuild')
import jhbuild.main
import jhbuild.moduleset
from jhbuild.versioncontrol.git import GitBranch
import __builtin__
import json
__builtin__.__dict__['SRCDIR'] = '/root/jhbuild'
__builtin__.__dict__['PKGDATADIR'] = None
__builtin__.__dict__['DATADIR'] = None
config = jhbuild.config.Config(None, [])
config.interact = False
moduleset = jhbuild.moduleset.load(config)
repos = {}
for module in moduleset.modules.values():
if isinstance(module.branch, GitBranch):
repos[module.name] = {
'url': module.branch.module,
'branch': module.branch.branch or 'master'
}
with open('config.json', 'w') as conf:
json.dump({
'max-concurrent-indexers': 4,
'dbpath': '/db',
'repos': repos
}, conf, sort_keys=True, indent=4, separators=(',', ': '))
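# Example of the generated config.json shape (added illustration; the actual module names
# and URLs depend on the jhbuild moduleset that gets loaded):
# {
#     "dbpath": "/db",
#     "max-concurrent-indexers": 4,
#     "repos": {
#         "glib": {"branch": "master", "url": "<git URL from the moduleset>"}
#     }
# }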
|
benwaffle/gnome-hound
|
gen-conf.py
|
gen-conf.py
|
py
| 878
|
python
|
en
|
code
| 0
|
github-code
|
6
|
29913794129
|
#
# VIK Example file for working with date information
#
import os
import calendar
from datetime import date, time, datetime
def main():
os.system('clear')
## DATE OBJECTS
# Get today's date from the simple today() method from the date class
# today = date.today()
# print("Today's date is", date.today())
# print out the date's individual components
# print("Date component - Day:", date.today().day)
# print("Date component - Month:", date.today().month)
# print("Date component - Year:", date.today().year)
# retrieve today's weekday and month (0=Monday, 6=Sunday; 1=January, 12=December)
# print("Today's weekday number is", date.today().weekday())
# print("Today's month number is", date.today().month)
# Define array/list of weekday names
DayOfWeek = ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"]
# Define array/list of month names
MonthOfYear = ["January","February","March","April","May","June","July","August","September","October","November","December"]
# print ("Which is a", DayOfWeek[date.today().weekday()], "in", MonthOfYear[date.today().month])
# The above is called 'indexing into an array'
# Ask user for name
Name = input("\nHello there! What's your name? ")
# Ask user for birthday
BirthdayInput = input("\nWhen were you born (m/d/yyyy)? ")
# Convert birthday input to date
Birthday = datetime.strptime(BirthdayInput,'%m/%d/%Y')
# print(Birthday)
# Date verb tense
if Birthday.date() < date.today():
DateTense = "fell on"
elif Birthday.date() == date.today():
DateTense = "is today - Happy Birthday!"
else:
DateTense = "will fall on"
# Create and display a single-month calendar based on birthday input
BirthdayCalendar = calendar.TextCalendar(calendar.MONDAY)
BirthdayCalendarDisplay = BirthdayCalendar.formatmonth(Birthday.date().year,Birthday.date().month)
print("\n\n",BirthdayCalendarDisplay,"\n\n")
# Calculate difference in days between birth date and today
BirthdayDiff = abs(Birthday.date() - date.today())
# Calculate age in years
AgeThisYear = date.today().year - Birthday.date().year
# Determine birthday this year
BirthdayThisYear = date(date.today().year,Birthday.date().month,Birthday.date().day)
# Calculate difference in days between today and next birthday this year
if abs(BirthdayThisYear - date.today()) < 14:
BirthdayNext = abs(BirthdayThisYear - date.today())
BirthdayNextUnit = "days"
elif abs(BirthdayThisYear - date.today()) < 32:
BirthdayNext = abs(BirthdayThisYear - date.today()) / 7
BirthdayNextUnit = "weeks"
elif abs(BirthdayThisYear - date.today()) >= 32:
BirthdayNext = abs(BirthdayThisYear - date.today()) / 30.5
BirthdayNextUnit = "months"
# Symbols for future use
Sunny = '\u263c'
Cloudy = '\u2601'
Rainy = '\u2614'
print(
Name,", your birth date,",
MonthOfYear[Birthday.date().month-1],
Birthday.date().day,",",
Birthday.date().year,
DateTense,"a",
DayOfWeek[Birthday.date().weekday()],
BirthdayDiff.days,"days ago."
)
if BirthdayThisYear < date.today():
print("\nYour birthday has passed this year.\nYou turned",AgeThisYear,"years old.\n")
elif BirthdayThisYear == date.today():
print("\nIt's your birthday today - *** HAPPY BIRTHDAY! *** \nYou turned",AgeThisYear,"years old.\n")
else:
print("\nYour birthday is coming up later this year in",BirthdayNext,BirthdayNextUnit,"\n")
## DATETIME OBJECTS
# Get today's date from the datetime class
# Get the current time
if __name__ == "__main__":
main()
|
VikramDMello/Python-Learning
|
src/Lynda.com Exercise Files/Ch3/dates_start.py
|
dates_start.py
|
py
| 3,633
|
python
|
en
|
code
| 0
|
github-code
|
6
|
35976263312
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 14 19:54:28 2018
@author: andychen
"""
a=int(input("a:"))
b=int(input("b:"))
if b!=0:
b,a=a%b,b
print(a)
|
czh4/Python-Learning
|
exercise/exercise3-3.py
|
exercise3-3.py
|
py
| 181
|
python
|
en
|
code
| 1
|
github-code
|
6
|
39717267724
|
from enum import Enum
class VarTypes(Enum):
INT = "int"
FLOAT = "float"
BOOL = "bool"
STRING = "string"
VECTOR = "vector"
VOID = "void"
class Ops(Enum):
POW = "^"
NEG = "neg"
POS = "pos"
NOT_ = "not"
MAT_MULT = "@"
DOT = ".."
MULT = "*"
DIV = "/"
INT_DIV = "//"
PLUS = "+"
MINUS = "-"
GT = ">"
GT_EQ = ">="
LT = "<"
LT_EQ = "<="
EQ = "=="
NOT_EQ = "!="
AND_ = "and"
OR_ = "or"
ASSIGN = "="
GOTO = "GOTO"
GOTOF = "GOTOF"
RETURN_ = "RETURN"
GOSUB = "GOSUB"
PARAM = "PARAM"
ERA = "ERA"
ENDPROC = "ENDPROC"
PRINT = "PRINT"
PRINTLN = "PRINTLN"
READT = "READT"
READA = "READA"
READC = "READC"
WRITEF = "WRITEF"
PLOT = "PLOT"
ALLOC = "ALLOC"
VER = "VER"
class VecFilters(Enum):
F_SUM = "f_sum"
F_MEAN = "f_mean"
F_VAR = "f_var"
F_MIN = "f_min"
F_MAX = "f_max"
F_STD = "f_std"
F_NORMALIZE = "f_normalize"
F_SQUARE = "f_square"
F_CUBE = "f_cube"
F_STRIP = "f_strip"
F_LOWERCASE = "f_lowercase"
F_UPPERCASE = "f_uppercase"
F_SORT = "f_sort"
F_REVERSE = "f_reverse"
class SemanticCube(object):
"""Hold the semantic considerations table for Doflir."""
def __init__(self):
self._setup_op_categories()
self._setup_cube()
self._setup_enums_map()
self._setup_filter_reduce()
def _setup_filter_reduce(self):
"""Defines semantic considerations for filter's reduction to var."""
self._filter_reduce = {
VecFilters.F_SUM: True,
VecFilters.F_MEAN: True,
VecFilters.F_MIN: True,
VecFilters.F_MAX: True,
VecFilters.F_STD: True,
VecFilters.F_VAR: True,
VecFilters.F_NORMALIZE: False,
VecFilters.F_SQUARE: False,
VecFilters.F_CUBE: False,
VecFilters.F_STRIP: False,
VecFilters.F_LOWERCASE: False,
VecFilters.F_UPPERCASE: False,
VecFilters.F_SORT: False,
VecFilters.F_REVERSE: False,
}
def _setup_op_categories(self):
"""Defines groups of operations by their function."""
self._NUM_OPS = [
Ops.PLUS, Ops.MINUS, Ops.MULT, Ops.DIV, Ops.INT_DIV, Ops.POW,
Ops.MAT_MULT, Ops.DOT,
]
self._VEC_OPS = [
Ops.MAT_MULT, Ops.DOT, Ops.PLUS, Ops.MINUS,
]
self._REL_OPS = [
Ops.AND_, Ops.OR_, Ops.GT, Ops.GT_EQ, Ops.LT, Ops.LT_EQ, Ops.EQ,
Ops.NOT_EQ,
]
def _setup_enums_map(self):
"""Provides conversion mechanisms between operation codes and names."""
self._ops_map = {}
for op in Ops:
self._ops_map[op.value] = op
self._var_types_map = {}
for var_type in VarTypes:
self._var_types_map[var_type.value] = var_type
self._vec_filters_map = {}
for vec_filter in VecFilters:
self._vec_filters_map[vec_filter.value] = vec_filter
def _setup_cube(self):
"""Provides expected output type for a pair of operands and op."""
semantic_cube = {}
# Setup numeric operations results.
for op in self._NUM_OPS:
int_op = (VarTypes.INT, VarTypes.INT, op)
semantic_cube[int_op] = VarTypes.INT
float_op = (VarTypes.FLOAT, VarTypes.FLOAT, op)
semantic_cube[float_op] = VarTypes.FLOAT
float_int_op = (VarTypes.FLOAT, VarTypes.INT, op)
semantic_cube[float_int_op] = VarTypes.FLOAT
int_float_op = (VarTypes.INT, VarTypes.FLOAT, op)
semantic_cube[int_float_op] = VarTypes.FLOAT
# Division always produces float.
div_op = (VarTypes.INT, VarTypes.INT, Ops.DIV)
semantic_cube[div_op] = VarTypes.FLOAT
# Int division always produces int.
div_op = (VarTypes.FLOAT, VarTypes.INT, Ops.INT_DIV)
semantic_cube[div_op] = VarTypes.INT
div_op = (VarTypes.INT, VarTypes.FLOAT, Ops.INT_DIV)
semantic_cube[div_op] = VarTypes.INT
div_op = (VarTypes.FLOAT, VarTypes.FLOAT, Ops.INT_DIV)
semantic_cube[div_op] = VarTypes.INT
# Setup boolean results for relational operations.
for op in self._REL_OPS:
bool_op = (VarTypes.BOOL, VarTypes.BOOL, op)
semantic_cube[bool_op] = VarTypes.BOOL
int_op = (VarTypes.INT, VarTypes.INT, op)
semantic_cube[int_op] = VarTypes.BOOL
float_op = (VarTypes.FLOAT, VarTypes.FLOAT, op)
semantic_cube[float_op] = VarTypes.BOOL
str_op = (VarTypes.STRING, VarTypes.STRING, op)
semantic_cube[str_op] = VarTypes.BOOL
float_int_op = (VarTypes.FLOAT, VarTypes.INT, op)
semantic_cube[float_int_op] = VarTypes.BOOL
int_float_op = (VarTypes.INT, VarTypes.FLOAT, op)
semantic_cube[int_float_op] = VarTypes.BOOL
# String concatenation.
str_op = (VarTypes.STRING, VarTypes.STRING, Ops.PLUS)
semantic_cube[str_op] = VarTypes.STRING
# Setup results for vector operations.
for op in self._VEC_OPS:
vec_op = (VarTypes.VECTOR, VarTypes.VECTOR, op)
semantic_cube[vec_op] = VarTypes.VECTOR
self._cube = semantic_cube
def is_reduced(self, vec_filter):
"""Accessor for the vec filtering semantic considerations."""
return self._filter_reduce[vec_filter]
def result_type(self, op_1_type, op_2_type, operator):
"""Accessor for the semantic cube."""
target = (op_1_type, op_2_type, operator)
if target in self._cube:
return self._cube[target]
else:
return None
def result_type_str(self, op_1_type, op_2_type, operator):
"""Accessor for the semantic cube but takes a txt instead of enum."""
op_1_enum = self.type_to_enum(type_str=op_1_type)
op_2_enum = self.type_to_enum(type_str=op_2_type)
operator_enum = self.op_to_enum(op_str=operator)
return self.result_type(
op_1_type=op_1_enum,
op_2_type=op_2_enum,
operator=operator_enum
)
def type_to_enum(self, type_str):
"""Shorthand method for conversion of names to enum types."""
return self._var_types_map[type_str]
def op_to_enum(self, op_str):
"""Shorthand method for conversion of names to enum ops."""
return self._ops_map[op_str]
def filter_to_enum(self, filter_str):
"""Shorthand method for conversion of names to enum filters."""
return self._vec_filters_map[filter_str]
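# A minimal usage sketch, assuming the Ops/VarTypes/VecFilters enums defined
# earlier in this file; the expected results below follow the cube set up above.
if __name__ == '__main__':
    cube = SemanticCube()
    assert cube.result_type(VarTypes.INT, VarTypes.FLOAT, Ops.PLUS) is VarTypes.FLOAT
    assert cube.result_type(VarTypes.INT, VarTypes.INT, Ops.DIV) is VarTypes.FLOAT
    assert cube.result_type(VarTypes.STRING, VarTypes.STRING, Ops.PLUS) is VarTypes.STRING
    # unsupported operand combinations fall through to None
    assert cube.result_type(VarTypes.STRING, VarTypes.INT, Ops.PLUS) is None
    # aggregating filters reduce a vector to a scalar, transforming filters do not
    assert cube.is_reduced(VecFilters.F_SUM) is True
    assert cube.is_reduced(VecFilters.F_SORT) is False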
|
Irvel/doflir
|
SemanticCube.py
|
SemanticCube.py
|
py
| 6,726
|
python
|
en
|
code
| 0
|
github-code
|
6
|
25854008404
|
#!/usr/bin/env python
# coding: utf-8
# In[38]:
#this code takes all the raw text files outputted from AWS textract
#and combines them into one long text file and re-separates them
#so there are not multiple apps in one document
#get the raw text file output for each pdf file and append the data to one huge text doc
#not sure if this chunk is needed or not
directories = "/file/path/"
for directory in directories:
for files in directory:
with open('rawText.txt','r') as fr:
lines = fr.readlines()
with open(directories+'/all_apps.txt','a') as fw:
fw.write(lines)
#text doc with all the pdf text together
filename = directories+"/all_apps.txt"
#substring used to divide documents
sbstr = "1. agency position no."
#clean up text
with open(filename+".txt",'r') as f:
buff = []
i = 1
lines = f.readlines()
for line in lines:
line = line.lower()
line = line.strip()
buff.append(line)
output = str("".join(buff))
num = output.count(sbstr)
#split data into seperate files
list = []
for i in range(1,num+1):
file = open('file{}.txt'.format(i),'w')
data = output.split(sbstr)[i]
file.write(data)
file.close()
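# A tiny worked example of the splitting logic above (hypothetical text):
#   output = "intro 1. agency position no. app A 1. agency position no. app B"
#   output.count(sbstr) == 2 and output.split(sbstr) ==
#   ["intro ", " app A ", " app B"], so indices 1..num hold the applications
#   and index 0 (anything before the first marker) is discarded.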
|
avadodd/ocr_doc_scanning
|
split_docs.py
|
split_docs.py
|
py
| 1,271
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38775154404
|
import random
import os
import cv2
import numpy as np
import pickle
from matplotlib import style
from AI_KNearestAlogrithm import Classifier
np.set_printoptions(threshold=np.inf, suppress=True)
style.use('fivethirtyeight')
class FacialClassifier:
def __init__(self):
self.frame_array = []
self.face = []
self.face_cascade = cv2.CascadeClassifier('C:\\Users\\DELL\\Python\\#DM\\haarcascade_frontalface_default.xml')
def pick(self, pickle_file):
if os.path.isfile(pickle_file):
with open(pickle_file, 'rb') as f:
self.frame_array = pickle.load(f)
else:
raise FileNotFoundError("Pickle file not found. ")
def crop_face(self, img):
array = []
i = 0
        while len(array) == 0:
i += 1
faces = self.face_cascade.detectMultiScale(img, 1.3, 5)
for (x, y, a, b) in faces:
                # detectMultiScale returns (x, y, width, height); crop rows by height, cols by width
                array = img[y:y+b, x:x+a]
if i == 5:
return img
return array
def process_img(self, frame):
face = self.crop_face(frame)
grey_face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# frame = clahe.apply(grey_face)
# f = cv2.equalizeHist(frame)
frame = cv2.resize(face, (200, 200))
# print(f)
# cv2.imshow('window', frame)
# cv2.waitKey(0)
# frame_eigenvalue, frame_eigenvector = np.linalg.eig(frame)
return frame
def fit(self, directory=''):
pics = os.listdir(directory)
        if 'desktop.ini' in pics:
            pics.remove('desktop.ini')
random.shuffle(pics)
# pics.remove('desktop.ini')
groups = []
for pic in pics:
if pic[0] not in groups:
groups.append(pic[0])
for g in groups:
similar = []
i = 0
for pic in pics:
group = pic[0]
print('detecting face ' + str(i + 1) + ' of ' + str(len(pics)))
if group == g:
try:
frame = cv2.imread(directory + '\\' + pic)
frame_value = self.process_img(frame)
similar.append(frame_value.astype('int64'))
except:
pass
i += 1
self.frame_array.append([g, similar])
return self.frame_array
def return_face(self):
return self.face
def cache(self, pickle_file):
with open(pickle_file, 'wb') as f:
pickle.dump(self.frame_array, f)
def recognize(self, image_dir=''):
frame = []
if image_dir == '':
img = cv2.VideoCapture(0)
for i in range(40):
check, frame = img.read()
else:
frame = cv2.imread(image_dir)
self.face = frame
cv2.imshow('window', frame)
cv2.waitKey(0)
frame_eigenvalue = self.process_img(frame)
CLR = Classifier(self.frame_array, opt='list')
result = CLR.predict(frame_eigenvalue, 3)
return result
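# A minimal usage sketch for FacialClassifier; the directory and pickle paths
# below are placeholders (assumptions), and fit() expects the first character
# of every image file name to be its class label, as in the loop above.
def _facial_classifier_demo():
    clf = FacialClassifier()
    clf.fit(directory='C:\\faces\\training')  # detect, crop and resize each labelled face
    clf.cache('faces.pickle')                 # persist the processed frames
    clf.pick('faces.pickle')                  # or reload them in a later run
    print(clf.recognize())                    # grab a webcam frame and classify it with the k-NN Classifier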
class FacialNN:
def __init__(self, X, Y, w1, b1):
self.x = np.array(X)
self.y = np.array(Y)
self.w1 = np.array(w1)
# self.w2 = np.array(w2)
self.b1 = np.array(b1)
# self.b2 = np.array(b2)
self.L1 = np.array([])
self.L2 = np.array([])
def sigmoid(self, x):
return 1 / (1 + np.e ** -x)
def sigmoid_der(self, x):
return self.sigmoid(x) * (1 - self.sigmoid(x))
def preprocess(self, directory='', remove="desktop.ini", parts=6):
X = [[], [], [], [], [], []]
Y = []
if directory != '':
            pics = os.listdir(directory)
            random.shuffle(pics)
            if remove in pics:
                pics.remove(remove)
for pic in pics:
# print(pic)
frame = cv2.imread(directory + '\\' + pic)
grey_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(grey_frame, (234, 234))
part_size = int(frame.shape[0] / parts)
j = i = 0
for _ in range(6):
print(i + part_size)
frame_part = frame[i:i + part_size, j:j + part_size]
X[_].append(frame_part)
i += part_size
j += part_size
self.x = X
def cache(self, pickle_file):
with open(pickle_file, 'wb') as f:
pickle.dump(self.x, f)
def feed_forward(self):
# Layer 1:
self.WX11 = WX11 = np.dot(self.w1[0], self.x[0]) + self.b1[0]
self.WX12 = WX12 = np.dot(self.w1[1], self.x[1]) + self.b1[1]
# self.WX13 = WX13 = np.dot(self.w1[2], self.x[2]) + self.b1[2]
L1 = self.sigmoid(WX11 + WX12 + self.b1[3])
self.L2 = L1
# Layer 2:
# WX21 = np.dot(self.w2[0], L1)
# WX22 = np.dot(self.w2[1], L1)
# WX23 = np.dot(self.w2[2], L1)
# self.L2 = self.sigmoid(WX21 + WX22 + WX23 + self.b2)
    def back_propagation(self):
        # squared-error loss
        error = ((self.L2 - self.y)**2)/2
        loss = error.sum()
        print(loss)
        # derivative of the loss w.r.t. the network output
        d_out = self.L2 - self.y
        # gradient through the WX11 branch
        d1 = self.sigmoid_der(self.WX11)
        d2 = d1 * d_out
        d3 = np.dot(d2, self.x[0].T)
        # gradient through the WX12 branch
        d4 = self.sigmoid_der(self.WX12)
        d5 = d4 * d_out
        d6 = np.dot(d5, self.x[1].T)
        # Updates: step against the gradient
        self.w1[0] -= d3
        self.w1[1] -= d6
#
# def return_weights(self):
# def predict(self):
X = [[[1,0,1], [0,0,0]], [[1,1,1], [0,1,1]]]
Y = [1,0,1]
w1 = np.random.rand(2,2)
b1 = [0.3, 0.2, 0.1, 0.5]
def main():
    FNN = FacialNN(X, Y, w1, b1)
    # slice the training images once, then run the training loop
    FNN.preprocess('C:\\Users\\DELL\\Pictures\\Camera Roll')
    for i in range(60000):
        FNN.feed_forward()
        FNN.back_propagation()
if __name__ == '__main__':
main()
|
olusegvn/Defence-and-Privacy-mechanisms
|
AI_FacialRecognition.py
|
AI_FacialRecognition.py
|
py
| 6,190
|
python
|
en
|
code
| 0
|
github-code
|
6
|
25352417620
|
# coding: utf-8
__author__ = "humkyung <humkyung@atools.co.kr>"
# Imports
import os, sys
import vtk
from enum import IntEnum
class NetworksJsonImporter:
KEY = vtk.vtkInformationStringVectorKey.MakeKey('Attribute', 'vtkActor')
def __init__(self):
self._file_path = None
self._nodes = {}
self._edges = []
def SetFileName(self, file_path: str) -> None:
self._file_path = file_path
def Read(self) -> None:
"""
@brief: read given file
"""
import json
if os.path.isfile(self._file_path):
_dict = None
with open(self._file_path, encoding="utf-8") as f:
all = f.read()
_dict = json.loads(all)
if _dict:
self.Parse(_dict)
def Parse(self, _dict: dict):
"""
@brief: parse given lines
"""
for node in _dict['nodes']:
self._nodes[node['name']] = [float(x) for x in node['pos'].split(',')]
for edge in _dict['edges']:
self._edges.append([edge['start'], edge['end'], float(edge['length'])])
def GetOutput(self, renderer):
"""
@brief: add actors to renderer
"""
from Primitives.Sphere import Sphere
from Primitives.Cylinder import Cylinder
try:
for name, pos in self._nodes.items():
actor = Sphere(renderer, pos).actor
info = actor.GetProperty().GetInformation()
info.Append(NetworksJsonImporter.KEY, f'{{\"name\":\"{name}\"}}')
# Generate the polyline for the spline.
points = vtk.vtkPoints()
edge_data = vtk.vtkPolyData()
# Edges
for edge in self._edges:
u, v = edge[0], edge[1]
(sx, sy, sz) = self._nodes[u]
(ex, ey, ez) = self._nodes[v]
actor = Cylinder(renderer, pt1=(sx, sy, sz), pt2=(ex, ey, ez), radius=0.1).actor
info = actor.GetProperty().GetInformation()
info_str = f'{{\"start\":\"{u}\",\"end\":\"{v}\",\"length\":\"{edge[2]}\"}}'
info.Append(NetworksJsonImporter.KEY, info_str)
except Exception as ex:
message = f"error occurred({repr(ex)}) in {sys.exc_info()[-1].tb_frame.f_code.co_filename}:" \
f"{sys.exc_info()[-1].tb_lineno}"
print(message)
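# A hedged sketch of wiring the importer into a plain VTK render loop; the json
# layout (nodes with "name"/"pos", edges with "start"/"end"/"length") follows
# Parse() above, while the window/interactor setup is standard VTK boilerplate.
def _render_demo(json_path='sample.json'):
    renderer = vtk.vtkRenderer()
    window = vtk.vtkRenderWindow()
    window.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)
    importer = NetworksJsonImporter()
    importer.SetFileName(json_path)
    importer.Read()
    importer.GetOutput(renderer)  # adds the Sphere/Cylinder actors to the renderer
    window.Render()
    interactor.Start()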
if __name__ == '__main__':
importer = NetworksJsonImporter()
importer.SetFileName('sample.json')
importer.Read()
|
humkyung/AViewer
|
NetworkxJson/NetworkxJsonImporter.py
|
NetworkxJsonImporter.py
|
py
| 2,564
|
python
|
en
|
code
| 2
|
github-code
|
6
|
18803588453
|
from django.urls import path, include
from watchlist_app.api import views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('stream', views.StreamPlatformVS,
basename='streamplatform')
urlpatterns = [
path('list/', views.WatchListAV.as_view(), name='Watch-list'),
path('<int:pk>/', views.WatchDetailsAV.as_view(), name='Watch-details'),
path('list2/', views.WatchListGV.as_view(), name='Watch-list-new'),
path('', include(router.urls)),
# path('stream/', views.StreamPlatformAV.as_view(), name='stream'),
# path('stream/<int:pk>', views.StreamDetailsAV.as_view(),
# name='streamplatform-detail'),
# path('review/', views.ReviewList.as_view(), name='review-list'),
# path('review/<int:pk>', views.ReviewDetail.as_view(), name='review-detail'),
path('<int:pk>/review-create',
views.ReviewCreate.as_view(), name='review-create'),
path('<int:pk>/reviews/', views.ReviewList.as_view(), name='review-list'),
path('review/<int:pk>/',
views.ReviewDetail.as_view(), name='review-detail'),
path('reviews/',
views.UserReview.as_view(), name='user-review-detail'),
]
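# Note: registering 'stream' on the DefaultRouter above also exposes the usual
# list/detail routes (stream/ and stream/<pk>/) plus a browsable API root at ''.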
|
aliesmaeli79/watchmateAPI
|
watchlist_app/api/urls.py
|
urls.py
|
py
| 1,206
|
python
|
en
|
code
| 1
|
github-code
|
6
|
24681181962
|
import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers import pipeline
import torch
#model and tokenizer loading
checkpoint = "LaMini-Flan-T5-248M"
tokenizer = T5Tokenizer.from_pretrained(checkpoint)
base_model = T5ForConditionalGeneration.from_pretrained(checkpoint, device_map='auto', torch_dtype=torch.float32)
def paragraph_summarization(input_text):
paragraphs = input_text.split('\n\n') # Split text into paragraphs
summary_pipeline = pipeline(
'summarization',
model=base_model,
tokenizer=tokenizer,
max_length=300, # Adjust max_length as needed for paragraph summaries
min_length=30) # Adjust min_length as needed
summaries = []
for paragraph in paragraphs:
if len(paragraph.strip()) > 0:
summary = summary_pipeline(paragraph)[0]['summary_text']
summaries.append(summary)
return summaries
#streamlit code
st.set_page_config(layout="wide")
def main():
st.title("Paragraph Summarization App")
# user input text
input_text = st.text_area("Enter your paragraphs here:", "", )
if st.button("Summarize"):
col1, col2 = st.columns(2)
with col1:
st.info("Written paragraphs")
st.write(input_text)
#pdf_viewer = displayPDF(filePath)
with col2:
st.info("Summarized paragraphs")
summaries = paragraph_summarization(input_text)
for i, summary in enumerate(summaries):
st.success(f"Summary for Paragraph {i+1}: {summary}")
if __name__ == "__main__":
main()
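# Assuming the LaMini-Flan-T5-248M checkpoint has been downloaded next to this
# script, the app is typically launched with:
#   streamlit run app.py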
|
Shoaib-Alauudin/Text-Summarization-Using-LLM
|
app.py
|
app.py
|
py
| 1,650
|
python
|
en
|
code
| 0
|
github-code
|
6
|
38358782751
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 29 16:52:22 2017
@author: prver
"""
import pandas as pd
import seaborn as sns
#Import Jan 2017 Turnstile Data and Group By Station/Time
fields = ['Station', 'Time', 'Entries',
'Exits']
df = pd.read_csv('Jan2017.csv', header=0, skipinitialspace=True, usecols=fields)
df2 = df.groupby(['Station'])[['Entries', 'Exits']].agg(sum)
x=df2['Entries']
y=df2['Exits']
#Plot Scatter Plot of Entries vs Exits to Have An Idea of Station Data
p=sns.lmplot(x='Entries', y='Exits', data=df2, fit_reg=False, scatter_kws={"marker": "D", "s": 50})
p.set_xlabels("Entries")
p.set_ylabels("Exits")
p.fig.suptitle('January 2017 Scatter Plot By Entries vs Exits \n Most Stations Have Little Traffic', weight= 'bold', fontsize=11) # can also get the figure from plt.gcf()
#Plot Hierarchal Clustering Heat Map to Group Similar Traffic Together
sns.set_context("paper", rc={"font.size":4,"axes.titlesize":4,"axes.labelsize":11})
g=sns.clustermap(df2, cmap="bwr", figsize=(3, 75), standard_scale=1, method="average", yticklabels=1, col_cluster=False,
cbar_kws={"label": "Standardized\nTraffic"})
g.ax_col_dendrogram.set_title(
"Hierarchical Clustering Heat Map \n January 2017 Turnstile Data\n To Group Stations By Foot Traffic \nCould Be used for New Development",
weight= 'bold', fontsize=11)
g.ax_row_dendrogram.set_axis_on()
g.ax_heatmap.set(xlabel='Foot Traffic')
g.ax_heatmap.set(ylabel='Station')
|
jxyu90/piggly-wiggly
|
TurnSample.py
|
TurnSample.py
|
py
| 1,509
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6486476570
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext
from lavidaorganic.apps.talleres.models import Taller
from paypal.standard.forms import PayPalPaymentsForm
from django.shortcuts import get_object_or_404
import datetime
def talleres(request):
lista_talleres = Taller.objects.filter(fecha__gte=datetime.date.today()).order_by('fecha')
    #FILTERING ALGORITHM
ctx = {'talleres': lista_talleres}
return render_to_response('talleres/talleres.html', ctx, context_instance=RequestContext(request))
def taller(request, titulo):
titulo = titulo.replace('_', ' ')
taller = get_object_or_404(Taller, titulo=titulo)
if taller.inscritos < taller.capacidad:
cupo = True
else:
cupo = False
    #Personalized consultation
paypal_dict_taller = {
"business": "lavidaorganic@lavidaorganic.com",
"amount": taller.precio,
"item_name": taller.titulo,
"notify_url": "http://lavidaorganic.com/paypalito-manager/",
"return_url": "http://lavidaorganic.com/historia-de-salud/",
"cancel_return": "http://lavidaorganic.com/",
}
# Create the instance.
form_taller = PayPalPaymentsForm(initial=paypal_dict_taller)
ctx = {'taller': taller, 'form_taller':form_taller, 'cupo': cupo}
return render_to_response('talleres/taller_detalle.html', ctx, context_instance=RequestContext(request))
|
Reston/lavidaorganic
|
lavidaorganic/lavidaorganic/apps/talleres/views.py
|
views.py
|
py
| 1,372
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37528229182
|
import bs4 as bs
from urllib import request
def get_urls(file):
    # read the seed urls, one per line, stripping trailing newlines
    with open(file, "r") as f:
        urls = [line.strip() for line in f.readlines()]
    return urls
def enter_urls(file, urls):
    # write the collected urls back out, one per line
    with open(file, 'w') as f:
        for url in urls:
            f.write(url + '\n')
def make_unique(arr):
arr2 = []
for i in arr:
if i not in arr2:
arr2.append(i)
return arr2
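# urls.csv is assumed to hold one absolute seed URL per line, e.g. (hypothetical):
#   https://example.com/
#   https://example.org/docs/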
fname = 'urls.csv'
urls = get_urls(fname)
i = 0
while i < len(urls):
    try:
        html = request.urlopen(urls[i]).read()
        soup = bs.BeautifulSoup(html, 'html.parser')
        links = soup.find_all('a')
        urls_new = []
        for link in links:
            href = link.get('href')
            # keep relative links only and resolve them against the current page
            if href and href.find('http') == -1:
                urls_new.append(urls[i].rstrip('/') + '/' + href.lstrip('/'))
        for url in urls_new:
            if url not in urls:
                urls.append(url)
    except Exception:
        # skip pages that fail to download or parse instead of aborting the crawl
        pass
    i += 1
urls = make_unique(urls)
enter_urls('result.csv',urls)
|
stefanivus/Web-scraping
|
Web Cralwer.py
|
Web Cralwer.py
|
py
| 1,017
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23204188996
|
import re
with open('input.txt') as infile:
claims = [claim.strip() for claim in infile.readlines()]
fabric = [[{'claimed_by': [], 'num_claims': 0} for x in range(1001)] for y in range(1001)]
claim_re = re.compile(r"#(\d+)\s@\s(\d+),(\d+):\s(\d+)x(\d+)")
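# Each claim line has the form "#<id> @ <left>,<top>: <width>x<height>",
# e.g. "#123 @ 3,2: 5x4" (hypothetical values), which the pattern above captures.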
claimants = list()
for claim in claims:
match = claim_re.match(claim)
if match:
claim_id = int(match.group(1))
claimants.append(claim_id)
claim_pos = (int(match.group(2)), int(match.group(3)))
claim_dims = (int(match.group(4)), int(match.group(5)))
for x in range(claim_pos[0], claim_pos[0] + claim_dims[0]):
for y in range(claim_pos[1], claim_pos[1] + claim_dims[1]):
fabric[y][x]['claimed_by'].append(claim_id)
fabric[y][x]['num_claims'] += 1
sole_claimant = None
num_contested_inches = 0
for row in fabric:
for col in row:
if col['num_claims'] > 1:
num_contested_inches += 1
for claimant in col['claimed_by']:
if claimant in claimants:
claimants.remove(claimant)
print(f"Number of square inches with 2 claims or more: {num_contested_inches}")
print(f"Sole claimant: {claimants[0]}")
|
jandersson/AdventOfCode2018
|
3/fabric_slicing.py
|
fabric_slicing.py
|
py
| 1,209
|
python
|
en
|
code
| 0
|
github-code
|
6
|