mirror of
https://github.com/azaion/gps-denied-onboard.git
synced 2026-04-23 00:26:36 +00:00
Initial commit
This commit is contained in:
@@ -0,0 +1,68 @@
|
||||
"""
|
||||
DiskCache API Reference
|
||||
=======================
|
||||
|
||||
The :doc:`tutorial` provides a helpful walkthrough of most methods.
|
||||
"""
|
||||
|
||||
from .core import (
|
||||
DEFAULT_SETTINGS,
|
||||
ENOVAL,
|
||||
EVICTION_POLICY,
|
||||
UNKNOWN,
|
||||
Cache,
|
||||
Disk,
|
||||
EmptyDirWarning,
|
||||
JSONDisk,
|
||||
Timeout,
|
||||
UnknownFileWarning,
|
||||
)
|
||||
from .fanout import FanoutCache
|
||||
from .persistent import Deque, Index
|
||||
from .recipes import (
|
||||
Averager,
|
||||
BoundedSemaphore,
|
||||
Lock,
|
||||
RLock,
|
||||
barrier,
|
||||
memoize_stampede,
|
||||
throttle,
|
||||
)
|
||||
|
||||
# Explicit public API of the package; extended below when the optional
# Django cache backend is importable.
__all__ = [
    'Averager',
    'BoundedSemaphore',
    'Cache',
    'DEFAULT_SETTINGS',
    'Deque',
    'Disk',
    'ENOVAL',
    'EVICTION_POLICY',
    'EmptyDirWarning',
    'FanoutCache',
    'Index',
    'JSONDisk',
    'Lock',
    'RLock',
    'Timeout',
    'UNKNOWN',
    'UnknownFileWarning',
    'barrier',
    'memoize_stampede',
    'throttle',
]

try:
    # DjangoCache is optional: importing it requires Django to be both
    # installed and configured, so failures are deliberately ignored.
    from .djangocache import DjangoCache  # noqa

    __all__.append('DjangoCache')
except Exception:  # pylint: disable=broad-except # pragma: no cover
    # Django not installed or not setup so ignore.
    pass

# Package metadata.
__title__ = 'diskcache'
__version__ = '5.6.3'
__build__ = 0x050603
__author__ = 'Grant Jenks'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016-2023 Grant Jenks'
|
||||
@@ -0,0 +1 @@
|
||||
"""Command line interface to disk cache."""
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,456 @@
|
||||
"""Django-compatible disk and file backed cache."""
|
||||
|
||||
from functools import wraps
|
||||
|
||||
from django.core.cache.backends.base import BaseCache
|
||||
|
||||
try:
|
||||
from django.core.cache.backends.base import DEFAULT_TIMEOUT
|
||||
except ImportError: # pragma: no cover
|
||||
# For older versions of Django simply use 300 seconds.
|
||||
DEFAULT_TIMEOUT = 300
|
||||
|
||||
from .core import ENOVAL, args_to_key, full_name
|
||||
from .fanout import FanoutCache
|
||||
|
||||
|
||||
class DjangoCache(BaseCache):
    """Django-compatible disk and file backed cache."""

    def __init__(self, directory, params):
        """Initialize DjangoCache instance.

        :param str directory: cache directory
        :param dict params: cache parameters

        """
        super().__init__(params)
        # Backend-specific settings from the CACHES configuration entry:
        # SHARDS spreads writes across multiple SQLite files,
        # DATABASE_TIMEOUT is the SQLite connection timeout, and OPTIONS
        # is passed through to FanoutCache as settings overrides.
        shards = params.get('SHARDS', 8)
        timeout = params.get('DATABASE_TIMEOUT', 0.010)
        options = params.get('OPTIONS', {})
        self._cache = FanoutCache(directory, shards, timeout, **options)

    @property
    def directory(self):
        """Cache directory."""
        return self._cache.directory

    def cache(self, name):
        """Return Cache with given `name` in subdirectory.

        :param str name: subdirectory name for Cache
        :return: Cache with given name

        """
        return self._cache.cache(name)

    def deque(self, name, maxlen=None):
        """Return Deque with given `name` in subdirectory.

        :param str name: subdirectory name for Deque
        :param maxlen: max length (default None, no max)
        :return: Deque with given name

        """
        return self._cache.deque(name, maxlen=maxlen)

    def index(self, name):
        """Return Index with given `name` in subdirectory.

        :param str name: subdirectory name for Index
        :return: Index with given name

        """
        return self._cache.index(name)

    def add(
        self,
        key,
        value,
        timeout=DEFAULT_TIMEOUT,
        version=None,
        read=False,
        tag=None,
        retry=True,
    ):
        """Set a value in the cache if the key does not already exist. If
        timeout is given, that timeout will be used for the key; otherwise the
        default cache timeout will be used.

        Return True if the value was stored, False otherwise.

        :param key: key for item
        :param value: value for item
        :param float timeout: seconds until the item expires
            (default 300 seconds)
        :param int version: key version number (default None, cache parameter)
        :param bool read: read value as bytes from file (default False)
        :param str tag: text to associate with key (default None)
        :param bool retry: retry if database timeout occurs (default True)
        :return: True if item was added

        """
        # pylint: disable=arguments-differ
        key = self.make_key(key, version=version)
        timeout = self.get_backend_timeout(timeout=timeout)
        return self._cache.add(key, value, timeout, read, tag, retry)

    def get(
        self,
        key,
        default=None,
        version=None,
        read=False,
        expire_time=False,
        tag=False,
        retry=False,
    ):
        """Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.

        :param key: key for item
        :param default: return value if key is missing (default None)
        :param int version: key version number (default None, cache parameter)
        :param bool read: if True, return file handle to value
            (default False)
        :param float expire_time: if True, return expire_time in tuple
            (default False)
        :param tag: if True, return tag in tuple (default False)
        :param bool retry: retry if database timeout occurs (default False)
        :return: value for item if key is found else default

        """
        # pylint: disable=arguments-differ
        key = self.make_key(key, version=version)
        return self._cache.get(key, default, read, expire_time, tag, retry)

    def read(self, key, version=None):
        """Return file handle corresponding to `key` from Cache.

        :param key: Python key to retrieve
        :param int version: key version number (default None, cache parameter)
        :return: file open for reading in binary mode
        :raises KeyError: if key is not found

        """
        key = self.make_key(key, version=version)
        return self._cache.read(key)

    def set(
        self,
        key,
        value,
        timeout=DEFAULT_TIMEOUT,
        version=None,
        read=False,
        tag=None,
        retry=True,
    ):
        """Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.

        :param key: key for item
        :param value: value for item
        :param float timeout: seconds until the item expires
            (default 300 seconds)
        :param int version: key version number (default None, cache parameter)
        :param bool read: read value as bytes from file (default False)
        :param str tag: text to associate with key (default None)
        :param bool retry: retry if database timeout occurs (default True)
        :return: True if item was set

        """
        # pylint: disable=arguments-differ
        key = self.make_key(key, version=version)
        timeout = self.get_backend_timeout(timeout=timeout)
        return self._cache.set(key, value, timeout, read, tag, retry)

    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None, retry=True):
        """Touch a key in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.

        :param key: key for item
        :param float timeout: seconds until the item expires
            (default 300 seconds)
        :param int version: key version number (default None, cache parameter)
        :param bool retry: retry if database timeout occurs (default True)
        :return: True if key was touched

        """
        # pylint: disable=arguments-differ
        key = self.make_key(key, version=version)
        timeout = self.get_backend_timeout(timeout=timeout)
        return self._cache.touch(key, timeout, retry)

    def pop(
        self,
        key,
        default=None,
        version=None,
        expire_time=False,
        tag=False,
        retry=True,
    ):
        """Remove corresponding item for `key` from cache and return value.

        If `key` is missing, return `default`.

        Operation is atomic. Concurrent operations will be serialized.

        :param key: key for item
        :param default: return value if key is missing (default None)
        :param int version: key version number (default None, cache parameter)
        :param float expire_time: if True, return expire_time in tuple
            (default False)
        :param tag: if True, return tag in tuple (default False)
        :param bool retry: retry if database timeout occurs (default True)
        :return: value for item if key is found else default

        """
        key = self.make_key(key, version=version)
        return self._cache.pop(key, default, expire_time, tag, retry)

    def delete(self, key, version=None, retry=True):
        """Delete a key from the cache, failing silently.

        :param key: key for item
        :param int version: key version number (default None, cache parameter)
        :param bool retry: retry if database timeout occurs (default True)
        :return: True if item was deleted

        """
        # pylint: disable=arguments-differ
        key = self.make_key(key, version=version)
        return self._cache.delete(key, retry)

    def incr(self, key, delta=1, version=None, default=None, retry=True):
        """Increment value by delta for item with key.

        If key is missing and default is None then raise KeyError. Else if key
        is missing and default is not None then use default for value.

        Operation is atomic. All concurrent increment operations will be
        counted individually.

        Assumes value may be stored in a SQLite column. Most builds that target
        machines with 64-bit pointer widths will support 64-bit signed
        integers.

        :param key: key for item
        :param int delta: amount to increment (default 1)
        :param int version: key version number (default None, cache parameter)
        :param int default: value if key is missing (default None)
        :param bool retry: retry if database timeout occurs (default True)
        :return: new value for item on success else None
        :raises ValueError: if key is not found and default is None

        """
        # pylint: disable=arguments-differ
        key = self.make_key(key, version=version)
        try:
            return self._cache.incr(key, delta, default, retry)
        except KeyError:
            # Django's cache API specifies ValueError for a missing key,
            # so translate the underlying KeyError.
            raise ValueError("Key '%s' not found" % key) from None

    def decr(self, key, delta=1, version=None, default=None, retry=True):
        """Decrement value by delta for item with key.

        If key is missing and default is None then raise KeyError. Else if key
        is missing and default is not None then use default for value.

        Operation is atomic. All concurrent decrement operations will be
        counted individually.

        Unlike Memcached, negative values are supported. Value may be
        decremented below zero.

        Assumes value may be stored in a SQLite column. Most builds that target
        machines with 64-bit pointer widths will support 64-bit signed
        integers.

        :param key: key for item
        :param int delta: amount to decrement (default 1)
        :param int version: key version number (default None, cache parameter)
        :param int default: value if key is missing (default None)
        :param bool retry: retry if database timeout occurs (default True)
        :return: new value for item on success else None
        :raises ValueError: if key is not found and default is None

        """
        # pylint: disable=arguments-differ
        # Delegates to incr with a negated delta (version handling included).
        return self.incr(key, -delta, version, default, retry)

    def has_key(self, key, version=None):
        """Returns True if the key is in the cache and has not expired.

        :param key: key for item
        :param int version: key version number (default None, cache parameter)
        :return: True if key is found

        """
        key = self.make_key(key, version=version)
        return key in self._cache

    def expire(self):
        """Remove expired items from cache.

        :return: count of items removed

        """
        return self._cache.expire()

    def stats(self, enable=True, reset=False):
        """Return cache statistics hits and misses.

        :param bool enable: enable collecting statistics (default True)
        :param bool reset: reset hits and misses to 0 (default False)
        :return: (hits, misses)

        """
        return self._cache.stats(enable=enable, reset=reset)

    def create_tag_index(self):
        """Create tag index on cache database.

        Better to initialize cache with `tag_index=True` than use this.

        :raises Timeout: if database timeout occurs

        """
        self._cache.create_tag_index()

    def drop_tag_index(self):
        """Drop tag index on cache database.

        :raises Timeout: if database timeout occurs

        """
        self._cache.drop_tag_index()

    def evict(self, tag):
        """Remove items with matching `tag` from cache.

        :param str tag: tag identifying items
        :return: count of items removed

        """
        return self._cache.evict(tag)

    def cull(self):
        """Cull items from cache until volume is less than size limit.

        :return: count of items removed

        """
        return self._cache.cull()

    def clear(self):
        """Remove *all* values from the cache at once."""
        return self._cache.clear()

    def close(self, **kwargs):
        """Close the cache connection."""
        # pylint: disable=unused-argument
        self._cache.close()

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """Return seconds to expiration.

        :param float timeout: seconds until the item expires
            (default 300 seconds)

        """
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout
        elif timeout == 0:
            # ticket 21147 - avoid time.time() related precision issues
            timeout = -1
        return None if timeout is None else timeout

    def memoize(
        self,
        name=None,
        timeout=DEFAULT_TIMEOUT,
        version=None,
        typed=False,
        tag=None,
        ignore=(),
    ):
        """Memoizing cache decorator.

        Decorator to wrap callable with memoizing function using cache.
        Repeated calls with the same arguments will lookup result in cache and
        avoid function evaluation.

        If name is set to None (default), the callable name will be determined
        automatically.

        When timeout is set to zero, function results will not be set in the
        cache. Cache lookups still occur, however. Read
        :doc:`case-study-landing-page-caching` for example usage.

        If typed is set to True, function arguments of different types will be
        cached separately. For example, f(3) and f(3.0) will be treated as
        distinct calls with distinct results.

        The original underlying function is accessible through the __wrapped__
        attribute. This is useful for introspection, for bypassing the cache,
        or for rewrapping the function with a different cache.

        An additional `__cache_key__` attribute can be used to generate the
        cache key used for the given arguments.

        Remember to call memoize when decorating a callable. If you forget,
        then a TypeError will occur.

        :param str name: name given for callable (default None, automatic)
        :param float timeout: seconds until the item expires
            (default 300 seconds)
        :param int version: key version number (default None, cache parameter)
        :param bool typed: cache different types separately (default False)
        :param str tag: text to associate with arguments (default None)
        :param set ignore: positional or keyword args to ignore (default ())
        :return: callable decorator

        """
        # Caution: Nearly identical code exists in Cache.memoize
        if callable(name):
            # Guard against the common mistake of writing `@cache.memoize`
            # (without parentheses), which passes the function as `name`.
            raise TypeError('name cannot be callable')

        def decorator(func):
            """Decorator created by memoize() for callable `func`."""
            base = (full_name(func),) if name is None else (name,)

            @wraps(func)
            def wrapper(*args, **kwargs):
                """Wrapper for callable to cache arguments and return values."""
                key = wrapper.__cache_key__(*args, **kwargs)
                result = self.get(key, ENOVAL, version, retry=True)

                if result is ENOVAL:
                    result = func(*args, **kwargs)
                    # timeout == 0 means "do not store"; None, the default
                    # sentinel, and positive values are all storable.
                    valid_timeout = (
                        timeout is None
                        or timeout == DEFAULT_TIMEOUT
                        or timeout > 0
                    )
                    if valid_timeout:
                        self.set(
                            key,
                            result,
                            timeout,
                            version,
                            tag=tag,
                            retry=True,
                        )

                return result

            def __cache_key__(*args, **kwargs):
                """Make key for cache given function arguments."""
                return args_to_key(base, args, kwargs, typed, ignore)

            wrapper.__cache_key__ = __cache_key__
            return wrapper

        return decorator
|
||||
@@ -0,0 +1,687 @@
|
||||
"""Fanout cache automatically shards keys and values."""
|
||||
|
||||
import contextlib as cl
|
||||
import functools
|
||||
import itertools as it
|
||||
import operator
|
||||
import os.path as op
|
||||
import sqlite3
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
from .core import DEFAULT_SETTINGS, ENOVAL, Cache, Disk, Timeout
|
||||
from .persistent import Deque, Index
|
||||
|
||||
|
||||
class FanoutCache:
|
||||
"""Cache that shards keys and values."""
|
||||
|
||||
def __init__(
|
||||
self, directory=None, shards=8, timeout=0.010, disk=Disk, **settings
|
||||
):
|
||||
"""Initialize cache instance.
|
||||
|
||||
:param str directory: cache directory
|
||||
:param int shards: number of shards to distribute writes
|
||||
:param float timeout: SQLite connection timeout
|
||||
:param disk: `Disk` instance for serialization
|
||||
:param settings: any of `DEFAULT_SETTINGS`
|
||||
|
||||
"""
|
||||
if directory is None:
|
||||
directory = tempfile.mkdtemp(prefix='diskcache-')
|
||||
directory = str(directory)
|
||||
directory = op.expanduser(directory)
|
||||
directory = op.expandvars(directory)
|
||||
|
||||
default_size_limit = DEFAULT_SETTINGS['size_limit']
|
||||
size_limit = settings.pop('size_limit', default_size_limit) / shards
|
||||
|
||||
self._count = shards
|
||||
self._directory = directory
|
||||
self._disk = disk
|
||||
self._shards = tuple(
|
||||
Cache(
|
||||
directory=op.join(directory, '%03d' % num),
|
||||
timeout=timeout,
|
||||
disk=disk,
|
||||
size_limit=size_limit,
|
||||
**settings,
|
||||
)
|
||||
for num in range(shards)
|
||||
)
|
||||
self._hash = self._shards[0].disk.hash
|
||||
self._caches = {}
|
||||
self._deques = {}
|
||||
self._indexes = {}
|
||||
|
||||
@property
|
||||
def directory(self):
|
||||
"""Cache directory."""
|
||||
return self._directory
|
||||
|
||||
def __getattr__(self, name):
|
||||
safe_names = {'timeout', 'disk'}
|
||||
valid_name = name in DEFAULT_SETTINGS or name in safe_names
|
||||
assert valid_name, 'cannot access {} in cache shard'.format(name)
|
||||
return getattr(self._shards[0], name)
|
||||
|
||||
    @cl.contextmanager
    def transact(self, retry=True):
        """Context manager to perform a transaction by locking the cache.

        While the cache is locked, no other write operation is permitted.
        Transactions should therefore be as short as possible. Read and write
        operations performed in a transaction are atomic. Read operations may
        occur concurrent to a transaction.

        Transactions may be nested and may not be shared between threads.

        Blocks until transactions are held on all cache shards by retrying as
        necessary.

        >>> cache = FanoutCache()
        >>> with cache.transact():  # Atomically increment two keys.
        ...     _ = cache.incr('total', 123.4)
        ...     _ = cache.incr('count', 1)
        >>> with cache.transact():  # Atomically calculate average.
        ...     average = cache['total'] / cache['count']
        >>> average
        123.4

        :return: context manager for use in `with` statement

        """
        assert retry, 'retry must be True in FanoutCache'
        # Enter a transaction on every shard; ExitStack releases them in
        # reverse order on exit, including when acquisition fails partway.
        with cl.ExitStack() as stack:
            for shard in self._shards:
                shard_transaction = shard.transact(retry=True)
                stack.enter_context(shard_transaction)
            yield
|
||||
|
||||
def set(self, key, value, expire=None, read=False, tag=None, retry=False):
|
||||
"""Set `key` and `value` item in cache.
|
||||
|
||||
When `read` is `True`, `value` should be a file-like object opened
|
||||
for reading in binary mode.
|
||||
|
||||
If database timeout occurs then fails silently unless `retry` is set to
|
||||
`True` (default `False`).
|
||||
|
||||
:param key: key for item
|
||||
:param value: value for item
|
||||
:param float expire: seconds until the key expires
|
||||
(default None, no expiry)
|
||||
:param bool read: read value as raw bytes from file (default False)
|
||||
:param str tag: text to associate with key (default None)
|
||||
:param bool retry: retry if database timeout occurs (default False)
|
||||
:return: True if item was set
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
try:
|
||||
return shard.set(key, value, expire, read, tag, retry)
|
||||
except Timeout:
|
||||
return False
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
"""Set `key` and `value` item in cache.
|
||||
|
||||
Calls :func:`FanoutCache.set` internally with `retry` set to `True`.
|
||||
|
||||
:param key: key for item
|
||||
:param value: value for item
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
shard[key] = value
|
||||
|
||||
def touch(self, key, expire=None, retry=False):
|
||||
"""Touch `key` in cache and update `expire` time.
|
||||
|
||||
If database timeout occurs then fails silently unless `retry` is set to
|
||||
`True` (default `False`).
|
||||
|
||||
:param key: key for item
|
||||
:param float expire: seconds until the key expires
|
||||
(default None, no expiry)
|
||||
:param bool retry: retry if database timeout occurs (default False)
|
||||
:return: True if key was touched
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
try:
|
||||
return shard.touch(key, expire, retry)
|
||||
except Timeout:
|
||||
return False
|
||||
|
||||
def add(self, key, value, expire=None, read=False, tag=None, retry=False):
|
||||
"""Add `key` and `value` item to cache.
|
||||
|
||||
Similar to `set`, but only add to cache if key not present.
|
||||
|
||||
This operation is atomic. Only one concurrent add operation for given
|
||||
key from separate threads or processes will succeed.
|
||||
|
||||
When `read` is `True`, `value` should be a file-like object opened
|
||||
for reading in binary mode.
|
||||
|
||||
If database timeout occurs then fails silently unless `retry` is set to
|
||||
`True` (default `False`).
|
||||
|
||||
:param key: key for item
|
||||
:param value: value for item
|
||||
:param float expire: seconds until the key expires
|
||||
(default None, no expiry)
|
||||
:param bool read: read value as bytes from file (default False)
|
||||
:param str tag: text to associate with key (default None)
|
||||
:param bool retry: retry if database timeout occurs (default False)
|
||||
:return: True if item was added
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
try:
|
||||
return shard.add(key, value, expire, read, tag, retry)
|
||||
except Timeout:
|
||||
return False
|
||||
|
||||
def incr(self, key, delta=1, default=0, retry=False):
|
||||
"""Increment value by delta for item with key.
|
||||
|
||||
If key is missing and default is None then raise KeyError. Else if key
|
||||
is missing and default is not None then use default for value.
|
||||
|
||||
Operation is atomic. All concurrent increment operations will be
|
||||
counted individually.
|
||||
|
||||
Assumes value may be stored in a SQLite column. Most builds that target
|
||||
machines with 64-bit pointer widths will support 64-bit signed
|
||||
integers.
|
||||
|
||||
If database timeout occurs then fails silently unless `retry` is set to
|
||||
`True` (default `False`).
|
||||
|
||||
:param key: key for item
|
||||
:param int delta: amount to increment (default 1)
|
||||
:param int default: value if key is missing (default 0)
|
||||
:param bool retry: retry if database timeout occurs (default False)
|
||||
:return: new value for item on success else None
|
||||
:raises KeyError: if key is not found and default is None
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
try:
|
||||
return shard.incr(key, delta, default, retry)
|
||||
except Timeout:
|
||||
return None
|
||||
|
||||
def decr(self, key, delta=1, default=0, retry=False):
|
||||
"""Decrement value by delta for item with key.
|
||||
|
||||
If key is missing and default is None then raise KeyError. Else if key
|
||||
is missing and default is not None then use default for value.
|
||||
|
||||
Operation is atomic. All concurrent decrement operations will be
|
||||
counted individually.
|
||||
|
||||
Unlike Memcached, negative values are supported. Value may be
|
||||
decremented below zero.
|
||||
|
||||
Assumes value may be stored in a SQLite column. Most builds that target
|
||||
machines with 64-bit pointer widths will support 64-bit signed
|
||||
integers.
|
||||
|
||||
If database timeout occurs then fails silently unless `retry` is set to
|
||||
`True` (default `False`).
|
||||
|
||||
:param key: key for item
|
||||
:param int delta: amount to decrement (default 1)
|
||||
:param int default: value if key is missing (default 0)
|
||||
:param bool retry: retry if database timeout occurs (default False)
|
||||
:return: new value for item on success else None
|
||||
:raises KeyError: if key is not found and default is None
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
try:
|
||||
return shard.decr(key, delta, default, retry)
|
||||
except Timeout:
|
||||
return None
|
||||
|
||||
def get(
|
||||
self,
|
||||
key,
|
||||
default=None,
|
||||
read=False,
|
||||
expire_time=False,
|
||||
tag=False,
|
||||
retry=False,
|
||||
):
|
||||
"""Retrieve value from cache. If `key` is missing, return `default`.
|
||||
|
||||
If database timeout occurs then returns `default` unless `retry` is set
|
||||
to `True` (default `False`).
|
||||
|
||||
:param key: key for item
|
||||
:param default: return value if key is missing (default None)
|
||||
:param bool read: if True, return file handle to value
|
||||
(default False)
|
||||
:param float expire_time: if True, return expire_time in tuple
|
||||
(default False)
|
||||
:param tag: if True, return tag in tuple (default False)
|
||||
:param bool retry: retry if database timeout occurs (default False)
|
||||
:return: value for item if key is found else default
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
try:
|
||||
return shard.get(key, default, read, expire_time, tag, retry)
|
||||
except (Timeout, sqlite3.OperationalError):
|
||||
return default
|
||||
|
||||
def __getitem__(self, key):
|
||||
"""Return corresponding value for `key` from cache.
|
||||
|
||||
Calls :func:`FanoutCache.get` internally with `retry` set to `True`.
|
||||
|
||||
:param key: key for item
|
||||
:return: value for item
|
||||
:raises KeyError: if key is not found
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
return shard[key]
|
||||
|
||||
def read(self, key):
|
||||
"""Return file handle corresponding to `key` from cache.
|
||||
|
||||
:param key: key for item
|
||||
:return: file open for reading in binary mode
|
||||
:raises KeyError: if key is not found
|
||||
|
||||
"""
|
||||
handle = self.get(key, default=ENOVAL, read=True, retry=True)
|
||||
if handle is ENOVAL:
|
||||
raise KeyError(key)
|
||||
return handle
|
||||
|
||||
def __contains__(self, key):
|
||||
"""Return `True` if `key` matching item is found in cache.
|
||||
|
||||
:param key: key for item
|
||||
:return: True if key is found
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
return key in shard
|
||||
|
||||
def pop(
|
||||
self, key, default=None, expire_time=False, tag=False, retry=False
|
||||
): # noqa: E501
|
||||
"""Remove corresponding item for `key` from cache and return value.
|
||||
|
||||
If `key` is missing, return `default`.
|
||||
|
||||
Operation is atomic. Concurrent operations will be serialized.
|
||||
|
||||
If database timeout occurs then fails silently unless `retry` is set to
|
||||
`True` (default `False`).
|
||||
|
||||
:param key: key for item
|
||||
:param default: return value if key is missing (default None)
|
||||
:param float expire_time: if True, return expire_time in tuple
|
||||
(default False)
|
||||
:param tag: if True, return tag in tuple (default False)
|
||||
:param bool retry: retry if database timeout occurs (default False)
|
||||
:return: value for item if key is found else default
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
try:
|
||||
return shard.pop(key, default, expire_time, tag, retry)
|
||||
except Timeout:
|
||||
return default
|
||||
|
||||
def delete(self, key, retry=False):
|
||||
"""Delete corresponding item for `key` from cache.
|
||||
|
||||
Missing keys are ignored.
|
||||
|
||||
If database timeout occurs then fails silently unless `retry` is set to
|
||||
`True` (default `False`).
|
||||
|
||||
:param key: key for item
|
||||
:param bool retry: retry if database timeout occurs (default False)
|
||||
:return: True if item was deleted
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
try:
|
||||
return shard.delete(key, retry)
|
||||
except Timeout:
|
||||
return False
|
||||
|
||||
def __delitem__(self, key):
|
||||
"""Delete corresponding item for `key` from cache.
|
||||
|
||||
Calls :func:`FanoutCache.delete` internally with `retry` set to `True`.
|
||||
|
||||
:param key: key for item
|
||||
:raises KeyError: if key is not found
|
||||
|
||||
"""
|
||||
index = self._hash(key) % self._count
|
||||
shard = self._shards[index]
|
||||
del shard[key]
|
||||
|
||||
def check(self, fix=False, retry=False):
    """Check database and file system consistency of every shard.

    Intended for use in testing and post-mortem error analysis.

    While checking each shard's cache table for consistency, a writer
    lock is held on that shard's database, blocking other cache clients
    from writing. For caches with many file references the lock may be
    held for a long time.

    If database timeout occurs then fails silently unless `retry` is set
    to `True` (default `False`).

    :param bool fix: correct inconsistencies
    :param bool retry: retry if database timeout occurs (default False)
    :return: list of warnings
    :raises Timeout: if database timeout occurs

    """
    # Concatenate per-shard warning lists in shard order.
    merged = []
    for shard in self._shards:
        merged += shard.check(fix, retry)
    return merged
|
||||
|
||||
def expire(self, retry=False):
    """Remove expired items from every shard.

    If database timeout occurs then fails silently unless `retry` is set
    to `True` (default `False`).

    :param bool retry: retry if database timeout occurs (default False)
    :return: count of items removed

    """
    # Capture a single timestamp so all shards agree on "now".
    now = time.time()
    return self._remove('expire', args=(now,), retry=retry)
|
||||
|
||||
def create_tag_index(self):
    """Create tag index on every shard's cache database.

    Better to initialize cache with `tag_index=True` than use this.

    :raises Timeout: if database timeout occurs

    """
    for cache_shard in self._shards:
        cache_shard.create_tag_index()
|
||||
|
||||
def drop_tag_index(self):
    """Drop tag index on every shard's cache database.

    :raises Timeout: if database timeout occurs

    """
    for cache_shard in self._shards:
        cache_shard.drop_tag_index()
|
||||
|
||||
def evict(self, tag, retry=False):
    """Remove items with matching `tag` from every shard.

    If database timeout occurs then fails silently unless `retry` is set
    to `True` (default `False`).

    :param str tag: tag identifying items
    :param bool retry: retry if database timeout occurs (default False)
    :return: count of items removed

    """
    selector = (tag,)
    return self._remove('evict', args=selector, retry=retry)
|
||||
|
||||
def cull(self, retry=False):
    """Cull items from every shard until volume is less than size limit.

    If database timeout occurs then fails silently unless `retry` is set
    to `True` (default `False`).

    :param bool retry: retry if database timeout occurs (default False)
    :return: count of items removed

    """
    removed = self._remove('cull', retry=retry)
    return removed
|
||||
|
||||
def clear(self, retry=False):
    """Remove all items from every shard.

    If database timeout occurs then fails silently unless `retry` is set
    to `True` (default `False`).

    :param bool retry: retry if database timeout occurs (default False)
    :return: count of items removed

    """
    removed = self._remove('clear', retry=retry)
    return removed
|
||||
|
||||
def _remove(self, name, args=(), retry=False):
    """Invoke removal method `name` on every shard and sum removed counts.

    A shard that raises Timeout carries its partial removal count in the
    exception arguments; that partial count is accumulated and the
    shard's removal is attempted again until it finishes without timing
    out.

    :param str name: shard method to call ('expire', 'evict', ...)
    :param tuple args: positional arguments for the shard method
    :param bool retry: forwarded to the shard method
    :return: total count of items removed across shards

    """
    total = 0
    for shard in self._shards:
        remover = getattr(shard, name)
        finished = False
        while not finished:
            try:
                total += remover(*args, retry=retry)
            except Timeout as timeout:
                # Partial progress is reported via the exception payload.
                total += timeout.args[0]
            else:
                finished = True
    return total
|
||||
|
||||
def stats(self, enable=True, reset=False):
    """Return cache statistics hits and misses, summed over all shards.

    :param bool enable: enable collecting statistics (default True)
    :param bool reset: reset hits and misses to 0 (default False)
    :return: (hits, misses)

    """
    hits = 0
    misses = 0
    for shard in self._shards:
        shard_hits, shard_misses = shard.stats(enable, reset)
        hits += shard_hits
        misses += shard_misses
    return hits, misses
|
||||
|
||||
def volume(self):
    """Return estimated total size of cache on disk.

    :return: size in bytes

    """
    total = 0
    for shard in self._shards:
        total += shard.volume()
    return total
|
||||
|
||||
def close(self):
    """Close every shard's database connection and drop references to
    any attached caches, deques, and indexes."""
    for shard in self._shards:
        shard.close()
    # Forget derived containers so they are rebuilt on next access.
    for registry in (self._caches, self._deques, self._indexes):
        registry.clear()
|
||||
|
||||
def __enter__(self):
    # Context-manager entry: no setup required; the cache itself is the
    # `as` target.
    return self
|
||||
|
||||
def __exit__(self, *exception):
    # Context-manager exit: always close, regardless of any exception
    # raised in the `with` body (exception info is ignored, not
    # suppressed — returning None propagates it).
    self.close()
|
||||
|
||||
def __getstate__(self):
    # Pickle support: capture only the constructor arguments (directory,
    # shard count, timeout, disk class) — presumably because shards hold
    # live database connections that cannot be pickled; verify against
    # FanoutCache.__init__.
    return (self._directory, self._count, self.timeout, type(self.disk))
|
||||
|
||||
def __setstate__(self, state):
    # Unpickle by re-running __init__ with the captured constructor
    # arguments from __getstate__ (directory, count, timeout, disk).
    self.__init__(*state)
|
||||
|
||||
def __iter__(self):
    """Iterate keys in cache including expired items, shard by shard."""
    return it.chain.from_iterable(iter(shard) for shard in self._shards)
|
||||
|
||||
def __reversed__(self):
    """Reverse iterate keys in cache including expired items.

    Shards are visited in reverse order and each shard is itself
    reverse-iterated, mirroring :meth:`__iter__`.
    """
    return it.chain.from_iterable(
        reversed(shard) for shard in reversed(self._shards)
    )
|
||||
|
||||
def __len__(self):
    """Count of items in cache including expired items."""
    return sum(map(len, self._shards))
|
||||
|
||||
def reset(self, key, value=ENOVAL):
    """Reset `key` and `value` item from Settings table.

    If `value` is not given, it is reloaded from the Settings
    table. Otherwise, the Settings table is updated.

    Settings attributes on cache objects are lazy-loaded and
    read-only. Use `reset` to update the value.

    Settings with the ``sqlite_`` prefix correspond to SQLite
    pragmas. Updating the value will execute the corresponding PRAGMA
    statement.

    :param str key: Settings key for item
    :param value: value for item (optional)
    :return: updated value for item

    """
    # Apply the reset to every shard, retrying each one through
    # timeouts until it succeeds; the last shard's result is returned.
    for shard in self._shards:
        while True:
            try:
                result = shard.reset(key, value)
                break
            except Timeout:
                pass
    return result
|
||||
|
||||
def cache(self, name, timeout=60, disk=None, **settings):
    """Return Cache with given `name` in subdirectory.

    If `disk` is None (default), uses the fanout cache disk.

    >>> fanout_cache = FanoutCache()
    >>> cache = fanout_cache.cache('test')
    >>> cache.set('abc', 123)
    True
    >>> cache.get('abc')
    123
    >>> len(cache)
    1
    >>> cache.delete('abc')
    True

    :param str name: subdirectory name for Cache
    :param float timeout: SQLite connection timeout
    :param disk: Disk type or subclass for serialization
    :param settings: any of DEFAULT_SETTINGS
    :return: Cache with given name

    """
    _caches = self._caches

    try:
        # Fast path: reuse a previously constructed Cache.
        return _caches[name]
    except KeyError:
        parts = name.split('/')
        directory = op.join(self._directory, 'cache', *parts)
        temp = Cache(
            directory=directory,
            timeout=timeout,
            # Bug fix: previously this read `... else Disk`, silently
            # replacing any caller-supplied Disk subclass with the base
            # Disk class. Honor the caller's `disk` argument, falling
            # back to the fanout cache disk when it is None.
            disk=self._disk if disk is None else disk,
            **settings,
        )
        _caches[name] = temp
        return temp
|
||||
|
||||
def deque(self, name, maxlen=None):
    """Return Deque with given `name` in subdirectory.

    >>> cache = FanoutCache()
    >>> deque = cache.deque('test')
    >>> deque.extend('abc')
    >>> deque.popleft()
    'a'
    >>> deque.pop()
    'c'
    >>> len(deque)
    1

    :param str name: subdirectory name for Deque
    :param maxlen: max length (default None, no max)
    :return: Deque with given name

    """
    registry = self._deques

    try:
        # Fast path: reuse a previously constructed Deque.
        return registry[name]
    except KeyError:
        directory = op.join(self._directory, 'deque', *name.split('/'))
        backing = Cache(
            directory=directory,
            disk=self._disk,
            eviction_policy='none',
        )
        fresh = Deque.fromcache(backing, maxlen=maxlen)
        registry[name] = fresh
        return fresh
|
||||
|
||||
def index(self, name):
    """Return Index with given `name` in subdirectory.

    >>> cache = FanoutCache()
    >>> index = cache.index('test')
    >>> index['abc'] = 123
    >>> index['def'] = 456
    >>> index['ghi'] = 789
    >>> index.popitem()
    ('ghi', 789)
    >>> del index['abc']
    >>> len(index)
    1
    >>> index['def']
    456

    :param str name: subdirectory name for Index
    :return: Index with given name

    """
    registry = self._indexes

    try:
        # Fast path: reuse a previously constructed Index.
        return registry[name]
    except KeyError:
        directory = op.join(self._directory, 'index', *name.split('/'))
        backing = Cache(
            directory=directory,
            disk=self._disk,
            eviction_policy='none',
        )
        fresh = Index.fromcache(backing)
        registry[name] = fresh
        return fresh
|
||||
|
||||
|
||||
# Reuse Cache.memoize as FanoutCache.memoize rather than duplicating the
# decorator. NOTE(review): assumes Cache.memoize only touches methods
# FanoutCache also implements (get/set/add) — confirm against core.Cache.
FanoutCache.memoize = Cache.memoize  # type: ignore
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,488 @@
|
||||
"""Disk Cache Recipes
|
||||
"""
|
||||
|
||||
import functools
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import threading
|
||||
import time
|
||||
|
||||
from .core import ENOVAL, args_to_key, full_name
|
||||
|
||||
|
||||
class Averager:
    """Recipe for calculating a running average.

    Sometimes known as "online statistics," the running average maintains the
    total and count. The average can then be calculated at any time.

    Assumes the key will not be evicted. Set the eviction policy to 'none' on
    the cache to guarantee the key is not evicted.

    >>> import diskcache
    >>> cache = diskcache.FanoutCache()
    >>> ave = Averager(cache, 'latency')
    >>> ave.add(0.080)
    >>> ave.add(0.120)
    >>> ave.get()
    0.1
    >>> ave.add(0.160)
    >>> ave.pop()
    0.12
    >>> print(ave.get())
    None

    """

    def __init__(self, cache, key, expire=None, tag=None):
        self._cache = cache
        self._key = key
        self._expire = expire
        self._tag = tag

    def add(self, value):
        """Fold `value` into the running average."""
        # Read-modify-write under a transaction so concurrent adders
        # cannot lose updates.
        with self._cache.transact(retry=True):
            running_total, running_count = self._cache.get(
                self._key, default=(0.0, 0)
            )
            self._cache.set(
                self._key,
                (running_total + value, running_count + 1),
                expire=self._expire,
                tag=self._tag,
            )

    def get(self):
        """Get current average or return `None` if count equals zero."""
        running_total, running_count = self._cache.get(
            self._key, default=(0.0, 0), retry=True
        )
        if running_count == 0:
            return None
        return running_total / running_count

    def pop(self):
        """Return current average and delete key."""
        running_total, running_count = self._cache.pop(
            self._key, default=(0.0, 0), retry=True
        )
        if running_count == 0:
            return None
        return running_total / running_count
|
||||
|
||||
|
||||
class Lock:
    """Recipe for cross-process and cross-thread lock.

    Assumes the key will not be evicted. Set the eviction policy to 'none' on
    the cache to guarantee the key is not evicted.

    >>> import diskcache
    >>> cache = diskcache.Cache()
    >>> lock = Lock(cache, 'report-123')
    >>> lock.acquire()
    >>> lock.release()
    >>> with lock:
    ...     pass

    """

    def __init__(self, cache, key, expire=None, tag=None):
        self._cache = cache
        self._key = key
        self._expire = expire
        self._tag = tag

    def acquire(self):
        """Acquire lock using spin-lock algorithm."""
        # `add` is atomic: it succeeds only when the key is absent, so
        # exactly one caller wins each round; losers back off briefly.
        while not self._cache.add(
            self._key,
            None,
            expire=self._expire,
            tag=self._tag,
            retry=True,
        ):
            time.sleep(0.001)

    def release(self):
        """Release lock by deleting key."""
        self._cache.delete(self._key, retry=True)

    def locked(self):
        """Return true if the lock is acquired."""
        return self._key in self._cache

    def __enter__(self):
        self.acquire()

    def __exit__(self, *exc_info):
        self.release()
|
||||
|
||||
|
||||
class RLock:
    """Recipe for cross-process and cross-thread re-entrant lock.

    Assumes the key will not be evicted. Set the eviction policy to 'none' on
    the cache to guarantee the key is not evicted.

    >>> import diskcache
    >>> cache = diskcache.Cache()
    >>> rlock = RLock(cache, 'user-123')
    >>> rlock.acquire()
    >>> rlock.acquire()
    >>> rlock.release()
    >>> with rlock:
    ...     pass
    >>> rlock.release()
    >>> rlock.release()
    Traceback (most recent call last):
      ...
    AssertionError: cannot release un-acquired lock

    """

    def __init__(self, cache, key, expire=None, tag=None):
        self._cache = cache
        self._key = key
        self._expire = expire
        self._tag = tag

    def _owner_id(self):
        # Identify the acquiring context by process and thread.
        return '{}-{}'.format(os.getpid(), threading.get_ident())

    def acquire(self):
        """Acquire lock by incrementing count using spin-lock algorithm."""
        me = self._owner_id()
        while True:
            with self._cache.transact(retry=True):
                owner, depth = self._cache.get(self._key, default=(None, 0))
                # Re-enter when already held by this context, or take
                # ownership when the lock is free (depth zero).
                if owner == me or depth == 0:
                    self._cache.set(
                        self._key,
                        (me, depth + 1),
                        expire=self._expire,
                        tag=self._tag,
                    )
                    return
            time.sleep(0.001)

    def release(self):
        """Release lock by decrementing count."""
        me = self._owner_id()
        with self._cache.transact(retry=True):
            owner, depth = self._cache.get(self._key, default=(None, 0))
            assert owner == me and depth > 0, 'cannot release un-acquired lock'
            self._cache.set(
                self._key,
                (owner, depth - 1),
                expire=self._expire,
                tag=self._tag,
            )

    def __enter__(self):
        self.acquire()

    def __exit__(self, *exc_info):
        self.release()
|
||||
|
||||
|
||||
class BoundedSemaphore:
    """Recipe for cross-process and cross-thread bounded semaphore.

    Assumes the key will not be evicted. Set the eviction policy to 'none' on
    the cache to guarantee the key is not evicted.

    >>> import diskcache
    >>> cache = diskcache.Cache()
    >>> semaphore = BoundedSemaphore(cache, 'max-cons', value=2)
    >>> semaphore.acquire()
    >>> semaphore.acquire()
    >>> semaphore.release()
    >>> with semaphore:
    ...     pass
    >>> semaphore.release()
    >>> semaphore.release()
    Traceback (most recent call last):
      ...
    AssertionError: cannot release un-acquired semaphore

    """

    def __init__(self, cache, key, value=1, expire=None, tag=None):
        self._cache = cache
        self._key = key
        self._value = value
        self._expire = expire
        self._tag = tag

    def acquire(self):
        """Acquire semaphore by decrementing value using spin-lock algorithm."""
        while True:
            with self._cache.transact(retry=True):
                # Missing key means no slot has been taken yet.
                available = self._cache.get(self._key, default=self._value)
                if available > 0:
                    self._cache.set(
                        self._key,
                        available - 1,
                        expire=self._expire,
                        tag=self._tag,
                    )
                    return
            time.sleep(0.001)

    def release(self):
        """Release semaphore by incrementing value."""
        with self._cache.transact(retry=True):
            available = self._cache.get(self._key, default=self._value)
            # Releasing past the initial value means acquire/release
            # calls are unbalanced.
            assert available < self._value, 'cannot release un-acquired semaphore'
            self._cache.set(
                self._key,
                available + 1,
                expire=self._expire,
                tag=self._tag,
            )

    def __enter__(self):
        self.acquire()

    def __exit__(self, *exc_info):
        self.release()
|
||||
|
||||
|
||||
def throttle(
    cache,
    count,
    seconds,
    name=None,
    expire=None,
    tag=None,
    time_func=time.time,
    sleep_func=time.sleep,
):
    """Decorator to throttle calls to function.

    Assumes keys will not be evicted. Set the eviction policy to 'none' on the
    cache to guarantee the keys are not evicted.

    >>> import diskcache, time
    >>> cache = diskcache.Cache()
    >>> count = 0
    >>> @throttle(cache, 2, 1)  # 2 calls per 1 second
    ... def increment():
    ...     global count
    ...     count += 1
    >>> start = time.time()
    >>> while (time.time() - start) <= 2:
    ...     increment()
    >>> count in (6, 7)  # 6 or 7 calls depending on CPU load
    True

    """

    def decorator(func):
        # Token-bucket: tokens refill at `rate` per second, capped at
        # `count`; each call spends one token.
        rate = count / float(seconds)
        key = full_name(func) if name is None else name
        cache.set(key, (time_func(), count), expire=expire, tag=tag, retry=True)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            while True:
                with cache.transact(retry=True):
                    marked, tokens = cache.get(key)
                    moment = time_func()
                    tokens += (moment - marked) * rate
                    delay = 0

                    if tokens > count:
                        # Bucket overflowed while idle; clamp to the cap
                        # and spend one token.
                        cache.set(key, (moment, count - 1), expire)
                    elif tokens >= 1:
                        cache.set(key, (moment, tokens - 1), expire)
                    else:
                        # Not enough tokens; wait until one accrues.
                        delay = (1 - tokens) / rate

                if not delay:
                    break
                sleep_func(delay)

            return func(*args, **kwargs)

        return wrapper

    return decorator
|
||||
|
||||
|
||||
def barrier(cache, lock_factory, name=None, expire=None, tag=None):
    """Barrier to calling decorated function.

    Supports different kinds of locks: Lock, RLock, BoundedSemaphore.

    Assumes keys will not be evicted. Set the eviction policy to 'none' on the
    cache to guarantee the keys are not evicted.

    >>> import diskcache, time
    >>> cache = diskcache.Cache()
    >>> @barrier(cache, Lock)
    ... def work(num):
    ...     print('worker started')
    ...     time.sleep(1)
    ...     print('worker finished')
    >>> import multiprocessing.pool
    >>> pool = multiprocessing.pool.ThreadPool(2)
    >>> _ = pool.map(work, range(2))
    worker started
    worker finished
    worker started
    worker finished
    >>> pool.terminate()

    """

    def decorator(func):
        key = full_name(func) if name is None else name
        # One shared lock instance guards every call to the wrapped
        # function.
        guard = lock_factory(cache, key, expire=expire, tag=tag)

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with guard:
                return func(*args, **kwargs)

        return wrapper

    return decorator
|
||||
|
||||
|
||||
def memoize_stampede(
    cache, expire, name=None, typed=False, tag=None, beta=1, ignore=()
):
    """Memoizing cache decorator with cache stampede protection.

    Cache stampedes are a type of system overload that can occur when parallel
    computing systems using memoization come under heavy load. This behaviour
    is sometimes also called dog-piling, cache miss storm, cache choking, or
    the thundering herd problem.

    The memoization decorator implements cache stampede protection through
    early recomputation. Early recomputation of function results will occur
    probabilistically before expiration in a background thread of
    execution. Early probabilistic recomputation is based on research by
    Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015), Optimal Probabilistic
    Cache Stampede Prevention, VLDB, pp. 886-897, ISSN 2150-8097

    If name is set to None (default), the callable name will be determined
    automatically.

    If typed is set to True, function arguments of different types will be
    cached separately. For example, f(3) and f(3.0) will be treated as distinct
    calls with distinct results.

    The original underlying function is accessible through the `__wrapped__`
    attribute. This is useful for introspection, for bypassing the cache, or
    for rewrapping the function with a different cache.

    >>> from diskcache import Cache
    >>> cache = Cache()
    >>> @memoize_stampede(cache, expire=1)
    ... def fib(number):
    ...     if number == 0:
    ...         return 0
    ...     elif number == 1:
    ...         return 1
    ...     else:
    ...         return fib(number - 1) + fib(number - 2)
    >>> print(fib(100))
    354224848179261915075

    An additional `__cache_key__` attribute can be used to generate the cache
    key used for the given arguments.

    >>> key = fib.__cache_key__(100)
    >>> del cache[key]

    Remember to call memoize when decorating a callable. If you forget, then a
    TypeError will occur.

    :param cache: cache to store callable arguments and return values
    :param float expire: seconds until arguments expire
    :param str name: name given for callable (default None, automatic)
    :param bool typed: cache different types separately (default False)
    :param str tag: text to associate with arguments (default None)
    :param float beta: scale factor for the early-recomputation window
        in the probabilistic check (default 1)
    :param set ignore: positional or keyword args to ignore (default ())
    :return: callable decorator

    """
    # Caution: Nearly identical code exists in Cache.memoize
    def decorator(func):
        """Decorator created by memoize call for callable."""
        base = (full_name(func),) if name is None else (name,)

        def timer(*args, **kwargs):
            """Time execution of `func` and return result and time delta."""
            start = time.time()
            result = func(*args, **kwargs)
            delta = time.time() - start
            return result, delta

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            """Wrapper for callable to cache arguments and return values."""
            key = wrapper.__cache_key__(*args, **kwargs)
            # Stored value is a (result, compute-time) pair; expire_time
            # is requested so the remaining TTL can be computed below.
            pair, expire_time = cache.get(
                key,
                default=ENOVAL,
                expire_time=True,
                retry=True,
            )

            if pair is not ENOVAL:
                result, delta = pair
                now = time.time()
                ttl = expire_time - now

                # Vattani et al.: serve the cached value, but with a
                # probability that grows as TTL shrinks relative to the
                # (beta-scaled) compute time, fall through to trigger an
                # early recomputation instead.
                if (-delta * beta * math.log(random.random())) < ttl:
                    return result  # Cache hit.

                # Check whether a thread has started for early recomputation.

                # The sentinel key expires after `delta` (the observed
                # compute time), so a crashed recompute thread does not
                # block future attempts forever.
                thread_key = key + (ENOVAL,)
                thread_added = cache.add(
                    thread_key,
                    None,
                    expire=delta,
                    retry=True,
                )

                if thread_added:
                    # Start thread for early recomputation.
                    def recompute():
                        with cache:
                            pair = timer(*args, **kwargs)
                            cache.set(
                                key,
                                pair,
                                expire=expire,
                                tag=tag,
                                retry=True,
                            )

                    thread = threading.Thread(target=recompute)
                    thread.daemon = True
                    thread.start()

                # Return the stale-but-valid cached result while the
                # background thread refreshes the entry.
                return result

            # Cache miss: compute synchronously and store the pair.
            pair = timer(*args, **kwargs)
            cache.set(key, pair, expire=expire, tag=tag, retry=True)
            return pair[0]

        def __cache_key__(*args, **kwargs):
            """Make key for cache given function arguments."""
            return args_to_key(base, args, kwargs, typed, ignore)

        wrapper.__cache_key__ = __cache_key__
        return wrapper

    return decorator
|
||||
Reference in New Issue
Block a user