def parse(self, transformed_name):
- "Convert the transform account name to an account name."
- return (transformed_name
- if self.rsep is None
- else transformed_name.replace(self.rsep, sep))
-
+
def parse(self, transformed_name: str) -> Account:
+    "Convert the transform account name to an account name."
+    return (
+        transformed_name
+        if self.rsep is None
+        else transformed_name.replace(self.rsep, sep)
+    )
+
def render(self, account_name):
- "Convert the account name to a transformed account name."
- return (account_name
- if self.rsep is None
- else account_name.replace(sep, self.rsep))
-
+
def render(self, account_name: Account) -> str:
+    "Convert the account name to a transformed account name."
+    return account_name if self.rsep is None else account_name.replace(sep, self.rsep)
+
accounts (Iterable[str]) – A sequence of account name strings.
@@ -1124,7 +1159,7 @@
Returns:
-
A string, the common parent account. If none, returns an empty string.
+
str – A string, the common parent account. If none, returns an empty string.
@@ -1132,22 +1167,21 @@
Source code in beancount/core/account.py
-
def commonprefix(accounts):
- """Return the common prefix of a list of account names.
-
- Args:
- accounts: A sequence of account name strings.
- Returns:
- A string, the common parent account. If none, returns an empty string.
- """
- accounts_lists = [account_.split(sep)
- for account_ in accounts]
- # Note: the os.path.commonprefix() function just happens to work here.
- # Inspect its code, and even the special case of no common prefix
- # works well with str.join() below.
- common_list = path.commonprefix(accounts_lists)
- return sep.join(common_list)
-
+
def commonprefix(accounts: Iterable[Account]) -> Account:
+    """Return the common prefix of a list of account names.
+
+    Args:
+      accounts: A sequence of account name strings.
+    Returns:
+      A string, the common parent account. If none, returns an empty string.
+    """
+    accounts_lists = [account_.split(sep) for account_ in accounts]
+    # Note: the os.path.commonprefix() function just happens to work here.
+    # Inspect its code, and even the special case of no common prefix
+    # works well with str.join() below.
+    common_list = path.commonprefix(accounts_lists)
+    return sep.join(common_list)
+
component – A string, a component of an account name. For instance,
+
account_name (str) – A string, an account name.
+
component (str) – A string, a component of an account name. For instance,
Food in Expenses:Food:Restaurant. All components are considered.
@@ -1206,19 +1240,19 @@
Source code in beancount/core/account.py
-
def has_component(account_name, component):
- """Return true if one of the account contains a given component.
-
- Args:
- account_name: A string, an account name.
- component: A string, a component of an account name. For instance,
- ``Food`` in ``Expenses:Food:Restaurant``. All components are considered.
- Returns:
- Boolean: true if the component is in the account. Note that a component
- name must be whole, that is ``NY`` is not in ``Expenses:Taxes:StateNY``.
- """
- return bool(re.search('(^|:){}(:|$)'.format(component), account_name))
-
+
def has_component(account_name: Account, component: str) -> bool:
+    """Return true if one of the account contains a given component.
+
+    Args:
+      account_name: A string, an account name.
+      component: A string, a component of an account name. For instance,
+        ``Food`` in ``Expenses:Food:Restaurant``. All components are considered.
+    Returns:
+      Boolean: true if the component is in the account. Note that a component
+      name must be whole, that is ``NY`` is not in ``Expenses:Taxes:StateNY``.
+    """
+    return bool(re.search("(^|:){}(:|$)".format(component), account_name))
+
string – A string, to be checked for account name pattern.
+
string (str) – A string, to be checked for account name pattern.
@@ -1267,7 +1301,7 @@
Returns:
-
A boolean, true if the string has the form of an account's name.
+
bool – A boolean, true if the string has the form of an account's name.
@@ -1275,18 +1309,17 @@
Source code in beancount/core/account.py
-
def is_valid(string):
- """Return true if the given string is a valid account name.
- This does not check for the root account types, just the general syntax.
-
- Args:
- string: A string, to be checked for account name pattern.
- Returns:
- A boolean, true if the string has the form of an account's name.
- """
- return (isinstance(string, str) and
- bool(re.match('{}$'.format(ACCOUNT_RE), string)))
-
+
def is_valid(string: Account) -> bool:
+    """Return true if the given string is a valid account name.
+    This does not check for the root account types, just the general syntax.
+
+    Args:
+      string: A string, to be checked for account name pattern.
+    Returns:
+      A boolean, true if the string has the form of an account's name.
+    """
+    return isinstance(string, str) and bool(regex.match("{}$".format(ACCOUNT_RE), string))
+
*components – Strings, the components of an account name.
+
*components (Tuple[str]) – Strings, the components of an account name.
@@ -1334,7 +1367,7 @@
Returns:
-
A string, joined in a single account name.
+
str – A string, joined in a single account name.
@@ -1342,16 +1375,16 @@
Source code in beancount/core/account.py
-
def join(*components):
- """Join the names with the account separator.
-
- Args:
- *components: Strings, the components of an account name.
- Returns:
- A string, joined in a single account name.
- """
- return sep.join(components)
-
+
def join(*components: Tuple[str]) -> Account:
+    """Join the names with the account separator.
+
+    Args:
+      *components: Strings, the components of an account name.
+    Returns:
+      A string, joined in a single account name.
+    """
+    return sep.join(components)
+
account_name – A string, the name of the account whose leaf name to return.
+
account_name (str) – A string, the name of the account whose leaf name to return.
@@ -1399,7 +1432,7 @@
Returns:
-
A string, the name of the leaf of the account.
+
str – A string, the name of the leaf of the account.
@@ -1407,17 +1440,17 @@
Source code in beancount/core/account.py
-
def leaf(account_name):
- """Get the name of the leaf of this account.
-
- Args:
- account_name: A string, the name of the account whose leaf name to return.
- Returns:
- A string, the name of the leaf of the account.
- """
- assert isinstance(account_name, str)
- return account_name.split(sep)[-1] if account_name else None
-
+
def leaf(account_name: Account) -> Account:
+    """Get the name of the leaf of this account.
+
+    Args:
+      account_name: A string, the name of the account whose leaf name to return.
+    Returns:
+      A string, the name of the leaf of the account.
+    """
+    assert isinstance(account_name, str)
+    return account_name.split(sep)[-1] if account_name else None
+
account_name – A string, the name of the account whose parent to return.
+
account_name (str) – A string, the name of the account whose parent to return.
@@ -1465,7 +1498,7 @@
Returns:
-
A string, the name of the parent account of this account.
+
str – A string, the name of the parent account of this account.
@@ -1473,21 +1506,21 @@
Source code in beancount/core/account.py
-
def parent(account_name):
- """Return the name of the parent account of the given account.
-
- Args:
- account_name: A string, the name of the account whose parent to return.
- Returns:
- A string, the name of the parent account of this account.
- """
- assert isinstance(account_name, str), account_name
- if not account_name:
- return None
- components = account_name.split(sep)
- components.pop(-1)
- return sep.join(components)
-
+
def parent(account_name: Account) -> Account:
+    """Return the name of the parent account of the given account.
+
+    Args:
+      account_name: A string, the name of the account whose parent to return.
+    Returns:
+      A string, the name of the parent account of this account.
+    """
+    assert isinstance(account_name, str), account_name
+    if not account_name:
+        return None
+    components = account_name.split(sep)
+    components.pop(-1)
+    return sep.join(components)
+
account_name – The name of the parent account we want to check for.
+
account_name (str) – The name of the parent account we want to check for.
@@ -1535,7 +1568,7 @@
Returns:
-
A callable, which, when called, will return true if the given account is a
+
Callable[[str], Any] – A callable, which, when called, will return true if the given account is a
child of account_name.
@@ -1544,17 +1577,17 @@
Source code in beancount/core/account.py
-
def parent_matcher(account_name):
- """Build a predicate that returns whether an account is under the given one.
-
- Args:
- account_name: The name of the parent account we want to check for.
- Returns:
- A callable, which, when called, will return true if the given account is a
- child of ``account_name``.
- """
- return re.compile(r'{}($|{})'.format(re.escape(account_name), sep)).match
-
+
def parent_matcher(account_name: Account) -> Callable[[str], Any]:
+    """Build a predicate that returns whether an account is under the given one.
+
+    Args:
+      account_name: The name of the parent account we want to check for.
+    Returns:
+      A callable, which, when called, will return true if the given account is a
+      child of ``account_name``.
+    """
+    return re.compile(r"{}($|{})".format(re.escape(account_name), sep)).match
+
account_name – The name of the account we want to start iterating from.
+
account_name (str) – The name of the account we want to start iterating from.
@@ -1602,7 +1635,7 @@
Returns:
-
A generator of account name strings.
+
Iterator[str] – A generator of account name strings.
@@ -1610,18 +1643,18 @@
Source code in beancount/core/account.py
-
def parents(account_name):
- """A generator of the names of the parents of this account, including this account.
-
- Args:
- account_name: The name of the account we want to start iterating from.
- Returns:
- A generator of account name strings.
- """
- while account_name:
- yield account_name
- account_name = parent(account_name)
-
+
def parents(account_name: Account) -> Iterator[Account]:
+    """A generator of the names of the parents of this account, including this account.
+
+    Args:
+      account_name: The name of the account we want to start iterating from.
+    Returns:
+      A generator of account name strings.
+    """
+    while account_name:
+        yield account_name
+        account_name = parent(account_name)
+
num_components – An integer, the number of components to return.
-
account_name – A string, an account name.
+
num_components (int) – An integer, the number of components to return.
+
account_name (str) – A string, an account name.
@@ -1670,7 +1703,7 @@
Returns:
-
A string, the account root up to 'num_components' components.
+
str – A string, the account root up to 'num_components' components.
@@ -1678,17 +1711,17 @@
Source code in beancount/core/account.py
-
def root(num_components, account_name):
- """Return the first few components of an account's name.
-
- Args:
- num_components: An integer, the number of components to return.
- account_name: A string, an account name.
- Returns:
- A string, the account root up to 'num_components' components.
- """
- return join(*(split(account_name)[:num_components]))
-
+
def root(num_components: int, account_name: Account) -> str:
+    """Return the first few components of an account's name.
+
+    Args:
+      num_components: An integer, the number of components to return.
+      account_name: A string, an account name.
+    Returns:
+      A string, the account root up to 'num_components' components.
+    """
+    return join(*(split(account_name)[:num_components]))
+
account_name – A string, the name of the account whose leaf name to return.
+
account_name (str) – A string, the name of the account whose leaf name to return.
@@ -1737,7 +1770,7 @@
Returns:
-
A string, the name of the non-root portion of this account name.
+
str – A string, the name of the non-root portion of this account name.
@@ -1745,20 +1778,20 @@
Source code in beancount/core/account.py
-
def sans_root(account_name):
- """Get the name of the account without the root.
-
- For example, an input of 'Assets:BofA:Checking' will produce 'BofA:Checking'.
-
- Args:
- account_name: A string, the name of the account whose leaf name to return.
- Returns:
- A string, the name of the non-root portion of this account name.
- """
- assert isinstance(account_name, str)
- components = account_name.split(sep)[1:]
- return join(*components) if account_name else None
-
+
def sans_root(account_name: Account) -> Account:
+    """Get the name of the account without the root.
+
+    For example, an input of 'Assets:BofA:Checking' will produce 'BofA:Checking'.
+
+    Args:
+      account_name: A string, the name of the account whose leaf name to return.
+    Returns:
+      A string, the name of the non-root portion of this account name.
+    """
+    assert isinstance(account_name, str)
+    components = account_name.split(sep)[1:]
+    return join(*components) if account_name else None
+
A list of strings, the components of the account name (without the separators).
+
List[str] – A list of strings, the components of the account name (without the separators).
@@ -1814,16 +1847,16 @@
Source code in beancount/core/account.py
-
def split(account_name):
- """Split an account's name into its components.
-
- Args:
- account_name: A string, an account name.
- Returns:
- A list of strings, the components of the account name (without the separators).
- """
- return account_name.split(sep)
-
+
def split(account_name: Account) -> List[str]:
+    """Split an account's name into its components.
+
+    Args:
+      account_name: A string, an account name.
+    Returns:
+      A list of strings, the components of the account name (without the separators).
+    """
+    return account_name.split(sep)
+
root_directory – A string, the name of the root of the hierarchy to be walked.
+
root_directory (str) – A string, the name of the root of the hierarchy to be walked.
@@ -1867,25 +1900,30 @@
Source code in beancount/core/account.py
-
def walk(root_directory):
- """A version of os.walk() which yields directories that are valid account names.
-
- This only yields directories that are accounts... it skips the other ones.
- For convenience, it also yields you the account's name.
-
- Args:
- root_directory: A string, the name of the root of the hierarchy to be walked.
- Yields:
- Tuples of (root, account-name, dirs, files), similar to os.walk().
- """
- for root, dirs, files in os.walk(root_directory):
- dirs.sort()
- files.sort()
- relroot = root[len(root_directory)+1:]
- account_name = relroot.replace(os.sep, sep)
- if is_valid(account_name):
- yield (root, account_name, dirs, files)
-
+
def walk(root_directory: Account) -> Iterator[Tuple[str, Account, List[str], List[str]]]:
+    """A version of os.walk() which yields directories that are valid account names.
+
+    This only yields directories that are accounts... it skips the other ones.
+    For convenience, it also yields you the account's name.
+
+    Args:
+      root_directory: A string, the name of the root of the hierarchy to be walked.
+    Yields:
+      Tuples of (root, account-name, dirs, files), similar to os.walk().
+    """
+    for root, dirs, files in os.walk(root_directory):
+        dirs.sort()
+        files.sort()
+        relroot = root[len(root_directory) + 1:]
+        account_name = relroot.replace(os.sep, sep)
+        # The regex module does not handle Unicode characters in decomposed
+        # form. Python uses the normal form for representing string. However,
+        # some filesystems use the canonical decomposition form.
+        # See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize
+        account_name = unicodedata.normalize("NFKC", account_name)
+        if is_valid(account_name):
+            yield (root, account_name, dirs, files)
+
account_name – A string, the name of the account whose sign is to return.
-
account_types – An optional instance of the current account_types.
+
account_name (str) – A string, the name of the account whose sign is to return.
+
account_types (AccountTypes) – An optional instance of the current account_types.
@@ -2103,7 +2141,7 @@
Returns:
-
+1 or -1, depending on the account's type.
+
int – +1 or -1, depending on the account's type.
@@ -2111,24 +2149,21 @@
Source code in beancount/core/account_types.py
-
def get_account_sign(account_name, account_types=None):
- """Return the sign of the normal balance of a particular account.
-
- Args:
- account_name: A string, the name of the account whose sign is to return.
- account_types: An optional instance of the current account_types.
- Returns:
- +1 or -1, depending on the account's type.
- """
- if account_types is None:
- account_types = DEFAULT_ACCOUNT_TYPES
- assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
- account_type = get_account_type(account_name)
- return (+1
- if account_type in (account_types.assets,
- account_types.expenses)
- else -1)
-
+
def get_account_sign(account_name: Account, account_types: AccountTypes = None) -> int:
+    """Return the sign of the normal balance of a particular account.
+
+    Args:
+      account_name: A string, the name of the account whose sign is to return.
+      account_types: An optional instance of the current account_types.
+    Returns:
+      +1 or -1, depending on the account's type.
+    """
+    if account_types is None:
+        account_types = DEFAULT_ACCOUNT_TYPES
+    assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
+    account_type = get_account_type(account_name)
+    return +1 if account_type in (account_types.assets, account_types.expenses) else -1
+
account_types – An instance of AccountTypes, a tuple of account type names.
+
account_types (AccountTypes) – An instance of AccountTypes, a tuple of account type names.
@@ -2176,9 +2211,7 @@
Returns:
-
A function object to use as the optional 'key' argument to the sort
-function. It accepts a single argument, the account name to sort and
-produces a sortable key.
+
Tuple[str, str] – An object to use as the 'key' argument to the sort function.
@@ -2186,18 +2219,18 @@
Source code in beancount/core/account_types.py
-
def get_account_sort_key(account_types, account_name):
- """Return a tuple that can be used to order/sort account names.
-
- Args:
- account_types: An instance of AccountTypes, a tuple of account type names.
- Returns:
- A function object to use as the optional 'key' argument to the sort
- function. It accepts a single argument, the account name to sort and
- produces a sortable key.
- """
- return (account_types.index(get_account_type(account_name)), account_name)
-
+
def get_account_sort_key(
+    account_types: AccountTypes, account_name: Account
+) -> Tuple[str, Account]:
+    """Return a tuple that can be used to order/sort account names.
+
+    Args:
+      account_types: An instance of AccountTypes, a tuple of account type names.
+    Returns:
+      An object to use as the 'key' argument to the sort function.
+    """
+    return (account_types.index(get_account_type(account_name)), account_name)
+
account_name – A string, the name of the account whose type is to return.
+
account_name (str) – A string, the name of the account whose type is to return.
@@ -2255,21 +2288,21 @@
Source code in beancount/core/account_types.py
-
def get_account_type(account_name):
- """Return the type of this account's name.
+
def get_account_type(account_name: Account):
+    """Return the type of this account's name.
- Warning: No check is made on the validity of the account type. This merely
- returns the root account of the corresponding account name.
+ Warning: No check is made on the validity of the account type. This merely
+ returns the root account of the corresponding account name.
- Args:
- account_name: A string, the name of the account whose type is to return.
- Returns:
- A string, the type of the account in 'account_name'.
+ Args:
+ account_name: A string, the name of the account whose type is to return.
+ Returns:
+ A string, the type of the account in 'account_name'.
- """
- assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
- return account.split(account_name)[0]
-
+    """
+    assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
+    return account.split(account_name)[0]
+
account_type – A string, the prefix type of the account.
-
account_name – A string, the name of the account whose type is to return.
+
account_type (str) – A string, the prefix type of the account.
+
account_name (str) – A string, the name of the account whose type is to return.
@@ -2320,7 +2353,7 @@
Returns:
-
A boolean, true if the account is of the given type.
+
bool – A boolean, true if the account is of the given type.
@@ -2328,20 +2361,20 @@
Source code in beancount/core/account_types.py
-
def is_account_type(account_type, account_name):
- """Return the type of this account's name.
-
- Warning: No check is made on the validity of the account type. This merely
- returns the root account of the corresponding account name.
-
- Args:
- account_type: A string, the prefix type of the account.
- account_name: A string, the name of the account whose type is to return.
- Returns:
- A boolean, true if the account is of the given type.
- """
- return bool(re.match('^{}{}'.format(account_type, account.sep), account_name))
-
+
def is_account_type(account_type: str, account_name: Account) -> bool:
+    """Return the type of this account's name.
+
+    Warning: No check is made on the validity of the account type. This merely
+    returns the root account of the corresponding account name.
+
+    Args:
+      account_type: A string, the prefix type of the account.
+      account_name: A string, the name of the account whose type is to return.
+    Returns:
+      A boolean, true if the account is of the given type.
+    """
+    return bool(re.match("^{}{}".format(account_type, account.sep), account_name))
+
account_types (AccountTypes) – An instance of AccountTypes.
@@ -2391,7 +2424,7 @@
Returns:
-
A boolean, true if the account is a balance sheet account.
+
bool – A boolean, true if the account is a balance sheet account.
@@ -2399,24 +2432,27 @@
Source code in beancount/core/account_types.py
-
def is_balance_sheet_account(account_name, account_types):
- """Return true if the given account is a balance sheet account.
- Assets, liabilities and equity accounts are balance sheet accounts.
-
- Args:
- account_name: A string, an account name.
- account_types: An instance of AccountTypes.
- Returns:
- A boolean, true if the account is a balance sheet account.
- """
- assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
- assert isinstance(account_types, AccountTypes), (
- "Account types has invalid type: {}".format(account_types))
- account_type = get_account_type(account_name)
- return account_type in (account_types.assets,
- account_types.liabilities,
- account_types.equity)
-
+
def is_balance_sheet_account(account_name: Account, account_types: AccountTypes) -> bool:
+    """Return true if the given account is a balance sheet account.
+    Assets, liabilities and equity accounts are balance sheet accounts.
+
+    Args:
+      account_name: A string, an account name.
+      account_types: An instance of AccountTypes.
+    Returns:
+      A boolean, true if the account is a balance sheet account.
+    """
+    assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
+    assert isinstance(
+        account_types, AccountTypes
+    ), "Account types has invalid type: {}".format(account_types)
+    account_type = get_account_type(account_name)
+    return account_type in (
+        account_types.assets,
+        account_types.liabilities,
+        account_types.equity,
+    )
+
account_types (AccountTypes) – An instance of AccountTypes.
@@ -2465,7 +2501,7 @@
Returns:
-
A boolean, true if the account is an equity account.
+
bool – A boolean, true if the account is an equity account.
@@ -2473,21 +2509,22 @@
Source code in beancount/core/account_types.py
-
def is_equity_account(account_name, account_types):
- """Return true if the given account is an equity account.
-
- Args:
- account_name: A string, an account name.
- account_types: An instance of AccountTypes.
- Returns:
- A boolean, true if the account is an equity account.
- """
- assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
- assert isinstance(account_types, AccountTypes), (
- "Account types has invalid type: {}".format(account_types))
- account_type = get_account_type(account_name)
- return account_type == account_types.equity
-
+
def is_equity_account(account_name: Account, account_types: AccountTypes) -> bool:
+    """Return true if the given account is an equity account.
+
+    Args:
+      account_name: A string, an account name.
+      account_types: An instance of AccountTypes.
+    Returns:
+      A boolean, true if the account is an equity account.
+    """
+    assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
+    assert isinstance(
+        account_types, AccountTypes
+    ), "Account types has invalid type: {}".format(account_types)
+    account_type = get_account_type(account_name)
+    return account_type == account_types.equity
+
account_types (AccountTypes) – An instance of AccountTypes.
@@ -2537,7 +2574,7 @@
Returns:
-
A boolean, true if the account is an income statement account.
+
bool – A boolean, true if the account is an income statement account.
@@ -2545,23 +2582,104 @@
Source code in beancount/core/account_types.py
-
def is_income_statement_account(account_name, account_types):
- """Return true if the given account is an income statement account.
- Income and expense accounts are income statement accounts.
-
- Args:
- account_name: A string, an account name.
- account_types: An instance of AccountTypes.
- Returns:
- A boolean, true if the account is an income statement account.
- """
- assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
- assert isinstance(account_types, AccountTypes), (
- "Account types has invalid type: {}".format(account_types))
- account_type = get_account_type(account_name)
- return account_type in (account_types.income,
- account_types.expenses)
-
+
def is_income_statement_account(account_name: Account, account_types: AccountTypes) -> bool:
+    """Return true if the given account is an income statement account.
+    Income and expense accounts are income statement accounts.
+
+    Args:
+      account_name: A string, an account name.
+      account_types: An instance of AccountTypes.
+    Returns:
+      A boolean, true if the account is an income statement account.
+    """
+    assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
+    assert isinstance(
+        account_types, AccountTypes
+    ), "Account types has invalid type: {}".format(account_types)
+    account_type = get_account_type(account_name)
+    return account_type in (account_types.income, account_types.expenses)
+
Return true if the given account has inverted signs.
+
An inverted sign is the inverse as you'd expect in an external report, i.e.,
+with all positive signs expected.
+
+
+
+
+
+
+
+
+
Parameters:
+
+
+
account_name (str) – A string, an account name.
+
account_types (AccountTypes) – An instance of AccountTypes.
+
+
+
+
+
+
+
+
+
+
+
+
+
Returns:
+
+
+
bool – A boolean, true if the account has an inverted sign.
+
+
+
+
+
+
+ Source code in beancount/core/account_types.py
+
def is_inverted_account(account_name: Account, account_types: AccountTypes) -> bool:
+    """Return true if the given account has inverted signs.
+
+    An inverted sign is the inverse as you'd expect in an external report, i.e.,
+    with all positive signs expected.
+
+    Args:
+      account_name: A string, an account name.
+      account_types: An instance of AccountTypes.
+    Returns:
+      A boolean, true if the account has an inverted sign.
+    """
+    assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
+    assert isinstance(
+        account_types, AccountTypes
+    ), "Account types has invalid type: {}".format(account_types)
+    account_type = get_account_type(account_name)
+    return account_type in (
+        account_types.liabilities,
+        account_types.income,
+        account_types.equity,
+    )
+
Return true if the account name is a root account.
-This function does not verify whether the account root is a valid
+
Return true if the account name is a root account.
+
This function does not verify whether the account root is a valid
one, just that it is a root account or not.
@@ -2595,11 +2713,7 @@
Parameters:
-
account_name – A string, the name of the account to check for.
-
account_types – An optional instance of the current account_types;
-if provided, we check against these values. If not provided, we
-merely check that name pattern is that of an account component with
-no separator.
+
account_name (str) – A string, the name of the account to check for.
@@ -2615,7 +2729,7 @@
Returns:
-
A boolean, true if the account is root account.
+
bool – A boolean, true if the account is root account.
@@ -2623,29 +2737,20 @@
Source code in beancount/core/account_types.py
-
def is_root_account(account_name, account_types=None):
- """Return true if the account name is a root account.
- This function does not verify whether the account root is a valid
- one, just that it is a root account or not.
-
- Args:
- account_name: A string, the name of the account to check for.
- account_types: An optional instance of the current account_types;
- if provided, we check against these values. If not provided, we
- merely check that name pattern is that of an account component with
- no separator.
- Returns:
- A boolean, true if the account is root account.
- """
- assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
- if account_types is not None:
- assert isinstance(account_types, AccountTypes), (
- "Account types has invalid type: {}".format(account_types))
- return account_name in account_types
- else:
- return (account_name and
- bool(re.match(r'([A-Z][A-Za-z0-9\-]+)$', account_name)))
-
+
def is_root_account(account_name: Account) -> bool:
+    """Return true if the account name is a root account.
+
+    This function does not verify whether the account root is a valid
+    one, just that it is a root account or not.
+
+    Args:
+      account_name: A string, the name of the account to check for.
+    Returns:
+      A boolean, true if the account is root account.
+    """
+    assert isinstance(account_name, str), "Account is not a string: {}".format(account_name)
+    return account_name and bool(re.match(r"([A-Z][A-Za-z0-9\-]+)$", account_name))
+
def __bool__(self):
- """Boolean predicate returns true if the number is non-zero.
- Returns:
- A boolean, true if non-zero number.
- """
- return self.number != ZERO
-
+
def __bool__(self):
+    """Boolean predicate returns true if the number is non-zero.
+    Returns:
+      A boolean, true if non-zero number.
+    """
+    return self.number != ZERO
+
def __eq__(self, other):
- """Equality predicate. Returns true if both number and currency are equal.
- Returns:
- A boolean.
- """
- if other is None:
- return False
- return (self.number, self.currency) == (other.number, other.currency)
-
+
def __eq__(self, other):
+    """Equality predicate. Returns true if both number and currency are equal.
+    Returns:
+      A boolean.
+    """
+    if other is None:
+        return False
+    return (self.number, self.currency) == (other.number, other.currency)
+
def __hash__(self):
- """A hashing function for amounts. The hash includes the currency.
- Returns:
- An integer, the hash for this amount.
- """
- return hash((self.number, self.currency))
-
+
def __hash__(self):
+    """A hashing function for amounts. The hash includes the currency.
+    Returns:
+      An integer, the hash for this amount.
+    """
+    return hash((self.number, self.currency))
+
def __lt__(self, other):
- """Ordering comparison. This is used in the sorting key of positions.
- Args:
- other: An instance of Amount.
- Returns:
- True if this is less than the other Amount.
- """
- return sortkey(self) < sortkey(other)
-
+
def __lt__(self, other):
+    """Ordering comparison. This is used in the sorting key of positions.
+    Args:
+      other: An instance of Amount.
+    Returns:
+      True if this is less than the other Amount.
+    """
+    return sortkey(self) < sortkey(other)
+
def __neg__(self):
- """Return the negative of this amount.
- Returns:
- A new instance of Amount, with the negative number of units.
- """
- return Amount(-self.number, self.currency)
-
+
def __neg__(self):
+    """Return the negative of this amount.
+    Returns:
+      A new instance of Amount, with the negative number of units.
+    """
+    return Amount(-self.number, self.currency)
+
number – A string or Decimal instance. Will get converted automatically.
+
number – A Decimal instance.
currency – A string, the currency symbol to use.
@@ -3033,17 +3137,17 @@
Source code in beancount/core/amount.py
-
def __new__(cls, number, currency):
- """Constructor from a number and currency.
-
- Args:
- number: A string or Decimal instance. Will get converted automatically.
- currency: A string, the currency symbol to use.
- """
- assert isinstance(number, Amount.valid_types_number), repr(number)
- assert isinstance(currency, Amount.valid_types_currency), repr(currency)
- return _Amount.__new__(cls, number, currency)
-
+
def__new__(cls,number,currency):
+"""Constructor from a number and currency.
+
+ Args:
+ number: A Decimal instance.
+ currency: A string, the currency symbol to use.
+ """
+ assertisinstance(number,Amount.valid_types_number),repr(number)
+ assertisinstance(currency,Amount.valid_types_currency),repr(currency)
+ return_Amount.__new__(cls,number,currency)
+
def __str__(self):
- """Convert an Amount instance to a printable string with the defaults.
-
- Returns:
- A formatted string of the quantized amount and symbol.
- """
- return self.to_string()
-
+
def__str__(self):
+"""Convert an Amount instance to a printable string with the defaults.
+
+ Returns:
+ A formatted string of the quantized amount and symbol.
+ """
+ returnself.to_string()
+
def __str__(self):
- """Convert an Amount instance to a printable string with the defaults.
-
- Returns:
- A formatted string of the quantized amount and symbol.
- """
- return self.to_string()
-
+
def__str__(self):
+"""Convert an Amount instance to a printable string with the defaults.
+
+ Returns:
+ A formatted string of the quantized amount and symbol.
+ """
+ returnself.to_string()
+
@staticmethod
-def from_string(string):
- """Create an amount from a string.
-
- This is a miniature parser used for building tests.
-
- Args:
- string: A string of <number> <currency>.
- Returns:
- A new instance of Amount.
- """
- match = re.match(r'\s*([-+]?[0-9.]+)\s+({currency})'.format(currency=CURRENCY_RE),
- string)
- if not match:
- raise ValueError("Invalid string for amount: '{}'".format(string))
- number, currency = match.group(1, 2)
- return Amount(D(number), currency)
-
+
@staticmethod
+deffrom_string(string):
+"""Create an amount from a string.
+
+ This is a miniature parser used for building tests.
+
+ Args:
+ string: A string of <number> <currency>.
+ Returns:
+ A new instance of Amount.
+ """
+ match=re.match(
+ r"\s*([-+]?[0-9.]+)\s+({currency})".format(currency=CURRENCY_RE),string
+ )
+ ifnotmatch:
+ raiseValueError("Invalid string for amount: '{}'".format(string))
+ number,currency=match.group(1,2)
+ returnAmount(D(number),currency)
+
@@ -3233,7 +3338,7 @@
-beancount.core.amount.Amount.to_string(self, dformat=<beancount.core.display_context.DisplayFormatter object at 0x78e868ae5e50>)
+beancount.core.amount.Amount.to_string(self,dformat=<beancount.core.display_context.DisplayFormatterobjectat0x78fcde6b7290>)
@@ -3276,19 +3381,22 @@
Source code in beancount/core/amount.py
-
def to_string(self, dformat=DEFAULT_FORMATTER):
- """Convert an Amount instance to a printable string.
-
- Args:
- dformat: An instance of DisplayFormatter.
- Returns:
- A formatted string of the quantized amount and symbol.
- """
- number_fmt = (dformat.format(self.number, self.currency)
- if isinstance(self.number, Decimal)
- else str(self.number))
- return "{} {}".format(number_fmt, self.currency)
-
+
 def to_string(self, dformat=DEFAULT_FORMATTER):
+"""Convert an Amount instance to a printable string.
+
+ Args:
+ dformat: An instance of DisplayFormatter.
+ Returns:
+ A formatted string of the quantized amount and symbol.
+ """
+ if isinstance(self.number, Decimal):
+ number_fmt = dformat.format(self.number, self.currency)
+ elif self.number is MISSING:
+ number_fmt = ""
+ else:
+ number_fmt = str(self.number)
+ return "{} {}".format(number_fmt, self.currency)
+
@staticmethod
-def from_string(string):
- """Create an amount from a string.
-
- This is a miniature parser used for building tests.
-
- Args:
- string: A string of <number> <currency>.
- Returns:
- A new instance of Amount.
- """
- match = re.match(r'\s*([-+]?[0-9.]+)\s+({currency})'.format(currency=CURRENCY_RE),
- string)
- if not match:
- raise ValueError("Invalid string for amount: '{}'".format(string))
- number, currency = match.group(1, 2)
- return Amount(D(number), currency)
-
+
@staticmethod
+deffrom_string(string):
+"""Create an amount from a string.
+
+ This is a miniature parser used for building tests.
+
+ Args:
+ string: A string of <number> <currency>.
+ Returns:
+ A new instance of Amount.
+ """
+ match=re.match(
+ r"\s*([-+]?[0-9.]+)\s+({currency})".format(currency=CURRENCY_RE),string
+ )
+ ifnotmatch:
+ raiseValueError("Invalid string for amount: '{}'".format(string))
+ number,currency=match.group(1,2)
+ returnAmount(D(number),currency)
+
def abs(amount):
- """Return the absolute value of the given amount.
-
- Args:
- amount: An instance of Amount.
- Returns:
- An instance of Amount.
- """
- return (amount
- if amount.number >= ZERO
- else Amount(-amount.number, amount.currency))
-
+
defabs(amount):
+"""Return the absolute value of the given amount.
+
+ Args:
+ amount: An instance of Amount.
+ Returns:
+ An instance of Amount.
+ """
+ returnamountifamount.number>=ZEROelseAmount(-amount.number,amount.currency)
+
def add(amount1, amount2):
- """Add the given amounts with the same currency.
-
- Args:
- amount1: An instance of Amount.
- amount2: An instance of Amount.
- Returns:
- An instance of Amount, with the sum of the two amounts' numbers, in the same
- currency.
- """
- assert isinstance(amount1.number, Decimal), (
- "Amount1's number is not a Decimal instance: {}".format(amount1.number))
- assert isinstance(amount2.number, Decimal), (
- "Amount2's number is not a Decimal instance: {}".format(amount2.number))
- if amount1.currency != amount2.currency:
- raise ValueError(
- "Unmatching currencies for operation on {} and {}".format(
- amount1, amount2))
- return Amount(amount1.number + amount2.number, amount1.currency)
-
+
defadd(amount1,amount2):
+"""Add the given amounts with the same currency.
+
+ Args:
+ amount1: An instance of Amount.
+ amount2: An instance of Amount.
+ Returns:
+ An instance of Amount, with the sum of the two amounts' numbers, in the same
+ currency.
+ """
+ assertisinstance(
+ amount1.number,Decimal
+ ),"Amount1's number is not a Decimal instance: {}".format(amount1.number)
+ assertisinstance(
+ amount2.number,Decimal
+ ),"Amount2's number is not a Decimal instance: {}".format(amount2.number)
+ ifamount1.currency!=amount2.currency:
+ raiseValueError(
+ "Unmatching currencies for operation on {} and {}".format(amount1,amount2)
+ )
+ returnAmount(amount1.number+amount2.number,amount1.currency)
+
def div(amount, number):
- """Divide the given amount by a number.
-
- Args:
- amount: An instance of Amount.
- number: A decimal number.
- Returns:
- An Amount, with the same currency, but with amount units divided by 'number'.
- """
- assert isinstance(amount.number, Decimal), (
- "Amount's number is not a Decimal instance: {}".format(amount.number))
- assert isinstance(number, Decimal), (
- "Number is not a Decimal instance: {}".format(number))
- return Amount(amount.number / number, amount.currency)
-
+
defdiv(amount,number):
+"""Divide the given amount by a number.
+
+ Args:
+ amount: An instance of Amount.
+ number: A decimal number.
+ Returns:
+ An Amount, with the same currency, but with amount units divided by 'number'.
+ """
+ assertisinstance(
+ amount.number,Decimal
+ ),"Amount's number is not a Decimal instance: {}".format(amount.number)
+ assertisinstance(number,Decimal),"Number is not a Decimal instance: {}".format(
+ number
+ )
+ returnAmount(amount.number/number,amount.currency)
+
@staticmethod
-def from_string(string):
- """Create an amount from a string.
-
- This is a miniature parser used for building tests.
-
- Args:
- string: A string of <number> <currency>.
- Returns:
- A new instance of Amount.
- """
- match = re.match(r'\s*([-+]?[0-9.]+)\s+({currency})'.format(currency=CURRENCY_RE),
- string)
- if not match:
- raise ValueError("Invalid string for amount: '{}'".format(string))
- number, currency = match.group(1, 2)
- return Amount(D(number), currency)
-
+
@staticmethod
+deffrom_string(string):
+"""Create an amount from a string.
+
+ This is a miniature parser used for building tests.
+
+ Args:
+ string: A string of <number> <currency>.
+ Returns:
+ A new instance of Amount.
+ """
+ match=re.match(
+ r"\s*([-+]?[0-9.]+)\s+({currency})".format(currency=CURRENCY_RE),string
+ )
+ ifnotmatch:
+ raiseValueError("Invalid string for amount: '{}'".format(string))
+ number,currency=match.group(1,2)
+ returnAmount(D(number),currency)
+
def mul(amount, number):
- """Multiply the given amount by a number.
-
- Args:
- amount: An instance of Amount.
- number: A decimal number.
- Returns:
- An Amount, with the same currency, but with 'number' times units.
- """
- assert isinstance(amount.number, Decimal), (
- "Amount's number is not a Decimal instance: {}".format(amount.number))
- assert isinstance(number, Decimal), (
- "Number is not a Decimal instance: {}".format(number))
- return Amount(amount.number * number, amount.currency)
-
+
defmul(amount,number):
+"""Multiply the given amount by a number.
+
+ Args:
+ amount: An instance of Amount.
+ number: A decimal number.
+ Returns:
+ An Amount, with the same currency, but with 'number' times units.
+ """
+ assertisinstance(
+ amount.number,Decimal
+ ),"Amount's number is not a Decimal instance: {}".format(amount.number)
+ assertisinstance(number,Decimal),"Number is not a Decimal instance: {}".format(
+ number
+ )
+ returnAmount(amount.number*number,amount.currency)
+
def sortkey(amount):
- """A comparison function that sorts by currency first.
-
- Args:
- amount: An instance of Amount.
- Returns:
- A sort key, composed of the currency first and then the number.
- """
- return (amount.currency, amount.number)
-
+
defsortkey(amount):
+"""A comparison function that sorts by currency first.
+
+ Args:
+ amount: An instance of Amount.
+ Returns:
+ A sort key, composed of the currency first and then the number.
+ """
+ return(amount.currency,amount.number)
+
def sub(amount1, amount2):
- """Subtract the given amounts with the same currency.
-
- Args:
- amount1: An instance of Amount.
- amount2: An instance of Amount.
- Returns:
- An instance of Amount, with the difference between the two amount's
- numbers, in the same currency.
- """
- assert isinstance(amount1.number, Decimal), (
- "Amount1's number is not a Decimal instance: {}".format(amount1.number))
- assert isinstance(amount2.number, Decimal), (
- "Amount2's number is not a Decimal instance: {}".format(amount2.number))
- if amount1.currency != amount2.currency:
- raise ValueError(
- "Unmatching currencies for operation on {} and {}".format(
- amount1, amount2))
- return Amount(amount1.number - amount2.number, amount1.currency)
-
+
defsub(amount1,amount2):
+"""Subtract the given amounts with the same currency.
+
+ Args:
+ amount1: An instance of Amount.
+ amount2: An instance of Amount.
+ Returns:
+ An instance of Amount, with the difference between the two amount's
+ numbers, in the same currency.
+ """
+ assertisinstance(
+ amount1.number,Decimal
+ ),"Amount1's number is not a Decimal instance: {}".format(amount1.number)
+ assertisinstance(
+ amount2.number,Decimal
+ ),"Amount2's number is not a Decimal instance: {}".format(amount2.number)
+ ifamount1.currency!=amount2.currency:
+ raiseValueError(
+ "Unmatching currencies for operation on {} and {}".format(amount1,amount2)
+ )
+ returnAmount(amount1.number-amount2.number,amount1.currency)
+
def compare_entries(entries1, entries2):
- """Compare two lists of entries. This is used for testing.
-
- The entries are compared with disregard for their file location.
-
- Args:
- entries1: A list of directives of any type.
- entries2: Another list of directives of any type.
- Returns:
- A tuple of (success, not_found1, not_found2), where the fields are:
- success: A boolean, true if all the values are equal.
- missing1: A list of directives from 'entries1' not found in
- 'entries2'.
- missing2: A list of directives from 'entries2' not found in
- 'entries1'.
- Raises:
- ValueError: If a duplicate entry is found.
- """
- hashes1, errors1 = hash_entries(entries1, exclude_meta=True)
- hashes2, errors2 = hash_entries(entries2, exclude_meta=True)
- keys1 = set(hashes1.keys())
- keys2 = set(hashes2.keys())
-
- if errors1 or errors2:
- error = (errors1 + errors2)[0]
- raise ValueError(str(error))
-
- same = keys1 == keys2
- missing1 = data.sorted([hashes1[key] for key in keys1 - keys2])
- missing2 = data.sorted([hashes2[key] for key in keys2 - keys1])
- return (same, missing1, missing2)
-
+
defcompare_entries(entries1,entries2):
+"""Compare two lists of entries. This is used for testing.
+
+ The entries are compared with disregard for their file location.
+
+ Args:
+ entries1: A list of directives of any type.
+ entries2: Another list of directives of any type.
+ Returns:
+ A tuple of (success, not_found1, not_found2), where the fields are:
+ success: A boolean, true if all the values are equal.
+ missing1: A list of directives from 'entries1' not found in
+ 'entries2'.
+ missing2: A list of directives from 'entries2' not found in
+ 'entries1'.
+ Raises:
+ ValueError: If a duplicate entry is found.
+ """
+ hashes1,errors1=hash_entries(entries1,exclude_meta=True)
+ hashes2,errors2=hash_entries(entries2,exclude_meta=True)
+ keys1=set(hashes1.keys())
+ keys2=set(hashes2.keys())
+
+ iferrors1orerrors2:
+ error=(errors1+errors2)[0]
+ raiseValueError(str(error))
+
+ same=keys1==keys2
+ missing1=data.sorted([hashes1[key]forkeyinkeys1-keys2])
+ missing2=data.sorted([hashes2[key]forkeyinkeys2-keys1])
+ return(same,missing1,missing2)
+
def excludes_entries(subset_entries, entries):
- """Check that a list of entries does not appear in another list.
-
- Args:
- subset_entries: The set of entries to look for in 'entries'.
- entries: The larger list of entries that should not include 'subset_entries'.
- Returns:
- A boolean and a list of entries that are not supposed to appear.
- Raises:
- ValueError: If a duplicate entry is found.
- """
- subset_hashes, subset_errors = hash_entries(subset_entries, exclude_meta=True)
- subset_keys = set(subset_hashes.keys())
- hashes, errors = hash_entries(entries, exclude_meta=True)
- keys = set(hashes.keys())
-
- if subset_errors or errors:
- error = (subset_errors + errors)[0]
- raise ValueError(str(error))
-
- intersection = keys.intersection(subset_keys)
- excludes = not bool(intersection)
- extra = data.sorted([subset_hashes[key] for key in intersection])
- return (excludes, extra)
-
+
defexcludes_entries(subset_entries,entries):
+"""Check that a list of entries does not appear in another list.
+
+ Args:
+ subset_entries: The set of entries to look for in 'entries'.
+ entries: The larger list of entries that should not include 'subset_entries'.
+ Returns:
+ A boolean and a list of entries that are not supposed to appear.
+ Raises:
+ ValueError: If a duplicate entry is found.
+ """
+ subset_hashes,subset_errors=hash_entries(subset_entries,exclude_meta=True)
+ subset_keys=set(subset_hashes.keys())
+ hashes,errors=hash_entries(entries,exclude_meta=True)
+ keys=set(hashes.keys())
+
+ ifsubset_errorsorerrors:
+ error=(subset_errors+errors)[0]
+ raiseValueError(str(error))
+
+ intersection=keys.intersection(subset_keys)
+ excludes=notbool(intersection)
+ extra=data.sorted([subset_hashes[key]forkeyinintersection])
+ return(excludes,extra)
+
def hash_entries(entries, exclude_meta=False):
- """Compute unique hashes of each of the entries and return a map of them.
-
- This is used for comparisons between sets of entries.
-
- Args:
- entries: A list of directives.
- exclude_meta: If set, exclude the metadata from the hash. Use this for
- unit tests comparing entries coming from different sources as the
- filename and lineno will be distinct. However, when you're using the
- hashes to uniquely identify transactions, you want to include the
- filenames and line numbers (the default).
- Returns:
- A dict of hash-value to entry (for all entries) and a list of errors.
- Errors are created when duplicate entries are found.
- """
- entry_hash_dict = {}
- errors = []
- num_legal_duplicates = 0
- for entry in entries:
- hash_ = hash_entry(entry, exclude_meta)
-
- if hash_ in entry_hash_dict:
- if isinstance(entry, Price):
- # Note: Allow duplicate Price entries, they should be common
- # because of the nature of stock markets (if they're closed, the
- # data source is likely to return an entry for the previously
- # available date, which may already have been fetched).
- num_legal_duplicates += 1
- else:
- other_entry = entry_hash_dict[hash_]
- errors.append(
- CompareError(entry.meta,
- "Duplicate entry: {} == {}".format(entry, other_entry),
- entry))
- entry_hash_dict[hash_] = entry
-
- if not errors:
- assert len(entry_hash_dict) + num_legal_duplicates == len(entries), (
- len(entry_hash_dict), len(entries), num_legal_duplicates)
- return entry_hash_dict, errors
-
+
defhash_entries(entries,exclude_meta=False):
+"""Compute unique hashes of each of the entries and return a map of them.
+
+ This is used for comparisons between sets of entries.
+
+ Args:
+ entries: A list of directives.
+ exclude_meta: If set, exclude the metadata from the hash. Use this for
+ unit tests comparing entries coming from different sources as the
+ filename and lineno will be distinct. However, when you're using the
+ hashes to uniquely identify transactions, you want to include the
+ filenames and line numbers (the default).
+ Returns:
+ A dict of hash-value to entry (for all entries) and a list of errors.
+ Errors are created when duplicate entries are found.
+ """
+ entry_hash_dict={}
+ errors=[]
+ num_legal_duplicates=0
+ forentryinentries:
+ hash_=hash_entry(entry,exclude_meta)
+
+ ifhash_inentry_hash_dict:
+ ifisinstance(entry,Price):
+ # Note: Allow duplicate Price entries, they should be common
+ # because of the nature of stock markets (if they're closed, the
+ # data source is likely to return an entry for the previously
+ # available date, which may already have been fetched).
+ num_legal_duplicates+=1
+ else:
+ other_entry=entry_hash_dict[hash_]
+ errors.append(
+ CompareError(
+ entry.meta,
+ "Duplicate entry: {} == {}".format(entry,other_entry),
+ entry,
+ )
+ )
+ entry_hash_dict[hash_]=entry
+
+ ifnoterrors:
+ assertlen(entry_hash_dict)+num_legal_duplicates==len(entries),(
+ len(entry_hash_dict),
+ len(entries),
+ num_legal_duplicates,
+ )
+ returnentry_hash_dict,errors
+
def hash_entry(entry, exclude_meta=False):
- """Compute the stable hash of a single entry.
-
- Args:
- entry: A directive instance.
- exclude_meta: If set, exclude the metadata from the hash. Use this for
- unit tests comparing entries coming from different sources as the
- filename and lineno will be distinct. However, when you're using the
- hashes to uniquely identify transactions, you want to include the
- filenames and line numbers (the default).
- Returns:
- A stable hexadecimal hash of this entry.
-
- """
- return stable_hash_namedtuple(entry,
- IGNORED_FIELD_NAMES if exclude_meta else frozenset())
-
+
defhash_entry(entry,exclude_meta=False):
+"""Compute the stable hash of a single entry.
+
+ Args:
+ entry: A directive instance.
+ exclude_meta: If set, exclude the metadata from the hash. Use this for
+ unit tests comparing entries coming from different sources as the
+ filename and lineno will be distinct. However, when you're using the
+ hashes to uniquely identify transactions, you want to include the
+ filenames and line numbers (the default).
+ Returns:
+ A stable hexadecimal hash of this entry.
+
+ """
+ returnstable_hash_namedtuple(
+ entry,IGNORED_FIELD_NAMESifexclude_metaelsefrozenset()
+ )
+
def includes_entries(subset_entries, entries):
- """Check if a list of entries is included in another list.
-
- Args:
- subset_entries: The set of entries to look for in 'entries'.
- entries: The larger list of entries that could include 'subset_entries'.
- Returns:
- A boolean and a list of missing entries.
- Raises:
- ValueError: If a duplicate entry is found.
- """
- subset_hashes, subset_errors = hash_entries(subset_entries, exclude_meta=True)
- subset_keys = set(subset_hashes.keys())
- hashes, errors = hash_entries(entries, exclude_meta=True)
- keys = set(hashes.keys())
-
- if subset_errors or errors:
- error = (subset_errors + errors)[0]
- raise ValueError(str(error))
-
- includes = subset_keys.issubset(keys)
- missing = data.sorted([subset_hashes[key] for key in subset_keys - keys])
- return (includes, missing)
-
+
defincludes_entries(subset_entries,entries):
+"""Check if a list of entries is included in another list.
+
+ Args:
+ subset_entries: The set of entries to look for in 'entries'.
+ entries: The larger list of entries that could include 'subset_entries'.
+ Returns:
+ A boolean and a list of missing entries.
+ Raises:
+ ValueError: If a duplicate entry is found.
+ """
+ subset_hashes,subset_errors=hash_entries(subset_entries,exclude_meta=True)
+ subset_keys=set(subset_hashes.keys())
+ hashes,errors=hash_entries(entries,exclude_meta=True)
+ keys=set(hashes.keys())
+
+ ifsubset_errorsorerrors:
+ error=(subset_errors+errors)[0]
+ raiseValueError(str(error))
+
+ includes=subset_keys.issubset(keys)
+ missing=data.sorted([subset_hashes[key]forkeyinsubset_keys-keys])
+ return(includes,missing)
+
def stable_hash_namedtuple(objtuple, ignore=frozenset()):
- """Hash the given namedtuple and its child fields.
-
- This iterates over all the members of objtuple, skipping the attributes from
- the 'ignore' set, and computes a unique hash string code. If the elements
- are lists or sets, sorts them for stability.
-
- Args:
- objtuple: A tuple object or other.
- ignore: A set of strings, attribute names to be skipped in
- computing a stable hash. For instance, circular references to objects
- or irrelevant data.
-
- """
- # Note: this routine is slow and would stand to be implemented in C.
- hashobj = hashlib.md5()
- for attr_name, attr_value in zip(objtuple._fields, objtuple):
- if attr_name in ignore:
- continue
- if isinstance(attr_value, (list, set, frozenset)):
- subhashes = set()
- for element in attr_value:
- if isinstance(element, tuple):
- subhashes.add(stable_hash_namedtuple(element, ignore))
- else:
- md5 = hashlib.md5()
- md5.update(str(element).encode())
- subhashes.add(md5.hexdigest())
- for subhash in sorted(subhashes):
- hashobj.update(subhash.encode())
- else:
- hashobj.update(str(attr_value).encode())
- return hashobj.hexdigest()
-
+
defstable_hash_namedtuple(objtuple,ignore=frozenset()):
+"""Hash the given namedtuple and its child fields.
+
+ This iterates over all the members of objtuple, skipping the attributes from
+ the 'ignore' set, and computes a unique hash string code. If the elements
+ are lists or sets, sorts them for stability.
+
+ Args:
+ objtuple: A tuple object or other.
+ ignore: A set of strings, attribute names to be skipped in
+ computing a stable hash. For instance, circular references to objects
+ or irrelevant data.
+
+ """
+ # Note: this routine is slow and would stand to be implemented in C.
+ hashobj=hashlib.md5()
+ forattr_name,attr_valueinzip(objtuple._fields,objtuple):
+ ifattr_nameinignore:
+ continue
+ ifisinstance(attr_value,(list,set,frozenset)):
+ subhashes=[]
+ forelementinattr_value:
+ ifisinstance(element,tuple):
+ subhashes.append(stable_hash_namedtuple(element,ignore))
+ else:
+ md5=hashlib.md5()
+ md5.update(str(element).encode())
+ subhashes.append(md5.hexdigest())
+ forsubhashinsorted(subhashes):
+ hashobj.update(subhash.encode())
+ else:
+ hashobj.update(str(attr_value).encode())
+ returnhashobj.hexdigest()
+
def convert_amount(amt, target_currency, price_map, date=None, via=None):
- """Return the market value of an Amount in a particular currency.
-
- In addition, if a conversion rate isn't available, you can provide a list of
- currencies to attempt to synthesize a rate for via implied rates.
-
- Args:
- amt: An instance of Amount.
- target_currency: The target currency to convert to.
- price_map: A dict of prices, as built by prices.build_price_map().
- date: A datetime.date instance to evaluate the value at, or None.
- via: A list of currencies to attempt to synthesize an implied rate if the
- direct conversion fails.
- Returns:
- An Amount, either with a successful value currency conversion, or if we
- could not convert the value, the amount itself, unmodified.
-
- """
- # First, attempt to convert directly. This should be the most
- # straightforward conversion.
- base_quote = (amt.currency, target_currency)
- _, rate = prices.get_price(price_map, base_quote, date)
- if rate is not None:
- # On success, just make the conversion directly.
- return Amount(amt.number * rate, target_currency)
- elif via:
- assert isinstance(via, (tuple, list))
-
- # A price is unavailable, attempt to convert via cost/price currency
- # hop, if the value currency isn't the target currency.
- for implied_currency in via:
- if implied_currency == target_currency:
- continue
- base_quote1 = (amt.currency, implied_currency)
- _, rate1 = prices.get_price(price_map, base_quote1, date)
- if rate1 is not None:
- base_quote2 = (implied_currency, target_currency)
- _, rate2 = prices.get_price(price_map, base_quote2, date)
- if rate2 is not None:
- return Amount(amt.number * rate1 * rate2, target_currency)
-
- # We failed to infer a conversion rate; return the amt.
- return amt
-
+
defconvert_amount(amt,target_currency,price_map,date=None,via=None):
+"""Return the market value of an Amount in a particular currency.
+
+ In addition, if a conversion rate isn't available, you can provide a list of
+ currencies to attempt to synthesize a rate for via implied rates.
+
+ Args:
+ amt: An instance of Amount.
+ target_currency: The target currency to convert to.
+ price_map: A dict of prices, as built by prices.build_price_map().
+ date: A datetime.date instance to evaluate the value at, or None.
+ via: A list of currencies to attempt to synthesize an implied rate if the
+ direct conversion fails.
+ Returns:
+ An Amount, either with a successful value currency conversion, or if we
+ could not convert the value, the amount itself, unmodified.
+
+ """
+ # First, attempt to convert directly. This should be the most
+ # straightforward conversion.
+ base_quote=(amt.currency,target_currency)
+ _,rate=prices.get_price(price_map,base_quote,date)
+ ifrateisnotNone:
+ # On success, just make the conversion directly.
+ returnAmount(amt.number*rate,target_currency)
+ elifvia:
+ assertisinstance(via,(tuple,list))
+
+ # A price is unavailable, attempt to convert via cost/price currency
+ # hop, if the value currency isn't the target currency.
+ forimplied_currencyinvia:
+ ifimplied_currency==target_currency:
+ continue
+ base_quote1=(amt.currency,implied_currency)
+ _,rate1=prices.get_price(price_map,base_quote1,date)
+ ifrate1isnotNone:
+ base_quote2=(implied_currency,target_currency)
+ _,rate2=prices.get_price(price_map,base_quote2,date)
+ ifrate2isnotNone:
+ returnAmount(amt.number*rate1*rate2,target_currency)
+
+ # We failed to infer a conversion rate; return the amt.
+ returnamt
+
def convert_position(pos, target_currency, price_map, date=None):
- """Return the market value of a Position or Posting in a particular currency.
-
- In addition, if the rate from the position's currency to target_currency
- isn't available, an attempt is made to convert from its cost currency, if
- one is available.
-
- Args:
- pos: An instance of Position or Posting, equivalently.
- target_currency: The target currency to convert to.
- price_map: A dict of prices, as built by prices.build_price_map().
- date: A datetime.date instance to evaluate the value at, or None.
- Returns:
- An Amount, either with a successful value currency conversion, or if we
- could not convert the value, just the units, unmodified. (See get_value()
- above for details.)
- """
- cost = pos.cost
- value_currency = (
- (isinstance(cost, Cost) and cost.currency) or
- (hasattr(pos, 'price') and pos.price and pos.price.currency) or
- None)
- return convert_amount(pos.units, target_currency, price_map,
- date=date, via=(value_currency,))
-
+
defconvert_position(pos,target_currency,price_map,date=None):
+"""Return the market value of a Position or Posting in a particular currency.
+
+ In addition, if the rate from the position's currency to target_currency
+ isn't available, an attempt is made to convert from its cost currency, if
+ one is available.
+
+ Args:
+ pos: An instance of Position or Posting, equivalently.
+ target_currency: The target currency to convert to.
+ price_map: A dict of prices, as built by prices.build_price_map().
+ date: A datetime.date instance to evaluate the value at, or None.
+ Returns:
+ An Amount, either with a successful value currency conversion, or if we
+ could not convert the value, just the units, unmodified. (See get_value()
+ above for details.)
+ """
+ cost=pos.cost
+ value_currency=(
+ (isinstance(cost,Cost)andcost.currency)
+ or(hasattr(pos,"price")andpos.priceandpos.price.currency)
+ orNone
+ )
+ returnconvert_amount(
+ pos.units,target_currency,price_map,date=date,via=(value_currency,)
+ )
+
def get_cost(pos):
- """Return the total cost of a Position or Posting.
-
- Args:
- pos: An instance of Position or Posting, equivalently.
- Returns:
- An Amount.
- """
- assert isinstance(pos, Position) or type(pos).__name__ == 'Posting'
- cost = pos.cost
- return (Amount(cost.number * pos.units.number, cost.currency)
- if (isinstance(cost, Cost) and isinstance(cost.number, Decimal))
- else pos.units)
-
+
defget_cost(pos):
+"""Return the total cost of a Position or Posting.
+
+ Args:
+ pos: An instance of Position or Posting, equivalently.
+ Returns:
+ An Amount.
+ """
+ assertisinstance(pos,Position)ortype(pos).__name__=="Posting"
+ cost=pos.cost
+ return(
+ Amount(cost.number*pos.units.number,cost.currency)
+ if(isinstance(cost,Cost)andisinstance(cost.number,Decimal))
+ elsepos.units
+ )
+
def get_units(pos):
- """Return the units of a Position or Posting.
-
- Args:
- pos: An instance of Position or Posting, equivalently.
- Returns:
- An Amount.
- """
- assert isinstance(pos, Position) or type(pos).__name__ == 'Posting'
- return pos.units
-
+
defget_units(pos):
+"""Return the units of a Position or Posting.
+
+ Args:
+ pos: An instance of Position or Posting, equivalently.
+ Returns:
+ An Amount.
+ """
+ assertisinstance(pos,Position)ortype(pos).__name__=="Posting"
+ returnpos.units
+
Note that if the position is not held at cost, this does not convert
anything, even if a price is available in the 'price_map'. We don't specify
a target currency here. If you're attempting to make such a conversion, see
-convert_*() functions below.
+convert_*() functions below. However, if the object is a posting and it
+has a price, we will use that price to infer the target currency and those
+will be converted.
@@ -5025,6 +5154,9 @@
pos – An instance of Position or Posting, equivalently.
price_map – A dict of prices, as built by prices.build_price_map().
date – A datetime.date instance to evaluate the value at, or None.
+
output_date_prices – An optional output list of (date, price). If this list
+is provided, it will be appended to (mutated) to output the price
+pulled in making the conversions.
@@ -5053,46 +5185,55 @@
Source code in beancount/core/convert.py
-
def get_value(pos, price_map, date=None):
- """Return the market value of a Position or Posting.
-
- Note that if the position is not held at cost, this does not convert
- anything, even if a price is available in the 'price_map'. We don't specify
- a target currency here. If you're attempting to make such a conversion, see
- ``convert_*()`` functions below.
-
- Args:
- pos: An instance of Position or Posting, equivalently.
- price_map: A dict of prices, as built by prices.build_price_map().
- date: A datetime.date instance to evaluate the value at, or None.
- Returns:
- An Amount, either with a successful value currency conversion, or if we
- could not convert the value, just the units, unmodified. This is designed
- so that you could reduce an inventory with this and not lose any
- information silently in case of failure to convert (possibly due to an
- empty price map). Compare the returned currency to that of the input
- position if you need to check for success.
- """
- assert isinstance(pos, Position) or type(pos).__name__ == 'Posting'
- units = pos.units
- cost = pos.cost
-
- # Try to infer what the cost/price currency should be.
- value_currency = (
- (isinstance(cost, Cost) and cost.currency) or
- (hasattr(pos, 'price') and pos.price and pos.price.currency) or
- None)
-
- if isinstance(value_currency, str):
- # We have a value currency; hit the price database.
- base_quote = (units.currency, value_currency)
- _, price_number = prices.get_price(price_map, base_quote, date)
- if price_number is not None:
- return Amount(units.number * price_number, value_currency)
-
- # We failed to infer a conversion rate; return the units.
- return units
-
+
defget_value(pos,price_map,date=None,output_date_prices=None):
+"""Return the market value of a Position or Posting.
+
+ Note that if the position is not held at cost, this does not convert
+ anything, even if a price is available in the 'price_map'. We don't specify
+ a target currency here. If you're attempting to make such a conversion, see
+ ``convert_*()`` functions below. However, if the object is a posting and it
+ has a price, we will use that price to infer the target currency and those
+ will be converted.
+
+ Args:
+ pos: An instance of Position or Posting, equivalently.
+ price_map: A dict of prices, as built by prices.build_price_map().
+ date: A datetime.date instance to evaluate the value at, or None.
+ output_date_prices: An optional output list of (date, price). If this list
+ is provided, it will be appended to (mutated) to output the price
+ pulled in making the conversions.
+ Returns:
+ An Amount, either with a successful value currency conversion, or if we
+ could not convert the value, just the units, unmodified. This is designed
+ so that you could reduce an inventory with this and not lose any
+ information silently in case of failure to convert (possibly due to an
+ empty price map). Compare the returned currency to that of the input
+ position if you need to check for success.
+
+ """
+ assertisinstance(pos,Position)ortype(pos).__name__=="Posting"
+ units=pos.units
+ cost=pos.cost
+
+ # Try to infer what the cost/price currency should be.
+ value_currency=(
+ (isinstance(cost,Cost)andcost.currency)
+ or(hasattr(pos,"price")andpos.priceandpos.price.currency)
+ orNone
+ )
+
+ ifisinstance(value_currency,str):
+ # We have a value currency; hit the price database.
+ base_quote=(units.currency,value_currency)
+ price_date,price_number=prices.get_price(price_map,base_quote,date)
+ ifoutput_date_pricesisnotNone:
+ output_date_prices.append((price_date,price_number))
+ ifprice_numberisnotNone:
+ returnAmount(units.number*price_number,value_currency)
+
+ # We failed to infer a conversion rate; return the units.
+ returnunits
+
def get_weight(pos):
- """Return the weight of a Position or Posting.
-
- This is the amount that will need to be balanced from a posting of a
- transaction.
-
- This is a *key* element of the semantics of transactions in this software. A
- balance amount is the amount used to check the balance of a transaction.
- Here are all relevant examples, with the amounts used to balance the
- postings:
-
- Assets:Account 5234.50 USD -> 5234.50 USD
- Assets:Account 3877.41 EUR @ 1.35 USD -> 5234.50 USD
- Assets:Account 10 HOOL {523.45 USD} -> 5234.50 USD
- Assets:Account 10 HOOL {523.45 USD} @ 545.60 CAD -> 5234.50 USD
-
- Args:
- pos: An instance of Position or Posting, equivalently.
- Returns:
- An Amount.
- """
- assert isinstance(pos, Position) or type(pos).__name__ == 'Posting'
- units = pos.units
- cost = pos.cost
-
- # It the object has a cost, use that as the weight, to balance.
- if isinstance(cost, Cost) and isinstance(cost.number, Decimal):
- weight = Amount(cost.number * pos.units.number, cost.currency)
- else:
- # Otherwise use the postings.
- weight = units
-
- # Unless there is a price available; use that if present.
- if not isinstance(pos, Position):
- price = pos.price
- if price is not None:
- # Note: Here we could assert that price.currency == units.currency.
- if price.number is MISSING or units.number is MISSING:
- converted_number = MISSING
- else:
- converted_number = price.number * units.number
- weight = Amount(converted_number, price.currency)
-
- return weight
-
+
defget_weight(pos):
+"""Return the weight of a Position or Posting.
+
+ This is the amount that will need to be balanced from a posting of a
+ transaction.
+
+ This is a *key* element of the semantics of transactions in this software. A
+ balance amount is the amount used to check the balance of a transaction.
+ Here are all relevant examples, with the amounts used to balance the
+ postings:
+
+ Assets:Account 5234.50 USD -> 5234.50 USD
+ Assets:Account 3877.41 EUR @ 1.35 USD -> 5234.50 USD
+ Assets:Account 10 HOOL {523.45 USD} -> 5234.50 USD
+ Assets:Account 10 HOOL {523.45 USD} @ 545.60 CAD -> 5234.50 USD
+
+ Args:
+ pos: An instance of Position or Posting, equivalently.
+ Returns:
+ An Amount.
+ """
+ assertisinstance(pos,Position)ortype(pos).__name__=="Posting"
+ units=pos.units
+ cost=pos.cost
+
+ # If the object has a cost, use that as the weight, to balance.
+ ifisinstance(cost,Cost)andisinstance(cost.number,Decimal):
+ weight=Amount(cost.number*pos.units.number,cost.currency)
+ else:
+ # Otherwise use the postings.
+ weight=units
+
+ # Unless there is a price available; use that if present.
+ ifnotisinstance(pos,Position):
+ price=pos.price
+ ifpriceisnotNone:
+ # Note: Here we could assert that price.currency == units.currency.
+ ifprice.numberisMISSINGorunits.numberisMISSING:
+ converted_number=MISSING
+ else:
+ converted_number=price.number*units.number
+ weight=Amount(converted_number,price.currency)
+
+ returnweight
+
A "check the balance of this account" directive. This directive asserts that
+the declared account should have a known number of units of a particular
+currency at the beginning of its date. This is essentially an assertion, and
+corresponds to the final "Statement Balance" line of a real-world statement.
+These assertions act as checkpoints to help ensure that you have entered your
+transactions correctly.
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
account
+
str
+
A string, the account whose balance to check at the given date.
+
+
+
amount
+
Amount
+
An Amount, the number of units of the given currency you're
+expecting 'account' to have at this date.
+
+
+
diff_amount
+
Optional[beancount.core.amount.Amount]
+
None if the balance check succeeds. This value is set to
+an Amount instance if the balance fails, the amount of the difference.
+
+
+
tolerance
+
Optional[decimal.Decimal]
+
A Decimal object, the amount of tolerance to use in the
+verification.
An optional commodity declaration directive. Commodities generally do not need
+to be declared, but they may, and this is mainly created as intended to be
+used to attach meta-data on a commodity name. Whenever a plugin needs
+per-commodity meta-data, you would define such a commodity directive. Another
+use is to define a commodity that isn't otherwise (yet) used anywhere in an
+input file. (At the moment the date is meaningless but is specified for
+coherence with all the other directives; if you can think of a good use case,
+let us know).
A custom directive. This directive can be used to implement new experimental
+dated features in the Beancount file. This is meant as an intermediate measure
+to be used when you would need to implement a new directive in a plugin. These
+directives will be parsed liberally... any list of tokens are supported. All
+that is required is some unique name for them that acts as a "type". These
+directives are included in the stream and a plugin should be able to gather
+them.
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
type
+
str
+
A string that represents the type of the directive.
+
+
+
values
+
List
+
A list of values of various simple types supported by the grammar.
+(Note that this list is not enforced to be consistent for all directives
+of the same type by the parser.)
A document file declaration directive. This directive is used to attach a
+statement to an account, at a particular date. A typical usage would be to
+render PDF files or scans of your bank statements into the account's journal.
+While you can explicitly create those directives in the input syntax, it is
+much more convenient to provide Beancount with a root directory to search for
+filenames in a hierarchy mirroring the chart of accounts, filenames which
+should match the following dated format: "YYYY-MM-DD.*". See options for
+detail. Beancount will automatically create these documents directives based
+on the file hierarchy, and you can get them by parsing the list of entries.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
account
+
str
+
A string, the account which the statement or document is associated
+with.
+
+
+
filename
+
str
+
The absolute filename of the document file.
+
+
+
tags
+
Optional[Set]
+
A set of tag strings (without the '#'), or None, if an empty set.
+
+
+
links
+
Optional[Set]
+
A set of link strings (without the '^'), or None, if an empty set.
An "event value change" directive. These directives are used as string
+variables that have different values over time. You can use these to track an
+address, your location, your current employer, anything you like. The kind of
+reporting that is made of these generic events is based on days and a
+timeline. For instance, if you need to track the number of days you spend in
+each country or state, create a "location" event and whenever you travel, add
+an event directive to indicate its new value. You should be able to write
+simple scripts against those in order to compute if you were present somewhere
+for a particular number of days. Here's an illustrative example usage, in
+order to maintain your health insurance coverage in Canada, you need to be
+present in the country for 183 days or more, excluding trips of less than 30
+days. There is a similar test to be done in the US by aliens to figure out if
+they need to be considered as residents for tax purposes (the so-called
+"substantial presence test"). By integrating these directives into your
+bookkeeping, you can easily have a little program that computes the tests for
+you. This is, of course, entirely optional and somewhat auxiliary to the main
+purpose of double-entry bookkeeping, but correlates strongly with the
+transactions you insert in it, and so it's a really convenient thing to have
+in the same input file.
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
"type"
+
+
A short string, typically a single lowercase word, that defines a
+unique variable whose value changes over time. For example, 'location'.
+
+
+
description
+
str
+
A free-form string, the value of the variable as of the date
+of the transaction.
A note directive, a general note that is attached to an account. These are
+used to attach text at a particular date in a specific account. The notes can
+be anything; a typical use would be to jot down an answer from a phone call to
+the institution represented by the account. It should show up in an account's
+journal. If you don't want this rendered, use the comment syntax in the input
+file, which does not get parsed and stored.
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
account
+
str
+
A string, the account which the note is to be attached to. This is
+never None, notes always have an account they correspond to.
+
+
+
comment
+
str
+
A free-form string, the text of the note. This can be long if you
+want it to.
A string, the name of the account that is being opened.
+
+
+
currencies
+
List[str]
+
A list of strings, currencies that are allowed in this account.
+May be None, in which case it means that there are no restrictions on which
+currencies may be stored in this account.
+
+
+
booking
+
Optional[beancount.core.data.Booking]
+
A Booking enum, the booking method to use to disambiguate
+postings to this account (when zero or more than one postings match the
+specification), or None if not specified. In practice, this attribute
+should be left unspecified (None) in the vast majority of cases. See
+Booking below for a selection of valid methods.
A "pad this account with this other account" directive. This directive
+automatically inserts transactions that will make the next chronological
+balance directive succeed. It can be used to fill in missing date ranges of
+transactions, as a convenience. You don't have to use this, it's sugar coating
+in case you need it, while you're entering past history into your Ledger.
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
account
+
str
+
A string, the name of the account which needs to be filled.
+
+
+
source_account
+
str
+
A string, the name of the account which is used to debit from
+in order to fill 'account'.
Postings are contained in Transaction entries. These represent the individual
+legs of a transaction. Note: a posting may only appear within a single entry
+(multiple transactions may not share a Posting instance), and that's what the
+entry field should be set to.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
account
+
str
+
A string, the account that is modified by this posting.
+
+
+
units
+
Optional[beancount.core.amount.Amount]
+
An Amount, the units of the position, or None if it is to be
+inferred from the other postings in the transaction.
A Cost or CostSpec instance, the cost of the position.
+
+
+
price
+
Optional[beancount.core.amount.Amount]
+
An Amount, the price at which the position took place, or
+None, where not relevant. Providing a price member to a posting
+automatically adds a price in the prices database at the date of the
+transaction.
+
+
+
flag
+
Optional[str]
+
An optional flag, a one-character string or None, which is to be
+associated with the posting. Most postings don't have a flag, but it can
+be convenient to mark a particular posting as problematic or pending to
+be reconciled for a future import of its account.
+
+
+
meta
+
Optional[Dict[str, Any]]
+
A dict of strings to values, the metadata that was attached
+specifically to that posting, or None, if not provided. In practice, most
+of the instances will be unlikely to have metadata.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Posting.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Posting.__new__(_cls,account,units,cost,price,flag,meta)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Posting(account, units, cost, price, flag, meta)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Posting.__repr__(self)
+
+
+ special
+
+
+
A price declaration directive. This establishes the price of a currency in
+terms of another currency as of the directive's date. A history of the prices
+for each currency pairs is built and can be queried within the bookkeeping
+system. Note that because Beancount does not store any data at time-of-day
+resolution, it makes no sense to have multiple price directives at the same
+date. (Beancount will not attempt to solve this problem; this is beyond the
+general scope of double-entry bookkeeping and if you need to build a day
+trading system, you should probably use something else).
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
currency: A string, the currency that is being priced, e.g. HOOL.
+ amount: An instance of Amount, the number of units and currency that
+ 'currency' is worth, for instance 1200.12 USD.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Price.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Price.__new__(_cls,meta,date,currency,amount)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Price(meta, date, currency, amount)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Price.__repr__(self)
+
+
+ special
+
+
+
A named query declaration. This directive is used to create pre-canned queries
+that can then be automatically run or made available to the shell, or perhaps be
+rendered as part of a web interface. The purpose of this routine is to define
+useful queries for the context of the particular given Beancount input file.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
The date at which this query should be run. All directives following
+this date will be ignored automatically. This is essentially equivalent to
+the CLOSE modifier in the shell syntax.
+
+
+
name
+
str
+
A string, the unique identifier for the query.
+
+
+
query_string
+
str
+
The SQL query string to be run or made available.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Query.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Query.__new__(_cls,meta,date,name,query_string)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Query(meta, date, name, query_string)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Query.__repr__(self)
+
+
+ special
+
+
+
A transaction! This is the main type of object that we manipulate, and the
+entire reason this whole project exists in the first place, because
+representing these types of structures with a spreadsheet is difficult.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
flag
+
str
+
A single-character string or None. This user-specified string
+represents some custom/user-defined state of the transaction. You can use
+this for various purposes. Otherwise common, pre-defined flags are defined
+under beancount.core.flags, to flags transactions that are automatically
+generated.
+
+
+
payee
+
Optional[str]
+
A free-form string that identifies the payee, or None, if absent.
+
+
+
narration
+
str
+
A free-form string that provides a description for the transaction.
+All transactions have at least a narration string, this is never None.
+
+
+
tags
+
FrozenSet
+
A set of tag strings (without the '#'), or EMPTY_SET.
+
+
+
links
+
FrozenSet
+
A set of link strings (without the '^'), or EMPTY_SET.
+
+
+
postings
+
List[beancount.core.data.Posting]
+
A list of Posting instances, the legs of this transaction. See the
+doc under Posting above.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Transaction.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Transaction.__new__(_cls,meta,date,flag,payee,narration,tags,links,postings)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Transaction(meta, date, flag, payee, narration, tags, links, postings)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.Transaction.__repr__(self)
+
+
+ special
+
+
+
A pair of a Posting and its parent Transaction. This is inserted as
+temporaries in lists of postings-of-entries, which is the product of a
+realization.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
txn
+
Transaction
+
The parent Transaction instance.
+
+
+
posting
+
Posting
+
The Posting instance.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.TxnPosting.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.TxnPosting.__new__(_cls,txn,posting)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of TxnPosting(txn, posting)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.TxnPosting.__repr__(self)
+
+
+ special
+
+
+
A "check the balance of this account" directive. This directive asserts that
+the declared account should have a known number of units of a particular
+currency at the beginning of its date. This is essentially an assertion, and
+corresponds to the final "Statement Balance" line of a real-world statement.
+These assertions act as checkpoints to help ensure that you have entered your
+transactions correctly.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
account
+
str
+
A string, the account whose balance to check at the given date.
+
+
+
amount
+
Amount
+
An Amount, the number of units of the given currency you're
+expecting 'account' to have at this date.
+
+
+
diff_amount
+
Optional[beancount.core.amount.Amount]
+
None if the balance check succeeds. This value is set to
+an Amount instance if the balance fails, the amount of the difference.
+
+
+
tolerance
+
Optional[decimal.Decimal]
+
A Decimal object, the amount of tolerance to use in the
+verification.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Balance.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Balance.__new__(_cls,meta,date,account,amount,tolerance,diff_amount)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Balance(meta, date, account, amount, tolerance, diff_amount)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Balance.__repr__(self)
+
+
+ special
+
+
+
An optional commodity declaration directive. Commodities generally do not need
+to be declared, but they may, and this is mainly created as intended to be
+used to attach meta-data on a commodity name. Whenever a plugin needs
+per-commodity meta-data, you would define such a commodity directive. Another
+use is to define a commodity that isn't otherwise (yet) used anywhere in an
+input file. (At the moment the date is meaningless but is specified for
+coherence with all the other directives; if you can think of a good use case,
+let us know).
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
currency
+
str
+
A string, the commodity under consideration.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Commodity.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Commodity.__new__(_cls,meta,date,currency)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Commodity(meta, date, currency)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Commodity.__repr__(self)
+
+
+ special
+
+
+
A custom directive. This directive can be used to implement new experimental
+dated features in the Beancount file. This is meant as an intermediate measure
+to be used when you would need to implement a new directive in a plugin. These
+directives will be parsed liberally... any list of tokens are supported. All
+that is required is some unique name for them that acts as a "type". These
+directives are included in the stream and a plugin should be able to gather
+them.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
type
+
str
+
A string that represents the type of the directive.
+
+
+
values
+
List
+
A list of values of various simple types supported by the grammar.
+(Note that this list is not enforced to be consistent for all directives
+of the same type by the parser.)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Custom.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Custom.__new__(_cls,meta,date,type,values)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Custom(meta, date, type, values)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Custom.__repr__(self)
+
+
+ special
+
+
+
A document file declaration directive. This directive is used to attach a
+statement to an account, at a particular date. A typical usage would be to
+render PDF files or scans of your bank statements into the account's journal.
+While you can explicitly create those directives in the input syntax, it is
+much more convenient to provide Beancount with a root directory to search for
+filenames in a hierarchy mirroring the chart of accounts, filenames which
+should match the following dated format: "YYYY-MM-DD.*". See options for
+detail. Beancount will automatically create these documents directives based
+on the file hierarchy, and you can get them by parsing the list of entries.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
account
+
str
+
A string, the account which the statement or document is associated
+with.
+
+
+
filename
+
str
+
The absolute filename of the document file.
+
+
+
tags
+
Optional[Set]
+
A set of tag strings (without the '#'), or None, if an empty set.
+
+
+
links
+
Optional[Set]
+
A set of link strings (without the '^'), or None, if an empty set.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Document.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Document.__new__(_cls,meta,date,account,filename,tags,links)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Document(meta, date, account, filename, tags, links)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Document.__repr__(self)
+
+
+ special
+
+
+
An "event value change" directive. These directives are used as string
+variables that have different values over time. You can use these to track an
+address, your location, your current employer, anything you like. The kind of
+reporting that is made of these generic events is based on days and a
+timeline. For instance, if you need to track the number of days you spend in
+each country or state, create a "location" event and whenever you travel, add
+an event directive to indicate its new value. You should be able to write
+simple scripts against those in order to compute if you were present somewhere
+for a particular number of days. Here's an illustrative example usage, in
+order to maintain your health insurance coverage in Canada, you need to be
+present in the country for 183 days or more, excluding trips of less than 30
+days. There is a similar test to be done in the US by aliens to figure out if
+they need to be considered as residents for tax purposes (the so-called
+"substantial presence test"). By integrating these directives into your
+bookkeeping, you can easily have a little program that computes the tests for
+you. This is, of course, entirely optional and somewhat auxiliary to the main
+purpose of double-entry bookkeeping, but correlates strongly with the
+transactions you insert in it, and so it's a really convenient thing to have
+in the same input file.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
"type"
+
+
A short string, typically a single lowercase word, that defines a
+unique variable whose value changes over time. For example, 'location'.
+
+
+
description
+
str
+
A free-form string, the value of the variable as of the date
+of the transaction.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Event.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Event.__new__(_cls,meta,date,type,description)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Event(meta, date, type, description)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Event.__repr__(self)
+
+
+ special
+
+
+
A note directive, a general note that is attached to an account. These are
+used to attach text at a particular date in a specific account. The notes can
+be anything; a typical use would be to jot down an answer from a phone call to
+the institution represented by the account. It should show up in an account's
+journal. If you don't want this rendered, use the comment syntax in the input
+file, which does not get parsed and stored.
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
account
+
str
+
A string, the account which the note is to be attached to. This is
+never None, notes always have an account they correspond to.
+
+
+
comment
+
str
+
A free-form string, the text of the note. This can be long if you
+want it to.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Note.__getnewargs__(self)
+
+
+ special
+
+
+
+
+
+
+
Return self as a plain tuple. Used by copy and pickle.
+
+
+ Source code in beancount/core/data.py
+
def__getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return_tuple(self)
+
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Note.__new__(_cls,meta,date,account,comment,tags,links)
+
+
+ special
+ staticmethod
+
+
+
+
+
+
+
Create new instance of Note(meta, date, account, comment, tags, links)
+
+
+
+
+
+
+
+
+
+
+
+
+beancount.core.data.dtypes.Note.__repr__(self)
+
+
+ special
+
+
+
A string, the name of the account that is being opened.
+
+
+
currencies
+
List[str]
+
A list of strings, currencies that are allowed in this account.
+May be None, in which case it means that there are no restrictions on which
+currencies may be stored in this account.
+
+
+
booking
+
Optional[beancount.core.data.Booking]
+
A Booking enum, the booking method to use to disambiguate
+postings to this account (when zero or more than one postings match the
+specification), or None if not specified. In practice, this attribute will
+be should be left unspecified (None) in the vast majority of cases. See
+Booking below for a selection of valid methods.
A "pad this account with this other account" directive. This directive
+automatically inserts transactions that will make the next chronological
+balance directive succeeds. It can be used to fill in missing date ranges of
+transactions, as a convenience. You don't have to use this, it's sugar coating
+in case you need it, while you're entering past history into your Ledger.
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
account
+
str
+
A string, the name of the account which needs to be filled.
+
+
+
source_account
+
str
+
A string, the name of the account which is used to debit from
+in order to fill 'account'.
A price declaration directive. This establishes the price of a currency in
+terms of another currency as of the directive's date. A history of the prices
+for each currency pairs is built and can be queried within the bookkeeping
+system. Note that because Beancount does not store any data at time-of-day
+resolution, it makes no sense to have multiple price directives at the same
+date. (Beancount will not attempt to solve this problem; this is beyond the
+general scope of double-entry bookkeeping and if you need to build a day
+trading system, you should probably use something else).
+
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
currency: A string, the currency that is being priced, e.g. HOOL.
+ amount: An instance of Amount, the number of units and currency that
+ 'currency' is worth, for instance 1200.12 USD.
A named query declaration. This directive is used to create pre-canned queries
+that can then be automatically run or made available to the shell, or perhaps be
+rendered as part of a web interface. The purpose of this routine is to define
+useful queries for the context of the particular given Beancount input file.
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
The date at which this query should be run. All directives following
+this date will be ignored automatically. This is essentially equivalent to
+the CLOSE modifier in the shell syntax.
A transaction! This is the main type of object that we manipulate, and the
+entire reason this whole project exists in the first place, because
+representing these types of structures with a spreadsheet is difficult.
+
Attributes:
+
+
+
+
Name
+
Type
+
Description
+
+
+
+
+
meta
+
Dict[str, Any]
+
See above.
+
+
+
date
+
date
+
See above.
+
+
+
flag
+
str
+
A single-character string or None. This user-specified string
+represents some custom/user-defined state of the transaction. You can use
+this for various purposes. Otherwise common, pre-defined flags are defined
+under beancount.core.flags, to flags transactions that are automatically
+generated.
+
+
+
payee
+
Optional[str]
+
A free-form string that identifies the payee, or None, if absent.
+
+
+
narration
+
str
+
A free-form string that provides a description for the transaction.
+All transactions have at least a narration string, this is never None.
+
+
+
tags
+
FrozenSet
+
A set of tag strings (without the '#'), or EMPTY_SET.
+
+
+
links
+
FrozenSet
+
A set of link strings (without the '^'), or EMPTY_SET.
+
+
+
postings
+
List[beancount.core.data.Posting]
+
A list of Posting instances, the legs of this transaction. See the
+doc under Posting above.
def create_simple_posting(entry, account, number, currency):
- """Create a simple posting on the entry, with just a number and currency (no cost).
-
- Args:
- entry: The entry instance to add the posting to.
- account: A string, the account to use on the posting.
- number: A Decimal number or string to use in the posting's Amount.
- currency: A string, the currency for the Amount.
- Returns:
- An instance of Posting, and as a side-effect the entry has had its list of
- postings modified with the new Posting instance.
- """
- if isinstance(account, str):
- pass
- if number is None:
- units = None
- else:
- if not isinstance(number, Decimal):
- number = D(number)
- units = Amount(number, currency)
- posting = Posting(account, units, None, None, None, None)
- if entry is not None:
- entry.postings.append(posting)
- return posting
-
+
defcreate_simple_posting(entry,account,number,currency):
+"""Create a simple posting on the entry, with just a number and currency (no cost).
+
+ Args:
+ entry: The entry instance to add the posting to.
+ account: A string, the account to use on the posting.
+ number: A Decimal number or string to use in the posting's Amount.
+ currency: A string, the currency for the Amount.
+ Returns:
+ An instance of Posting, and as a side-effect the entry has had its list of
+ postings modified with the new Posting instance.
+ """
+ ifisinstance(account,str):
+ pass
+ ifnumberisNone:
+ units=None
+ else:
+ ifnotisinstance(number,Decimal):
+ number=D(number)
+ units=Amount(number,currency)
+ posting=Posting(account,units,None,None,None,None)
+ ifentryisnotNone:
+ entry.postings.append(posting)
+ returnposting
+
def create_simple_posting_with_cost(entry, account,
- number, currency,
- cost_number, cost_currency):
- """Create a simple posting on the entry, with just a number and currency (no cost).
-
- Args:
- entry: The entry instance to add the posting to.
- account: A string, the account to use on the posting.
- number: A Decimal number or string to use in the posting's Amount.
- currency: A string, the currency for the Amount.
- cost_number: A Decimal number or string to use for the posting's cost Amount.
- cost_currency: a string, the currency for the cost Amount.
- Returns:
- An instance of Posting, and as a side-effect the entry has had its list of
- postings modified with the new Posting instance.
- """
- if isinstance(account, str):
- pass
- if not isinstance(number, Decimal):
- number = D(number)
- if cost_number and not isinstance(cost_number, Decimal):
- cost_number = D(cost_number)
- units = Amount(number, currency)
- cost = Cost(cost_number, cost_currency, None, None)
- posting = Posting(account, units, cost, None, None, None)
- if entry is not None:
- entry.postings.append(posting)
- return posting
-
+
defcreate_simple_posting_with_cost(
+ entry,account,number,currency,cost_number,cost_currency
+):
+"""Create a simple posting on the entry, with just a number and currency (no cost).
+
+ Args:
+ entry: The entry instance to add the posting to.
+ account: A string, the account to use on the posting.
+ number: A Decimal number or string to use in the posting's Amount.
+ currency: A string, the currency for the Amount.
+ cost_number: A Decimal number or string to use for the posting's cost Amount.
+ cost_currency: a string, the currency for the cost Amount.
+ Returns:
+ An instance of Posting, and as a side-effect the entry has had its list of
+ postings modified with the new Posting instance.
+ """
+ ifisinstance(account,str):
+ pass
+ ifnotisinstance(number,Decimal):
+ number=D(number)
+ ifcost_numberandnotisinstance(cost_number,Decimal):
+ cost_number=D(cost_number)
+ units=Amount(number,currency)
+ cost=Cost(cost_number,cost_currency,None,None)
+ posting=Posting(account,units,cost,None,None,None)
+ ifentryisnotNone:
+ entry.postings.append(posting)
+ returnposting
+
def entry_sortkey(entry):
- """Sort-key for entries. We sort by date, except that checks
- should be placed in front of every list of entries of that same day,
- in order to balance linearly.
-
- Args:
- entry: An entry instance.
- Returns:
- A tuple of (date, integer, integer), that forms the sort key for the
- entry.
- """
- return (entry.date, SORT_ORDER.get(type(entry), 0), entry.meta["lineno"])
-
+
defentry_sortkey(entry):
+"""Sort-key for entries. We sort by date, except that checks
+ should be placed in front of every list of entries of that same day,
+ in order to balance linearly.
+
+ Args:
+ entry: An entry instance.
+ Returns:
+ A tuple of (date, integer, integer), that forms the sort key for the
+ entry.
+ """
+ return(entry.date,SORT_ORDER.get(type(entry),0),entry.meta["lineno"])
+
def filter_txns(entries):
- """A generator that yields only the Transaction instances.
-
- This is such an incredibly common operation that it deserves a terse
- filtering mechanism.
-
- Args:
- entries: A list of directives.
- Yields:
- A sorted list of only the Transaction directives.
- """
- for entry in entries:
- if isinstance(entry, Transaction):
- yield entry
-
+
deffilter_txns(entries):
+"""A generator that yields only the Transaction instances.
+
+ This is such an incredibly common operation that it deserves a terse
+ filtering mechanism.
+
+ Args:
+ entries: A list of directives.
+ Yields:
+ A sorted list of only the Transaction directives.
+ """
+ forentryinentries:
+ ifisinstance(entry,Transaction):
+ yieldentry
+
def find_closest(entries, filename, lineno):
- """Find the closest entry from entries to (filename, lineno).
-
- Args:
- entries: A list of directives.
- filename: A string, the name of the ledger file to look for. Be careful
- to provide the very same filename, and note that the parser stores the
- absolute path of the filename here.
- lineno: An integer, the line number closest after the directive we're
- looking for. This may be the exact/first line of the directive.
- Returns:
- The closest entry found in the given file for the given filename, or
- None, if none could be found.
- """
- min_diffline = sys.maxsize
- closest_entry = None
- for entry in entries:
- emeta = entry.meta
- if emeta["filename"] == filename and emeta["lineno"] > 0:
- diffline = lineno - emeta["lineno"]
- if 0 <= diffline < min_diffline:
- min_diffline = diffline
- closest_entry = entry
- return closest_entry
-
+
deffind_closest(entries,filename,lineno):
+"""Find the closest entry from entries to (filename, lineno).
+
+ Args:
+ entries: A list of directives.
+ filename: A string, the name of the ledger file to look for. Be careful
+ to provide the very same filename, and note that the parser stores the
+ absolute path of the filename here.
+ lineno: An integer, the line number closest after the directive we're
+ looking for. This may be the exact/first line of the directive.
+ Returns:
+ The closest entry found in the given file for the given filename, or
+ None, if none could be found.
+ """
+ min_diffline=sys.maxsize
+ closest_entry=None
+ forentryinentries:
+ emeta=entry.meta
+ ifemeta["filename"]==filenameandemeta["lineno"]>0:
+ diffline=lineno-emeta["lineno"]
+ if0<=diffline<min_diffline:
+ min_diffline=diffline
+ closest_entry=entry
+ returnclosest_entry
+
def has_entry_account_component(entry, component):
- """Return true if one of the entry's postings has an account component.
-
- Args:
- entry: A Transaction entry.
- component: A string, a component of an account name. For instance,
- ``Food`` in ``Expenses:Food:Restaurant``. All components are considered.
- Returns:
- Boolean: true if the component is in the account. Note that a component
- name must be whole, that is ``NY`` is not in ``Expenses:Taxes:StateNY``.
- """
- return (isinstance(entry, Transaction) and
- any(has_component(posting.account, component)
- for posting in entry.postings))
-
+
defhas_entry_account_component(entry,component):
+"""Return true if one of the entry's postings has an account component.
+
+ Args:
+ entry: A Transaction entry.
+ component: A string, a component of an account name. For instance,
+ ``Food`` in ``Expenses:Food:Restaurant``. All components are considered.
+ Returns:
+ Boolean: true if the component is in the account. Note that a component
+ name must be whole, that is ``NY`` is not in ``Expenses:Taxes:StateNY``.
+ """
+ returnisinstance(entry,Transaction)andany(
+ has_component(posting.account,component)forpostinginentry.postings
+ )
+
Yields:
- Instances of the dated directives, between the dates, and in the order in
- which they appear.
-
-
- Source code in beancount/core/data.py
-
def iter_entry_dates(entries, date_begin, date_end):
- """Iterate over the entries in a date window.
-
- Args:
- entries: A date-sorted list of dated directives.
- date_begin: A datetime.date instance, the first date to include.
- date_end: A datetime.date instance, one day beyond the last date.
- Yields:
- Instances of the dated directives, between the dates, and in the order in
- which they appear.
- """
- getdate = lambda entry: entry.date
- index_begin = bisect_left_with_key(entries, date_begin, key=getdate)
- index_end = bisect_left_with_key(entries, date_end, key=getdate)
- for index in range(index_begin, index_end):
- yield entries[index]
-
Create a directive class. Do not include default fields.
-This should probably be carried out through inheritance.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
name – A string, the capitalized name of the directive.
-
fields (List[Tuple]) – A string or the list of strings, names for the fields
-to add to the base tuple.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
<function NamedTuple at 0x78e868efb240> – A type object for the new directive type.
-
-
-
-
-
+ Instances of the dated directives, between the dates, and in the order in
+ which they appear.
+
Source code in beancount/core/data.py
-
def new_directive(clsname, fields: List[Tuple]) -> NamedTuple:
- """Create a directive class. Do not include default fields.
- This should probably be carried out through inheritance.
-
- Args:
- name: A string, the capitalized name of the directive.
- fields: A string or the list of strings, names for the fields
- to add to the base tuple.
- Returns:
- A type object for the new directive type.
- """
- return NamedTuple(
- clsname,
- [('meta', Meta), ('date', datetime.date)] + fields)
-
+
defiter_entry_dates(entries,date_begin,date_end):
+"""Iterate over the entries in a date window.
+
+ Args:
+ entries: A date-sorted list of dated directives.
+ date_begin: A datetime.date instance, the first date to include.
+ date_end: A datetime.date instance, one day beyond the last date.
+ Yields:
+ Instances of the dated directives, between the dates, and in the order in
+ which they appear.
+ """
+ getdate=lambdaentry:entry.date
+ index_begin=bisect_left_with_key(entries,date_begin,key=getdate)
+ index_end=bisect_left_with_key(entries,date_end,key=getdate)
+ forindexinrange(index_begin,index_end):
+ yieldentries[index]
+
def new_metadata(filename, lineno, kvlist=None):
- """Create a new metadata container from the filename and line number.
-
- Args:
- filename: A string, the filename for the creator of this directive.
- lineno: An integer, the line number where the directive has been created.
- kvlist: An optional container of key-values.
- Returns:
- A metadata dict.
- """
- meta = {'filename': filename,
- 'lineno': lineno}
- if kvlist:
- meta.update(kvlist)
- return meta
-
+
defnew_metadata(filename,lineno,kvlist=None):
+"""Create a new metadata container from the filename and line number.
+
+ Args:
+ filename: A string, the filename for the creator of this directive.
+ lineno: An integer, the line number where the directive has been created.
+ kvlist: An optional container of key-values.
+ Returns:
+ A metadata dict.
+ """
+ meta={"filename":filename,"lineno":lineno}
+ ifkvlist:
+ meta.update(kvlist)
+ returnmeta
+
def posting_has_conversion(posting):
- """Return true if this position involves a conversion.
-
- A conversion is when there is a price attached to the amount but no cost.
- This is used on transactions to convert between units.
-
- Args:
- posting: an instance of Posting
- Return:
- A boolean, true if this posting has a price conversion.
- """
- return (posting.cost is None and
- posting.price is not None)
-
+
defposting_has_conversion(posting):
+"""Return true if this position involves a conversion.
+
+ A conversion is when there is a price attached to the amount but no cost.
+ This is used on transactions to convert between units.
+
+ Args:
+ posting: an instance of Posting
+ Return:
+ A boolean, true if this posting has a price conversion.
+ """
+ returnposting.costisNoneandposting.priceisnotNone
+
def posting_sortkey(entry):
- """Sort-key for entries or postings. We sort by date, except that checks
- should be placed in front of every list of entries of that same day,
- in order to balance linearly.
-
- Args:
- entry: A Posting or entry instance
- Returns:
- A tuple of (date, integer, integer), that forms the sort key for the
- posting or entry.
- """
- if isinstance(entry, TxnPosting):
- entry = entry.txn
- return (entry.date, SORT_ORDER.get(type(entry), 0), entry.meta["lineno"])
-
+
defposting_sortkey(entry):
+"""Sort-key for entries or postings. We sort by date, except that checks
+ should be placed in front of every list of entries of that same day,
+ in order to balance linearly.
+
+ Args:
+ entry: A Posting or entry instance
+ Returns:
+ A tuple of (date, integer, integer), that forms the sort key for the
+ posting or entry.
+ """
+ ifisinstance(entry,TxnPosting):
+ entry=entry.txn
+ return(entry.date,SORT_ORDER.get(type(entry),0),entry.meta["lineno"])
+
def remove_account_postings(account, entries):
- """Remove all postings with the given account.
-
- Args:
- account: A string, the account name whose postings we want to remove.
- Returns:
- A list of entries without the rounding postings.
- """
- new_entries = []
- for entry in entries:
- if isinstance(entry, Transaction) and (
- any(posting.account == account for posting in entry.postings)):
- entry = entry._replace(postings=[posting
- for posting in entry.postings
- if posting.account != account])
- new_entries.append(entry)
- return new_entries
-
+
defremove_account_postings(account,entries):
+"""Remove all postings with the given account.
+
+ Args:
+ account: A string, the account name whose postings we want to remove.
+ Returns:
+ A list of entries without the rounding postings.
+ """
+ new_entries=[]
+ forentryinentries:
+ ifisinstance(entry,Transaction)and(
+ any(posting.account==accountforpostinginentry.postings)
+ ):
+ entry=entry._replace(
+ postings=[
+ postingforpostinginentry.postingsifposting.account!=account
+ ]
+ )
+ new_entries.append(entry)
+ returnnew_entries
+
def sanity_check_types(entry, allow_none_for_tags_and_links=False):
- """Check that the entry and its postings has all correct data types.
-
- Args:
- entry: An instance of one of the entries to be checked.
- allow_none_for_tags_and_links: A boolean, whether to allow plugins to
- generate Transaction objects with None as value for the 'tags' or 'links'
- attributes.
- Raises:
- AssertionError: If there is anything that is unexpected, raises an exception.
- """
- assert isinstance(entry, ALL_DIRECTIVES), "Invalid directive type"
- assert isinstance(entry.meta, dict), "Invalid type for meta"
- assert 'filename' in entry.meta, "Missing filename in metadata"
- assert 'lineno' in entry.meta, "Missing line number in metadata"
- assert isinstance(entry.date, datetime.date), "Invalid date type"
- if isinstance(entry, Transaction):
- assert isinstance(entry.flag, (NoneType, str)), "Invalid flag type"
- assert isinstance(entry.payee, (NoneType, str)), "Invalid payee type"
- assert isinstance(entry.narration, (NoneType, str)), "Invalid narration type"
- set_types = ((NoneType, set, frozenset)
- if allow_none_for_tags_and_links
- else (set, frozenset))
- assert isinstance(entry.tags, set_types), (
- "Invalid tags type: {}".format(type(entry.tags)))
- assert isinstance(entry.links, set_types), (
- "Invalid links type: {}".format(type(entry.links)))
- assert isinstance(entry.postings, list), "Invalid postings list type"
- for posting in entry.postings:
- assert isinstance(posting, Posting), "Invalid posting type"
- assert isinstance(posting.account, str), "Invalid account type"
- assert isinstance(posting.units, (Amount, NoneType)), "Invalid units type"
- assert isinstance(posting.cost, (Cost, CostSpec, NoneType)), "Invalid cost type"
- assert isinstance(posting.price, (Amount, NoneType)), "Invalid price type"
- assert isinstance(posting.flag, (str, NoneType)), "Invalid flag type"
-
+
defsanity_check_types(entry,allow_none_for_tags_and_links=False):
+"""Check that the entry and its postings has all correct data types.
+
+ Args:
+ entry: An instance of one of the entries to be checked.
+ allow_none_for_tags_and_links: A boolean, whether to allow plugins to
+ generate Transaction objects with None as value for the 'tags' or 'links'
+ attributes.
+ Raises:
+ AssertionError: If there is anything that is unexpected, raises an exception.
+ """
+ assertisinstance(entry,ALL_DIRECTIVES),"Invalid directive type"
+ assertisinstance(entry.meta,dict),"Invalid type for meta"
+ assert"filename"inentry.meta,"Missing filename in metadata"
+ assert"lineno"inentry.meta,"Missing line number in metadata"
+ assertisinstance(entry.date,datetime.date),"Invalid date type"
+ ifisinstance(entry,Transaction):
+ assertisinstance(entry.flag,(NoneType,str)),"Invalid flag type"
+ assertisinstance(entry.payee,(NoneType,str)),"Invalid payee type"
+ assertisinstance(entry.narration,(NoneType,str)),"Invalid narration type"
+ set_types=(
+ (NoneType,set,frozenset)
+ ifallow_none_for_tags_and_links
+ else(set,frozenset)
+ )
+ assertisinstance(entry.tags,set_types),"Invalid tags type: {}".format(
+ type(entry.tags)
+ )
+ assertisinstance(entry.links,set_types),"Invalid links type: {}".format(
+ type(entry.links)
+ )
+ assertisinstance(entry.postings,list),"Invalid postings list type"
+ forpostinginentry.postings:
+ assertisinstance(posting,Posting),"Invalid posting type"
+ assertisinstance(posting.account,str),"Invalid account type"
+ assertisinstance(posting.units,(Amount,NoneType)),"Invalid units type"
+ assertisinstance(posting.cost,(Cost,CostSpec,NoneType)),"Invalid cost type"
+ assertisinstance(posting.price,(Amount,NoneType)),"Invalid price type"
+ assertisinstance(posting.flag,(str,NoneType)),"Invalid flag type"
+
def sorted(entries):
- """A convenience to sort a list of entries, using entry_sortkey().
-
- Args:
- entries: A list of directives.
- Returns:
- A sorted list of directives.
- """
- return builtins.sorted(entries, key=entry_sortkey)
-
+
defsorted(entries):
+"""A convenience to sort a list of entries, using entry_sortkey().
+
+ Args:
+ entries: A list of directives.
+ Returns:
+ A sorted list of directives.
+ """
+ returnbuiltins.sorted(entries,key=entry_sortkey)
+
def transaction_has_conversion(transaction):
- """Given a Transaction entry, return true if at least one of
- the postings has a price conversion (without an associated
- cost). These are the source of non-zero conversion balances.
-
- Args:
- transaction: an instance of a Transaction entry.
- Returns:
- A boolean, true if this transaction contains at least one posting with a
- price conversion.
- """
- assert isinstance(transaction, Transaction), (
- "Invalid type of entry for transaction: {}".format(transaction))
- for posting in transaction.postings:
- if posting_has_conversion(posting):
- return True
- return False
-
+
deftransaction_has_conversion(transaction):
+"""Given a Transaction entry, return true if at least one of
+ the postings has a price conversion (without an associated
+ cost). These are the source of non-zero conversion balances.
+
+ Args:
+ transaction: an instance of a Transaction entry.
+ Returns:
+ A boolean, true if this transaction contains at least one posting with a
+ price conversion.
+ """
+ assertisinstance(
+ transaction,Transaction
+ ),"Invalid type of entry for transaction: {}".format(transaction)
+ forpostingintransaction.postings:
+ ifposting_has_conversion(posting):
+ returnTrue
+ returnFalse
+
Source code in beancount/core/display_context.py
-
def build(self,
- alignment=Align.NATURAL,
- precision=Precision.MOST_COMMON,
- commas=None,
- reserved=0):
- """Build a formatter for the given display context.
-
- Args:
- alignment: The desired alignment.
- precision: The desired precision.
- commas: Whether to render commas or not. If 'None', the default value carried
- by the context will be used.
- reserved: An integer, the number of extra digits to be allocated in
- the maximum width calculations.
- """
- if commas is None:
- commas = self.commas
- if alignment == Align.NATURAL:
- build_method = self._build_natural
- elif alignment == Align.RIGHT:
- build_method = self._build_right
- elif alignment == Align.DOT:
- build_method = self._build_dot
- else:
- raise ValueError("Unknown alignment: {}".format(alignment))
- fmtstrings = build_method(precision, commas, reserved)
-
- return DisplayFormatter(self, precision, fmtstrings)
-
+
defbuild(
+ self,
+ alignment=Align.NATURAL,
+ precision=Precision.MOST_COMMON,
+ commas=None,
+ reserved=0,
+):
+"""Build a formatter for the given display context.
+
+ Args:
+ alignment: The desired alignment.
+ precision: The desired precision.
+ commas: Whether to render commas or not. If 'None', the default value carried
+ by the context will be used.
+ reserved: An integer, the number of extra digits to be allocated in
+ the maximum width calculations.
+ """
+ ifcommasisNone:
+ commas=self.commas
+ ifalignment==Align.NATURAL:
+ build_method=self._build_natural
+ elifalignment==Align.RIGHT:
+ build_method=self._build_right
+ elifalignment==Align.DOT:
+ build_method=self._build_dot
+ else:
+ raiseValueError("Unknown alignment: {}".format(alignment))
+ fmtstrings=build_method(precision,commas,reserved)
+
+ returnDisplayFormatter(self,precision,fmtstrings)
+
Source code in beancount/core/display_context.py
-
def quantize(self, number, currency, precision=Precision.MOST_COMMON):
- """Quantize the given number to the given precision.
-
- Args:
- number: A Decimal instance, the number to be quantized.
- currency: A currency string.
- precision: Which precision to use.
- Returns:
- A Decimal instance, the quantized number.
- """
- assert isinstance(number, Decimal), "Invalid data: {}".format(number)
- ccontext = self.ccontexts[currency]
- num_fractional_digits = ccontext.get_fractional(precision)
- if num_fractional_digits is None:
- # Note: We could probably logging.warn() this situation here.
- return number
- qdigit = Decimal(1).scaleb(-num_fractional_digits)
- return number.quantize(qdigit)
-
+
defquantize(self,number,currency,precision=Precision.MOST_COMMON):
+"""Quantize the given number to the given precision.
+
+ Args:
+ number: A Decimal instance, the number to be quantized.
+ currency: A currency string.
+ precision: Which precision to use.
+ Returns:
+ A Decimal instance, the quantized number.
+ """
+ assertisinstance(number,Decimal),"Invalid data: {}".format(number)
+ ccontext=self.ccontexts[currency]
+ num_fractional_digits=ccontext.get_fractional(precision)
+ ifnum_fractional_digitsisNone:
+ # Note: We could probably logging.warn() this situation here.
+ returnnumber
+ qdigit=Decimal(1).scaleb(-num_fractional_digits)
+
+ withdecimal.localcontext()asctx:
+ # Allow precision for numbers as large as 1 billion in addition to
+ # the required number of fractional digits.
+ #
+ # TODO(blais): Review this to assess performance impact, and whether
+ # we could fold this outside a calling loop.
+ ctx.prec=num_fractional_digits+9
+ returnnumber.quantize(qdigit)
+
Source code in beancount/core/display_context.py
-
def update(self, number, currency='__default__'):
- """Update the builder with the given number for the given currency.
-
- Args:
- number: An instance of Decimal to consider for this currency.
- currency: An optional string, the currency this numbers applies to.
- """
- self.ccontexts[currency].update(number)
-
+
defupdate(self,number,currency="__default__"):
+"""Update the builder with the given number for the given currency.
+
+ Args:
+ number: An instance of Decimal to consider for this currency.
+ currency: An optional string, the currency this numbers applies to.
+ """
+ self.ccontexts[currency].update(number)
+
Update the builder with the other given DisplayContext.
+
+
+
+
+
+
+
+
+
Parameters:
+
+
+
other – Another DisplayContext.
+
+
+
+
+
+
+ Source code in beancount/core/display_context.py
+
defupdate_from(self,other):
+"""Update the builder with the other given DisplayContext.
+
+ Args:
+ other: Another DisplayContext.
+ """
+ forcurrency,ccontextinother.ccontexts.items():
+ self.ccontexts[currency].update_from(ccontext)
+
def max(self):
- """Return the minimum value seen in the distribution.
-
- Returns:
- An element of the value type, or None, if the distribution was empty.
- """
- if not self.hist:
- return None
- value, _ = sorted(self.hist.items())[-1]
- return value
-
+
defmax(self):
+"""Return the minimum value seen in the distribution.
+
+ Returns:
+ An element of the value type, or None, if the distribution was empty.
+ """
+ ifnotself.hist:
+ returnNone
+ value,_=sorted(self.hist.items())[-1]
+ returnvalue
+
def min(self):
- """Return the minimum value seen in the distribution.
-
- Returns:
- An element of the value type, or None, if the distribution was empty.
- """
- if not self.hist:
- return None
- value, _ = sorted(self.hist.items())[0]
- return value
-
+
defmin(self):
+"""Return the minimum value seen in the distribution.
+
+ Returns:
+ An element of the value type, or None, if the distribution was empty.
+ """
+ ifnotself.hist:
+ returnNone
+ value,_=sorted(self.hist.items())[0]
+ returnvalue
+
def mode(self):
- """Return the mode of the distribution.
-
- Returns:
- An element of the value type, or None, if the distribution was empty.
- """
- if not self.hist:
- return None
- max_value = 0
- max_count = 0
- for value, count in sorted(self.hist.items()):
- if count >= max_count:
- max_count = count
- max_value = value
- return max_value
-
+
defmode(self):
+"""Return the mode of the distribution.
+
+ Returns:
+ An element of the value type, or None, if the distribution was empty.
+ """
+ ifnotself.hist:
+ returnNone
+ max_value=0
+ max_count=0
+ forvalue,countinsorted(self.hist.items()):
+ ifcount>=max_count:
+ max_count=count
+ max_value=value
+ returnmax_value
+
Add samples from the other distribution to this one.
+
+
+
+
+
+
+
+
+
Parameters:
+
+
+
other – Another distribution.
+
+
+
+
+
+
+ Source code in beancount/core/distribution.py
+
defupdate_from(self,other):
+"""Add samples from the other distribution to this one.
+
+ Args:
+ other: Another distribution.
+ """
+ forvalue,countinother.hist.items():
+ self.hist[value]+=count
+
def _one(_, entry):
- """Process directives with a single account attribute.
-
- Args:
- entry: An instance of a directive.
- Returns:
- The single account of this directive.
- """
- return (entry.account,)
-
+
def_one(_,entry):
+"""Process directives with a single account attribute.
+
+ Args:
+ entry: An instance of a directive.
+ Returns:
+ The single account of this directive.
+ """
+ return(entry.account,)
+
def _one(_, entry):
- """Process directives with a single account attribute.
-
- Args:
- entry: An instance of a directive.
- Returns:
- The single account of this directive.
- """
- return (entry.account,)
-
+
def_one(_,entry):
+"""Process directives with a single account attribute.
+
+ Args:
+ entry: An instance of a directive.
+ Returns:
+ The single account of this directive.
+ """
+ return(entry.account,)
+
def _zero(_, entry):
- """Process directives with no accounts.
-
- Args:
- entry: An instance of a directive.
- Returns:
- An empty list
- """
- return ()
-
+
def _zero(_, entry):
+    """Process directives with no accounts.
+
+    Args:
+      entry: An instance of a directive.
+    Returns:
+      An empty list
+    """
+    return ()
+
def _zero(_, entry):
- """Process directives with no accounts.
-
- Args:
- entry: An instance of a directive.
- Returns:
- An empty list
- """
- return ()
-
+
def_zero(_,entry):
+"""Process directives with no accounts.
+
+ Args:
+ entry: An instance of a directive.
+ Returns:
+ An empty list
+ """
+ return()
+
def _one(_, entry):
- """Process directives with a single account attribute.
-
- Args:
- entry: An instance of a directive.
- Returns:
- The single account of this directive.
- """
- return (entry.account,)
-
+
def_one(_,entry):
+"""Process directives with a single account attribute.
+
+ Args:
+ entry: An instance of a directive.
+ Returns:
+ The single account of this directive.
+ """
+ return(entry.account,)
+
def _zero(_, entry):
- """Process directives with no accounts.
-
- Args:
- entry: An instance of a directive.
- Returns:
- An empty list
- """
- return ()
-
+
def_zero(_,entry):
+"""Process directives with no accounts.
+
+ Args:
+ entry: An instance of a directive.
+ Returns:
+ An empty list
+ """
+ return()
+
def _one(_, entry):
- """Process directives with a single account attribute.
-
- Args:
- entry: An instance of a directive.
- Returns:
- The single account of this directive.
- """
- return (entry.account,)
-
+
def_one(_,entry):
+"""Process directives with a single account attribute.
+
+ Args:
+ entry: An instance of a directive.
+ Returns:
+ The single account of this directive.
+ """
+ return(entry.account,)
+
def _one(_, entry):
- """Process directives with a single account attribute.
-
- Args:
- entry: An instance of a directive.
- Returns:
- The single account of this directive.
- """
- return (entry.account,)
-
+
def_one(_,entry):
+"""Process directives with a single account attribute.
+
+ Args:
+ entry: An instance of a directive.
+ Returns:
+ The single account of this directive.
+ """
+ return(entry.account,)
+
def Pad(_, entry):
- """Process a Pad directive.
-
- Args:
- entry: An instance of Pad.
- Returns:
- The two accounts of the Pad directive.
- """
- return (entry.account, entry.source_account)
-
+
def Pad(_, entry):
+    """Process a Pad directive.
+
+    Args:
+      entry: An instance of Pad.
+    Returns:
+      The two accounts of the Pad directive.
+    """
+    return (entry.account, entry.source_account)
+
def _zero(_, entry):
- """Process directives with no accounts.
-
- Args:
- entry: An instance of a directive.
- Returns:
- An empty list
- """
- return ()
-
+
def_zero(_,entry):
+"""Process directives with no accounts.
+
+ Args:
+ entry: An instance of a directive.
+ Returns:
+ An empty list
+ """
+ return()
+
def _zero(_, entry):
- """Process directives with no accounts.
-
- Args:
- entry: An instance of a directive.
- Returns:
- An empty list
- """
- return ()
-
+
def_zero(_,entry):
+"""Process directives with no accounts.
+
+ Args:
+ entry: An instance of a directive.
+ Returns:
+ An empty list
+ """
+ return()
+
def Transaction(_, entry):
- """Process a Transaction directive.
-
- Args:
- entry: An instance of Transaction.
- Yields:
- The accounts of the legs of the transaction.
- """
- for posting in entry.postings:
- yield posting.account
-
+
def Transaction(_, entry):
+    """Process a Transaction directive.
+
+    Args:
+      entry: An instance of Transaction.
+    Yields:
+      The accounts of the legs of the transaction.
+    """
+    for posting in entry.postings:
+        yield posting.account
+
def get_accounts_use_map(self, entries):
- """Gather the list of accounts from the list of entries.
-
- Args:
- entries: A list of directive instances.
- Returns:
- A pair of dictionaries of account name to date, one for first date
- used and one for last date used. The keys should be identical.
- """
- accounts_first = {}
- accounts_last = {}
- for entry in entries:
- method = getattr(self, entry.__class__.__name__)
- for account_ in method(entry):
- if account_ not in accounts_first:
- accounts_first[account_] = entry.date
- accounts_last[account_] = entry.date
- return accounts_first, accounts_last
-
+
def get_accounts_use_map(self, entries):
+    """Gather the list of accounts from the list of entries.
+
+    Args:
+      entries: A list of directive instances.
+    Returns:
+      A pair of dictionaries of account name to date, one for first date
+      used and one for last date used. The keys should be identical.
+    """
+    accounts_first = {}
+    accounts_last = {}
+    for entry in entries:
+        method = getattr(self, entry.__class__.__name__)
+        for account_ in method(entry):
+            if account_ not in accounts_first:
+                accounts_first[account_] = entry.date
+            accounts_last[account_] = entry.date
+    return accounts_first, accounts_last
+
def get_entry_accounts(self, entry):
- """Gather all the accounts references by a single directive.
-
- Note: This should get replaced by a method on each directive eventually,
- that would be the clean way to do this.
-
- Args:
- entry: A directive instance.
- Returns:
- A set of account name strings.
- """
- method = getattr(self, entry.__class__.__name__)
- return set(method(entry))
-
+
defget_entry_accounts(self,entry):
+"""Gather all the accounts references by a single directive.
+
+ Note: This should get replaced by a method on each directive eventually,
+ that would be the clean way to do this.
+
+ Args:
+ entry: A directive instance.
+ Returns:
+ A set of account name strings.
+ """
+ method=getattr(self,entry.__class__.__name__)
+ returnset(method(entry))
+
def get_account_components(entries):
- """Gather all the account components available in the given directives.
-
- Args:
- entries: A list of directive instances.
- Returns:
- A list of strings, the unique account components, including the root
- account names.
- """
- accounts = get_accounts(entries)
- components = set()
- for account_name in accounts:
- components.update(account.split(account_name))
- return sorted(components)
-
+
defget_account_components(entries):
+"""Gather all the account components available in the given directives.
+
+ Args:
+ entries: A list of directive instances.
+ Returns:
+ A list of strings, the unique account components, including the root
+ account names.
+ """
+ accounts=get_accounts(entries)
+ components=set()
+ foraccount_nameinaccounts:
+ components.update(account.split(account_name))
+ returnsorted(components)
+
def get_account_open_close(entries):
- """Fetch the open/close entries for each of the accounts.
-
- If an open or close entry happens to be duplicated, accept the earliest
- entry (chronologically).
-
- Args:
- entries: A list of directive instances.
- Returns:
- A map of account name strings to pairs of (open-directive, close-directive)
- tuples.
- """
- # A dict of account name to (open-entry, close-entry).
- open_close_map = defaultdict(lambda: [None, None])
- for entry in entries:
- if not isinstance(entry, (Open, Close)):
- continue
- open_close = open_close_map[entry.account]
- index = 0 if isinstance(entry, Open) else 1
- previous_entry = open_close[index]
- if previous_entry is not None:
- if previous_entry.date <= entry.date:
- entry = previous_entry
- open_close[index] = entry
-
- return dict(open_close_map)
-
+
def get_account_open_close(entries):
+    """Fetch the open/close entries for each of the accounts.
+
+    If an open or close entry happens to be duplicated, accept the earliest
+    entry (chronologically).
+
+    Args:
+      entries: A list of directive instances.
+    Returns:
+      A map of account name strings to pairs of (open-directive, close-directive)
+      tuples.
+    """
+    # A dict of account name to (open-entry, close-entry).
+    open_close_map = defaultdict(lambda: [None, None])
+    for entry in entries:
+        if not isinstance(entry, (Open, Close)):
+            continue
+        open_close = open_close_map[entry.account]
+        index = 0 if isinstance(entry, Open) else 1
+        previous_entry = open_close[index]
+        if previous_entry is not None:
+            if previous_entry.date <= entry.date:
+                entry = previous_entry
+        open_close[index] = entry
+
+    return dict(open_close_map)
+
def get_accounts(entries):
- """Gather all the accounts references by a list of directives.
-
- Args:
- entries: A list of directive instances.
- Returns:
- A set of account strings.
- """
- _, accounts_last = _GetAccounts.get_accounts_use_map(entries)
- return accounts_last.keys()
-
+
defget_accounts(entries):
+"""Gather all the accounts references by a list of directives.
+
+ Args:
+ entries: A list of directive instances.
+ Returns:
+ A set of account strings.
+ """
+ _,accounts_last=_GetAccounts.get_accounts_use_map(entries)
+ returnaccounts_last.keys()
+
def get_accounts_use_map(entries):
- """Gather all the accounts references by a list of directives.
-
- Args:
- entries: A list of directive instances.
- Returns:
- A pair of dictionaries of account name to date, one for first date
- used and one for last date used. The keys should be identical.
- """
- return _GetAccounts.get_accounts_use_map(entries)
-
+
defget_accounts_use_map(entries):
+"""Gather all the accounts references by a list of directives.
+
+ Args:
+ entries: A list of directive instances.
+ Returns:
+ A pair of dictionaries of account name to date, one for first date
+ used and one for last date used. The keys should be identical.
+ """
+ return_GetAccounts.get_accounts_use_map(entries)
+
def get_active_years(entries):
- """Yield all the years that have at least one entry in them.
-
- Args:
- entries: A list of directive instances.
- Yields:
- Unique dates see in the list of directives.
- """
- seen = set()
- prev_year = None
- for entry in entries:
- year = entry.date.year
- if year != prev_year:
- prev_year = year
- assert year not in seen
- seen.add(year)
- yield year
-
+
def get_active_years(entries):
+    """Yield all the years that have at least one entry in them.
+
+    Args:
+      entries: A list of directive instances.
+    Yields:
+      Unique dates see in the list of directives.
+    """
+    seen = set()
+    prev_year = None
+    for entry in entries:
+        year = entry.date.year
+        if year != prev_year:
+            prev_year = year
+            assert year not in seen
+            seen.add(year)
+            yield year
+
def get_all_links(entries):
- """Return a list of all the links seen in the given entries.
-
- Args:
- entries: A list of directive instances.
- Returns:
- A set of links strings.
- """
- all_links = set()
- for entry in entries:
- if not isinstance(entry, Transaction):
- continue
- if entry.links:
- all_links.update(entry.links)
- return sorted(all_links)
-
+
defget_all_links(entries):
+"""Return a list of all the links seen in the given entries.
+
+ Args:
+ entries: A list of directive instances.
+ Returns:
+ A set of links strings.
+ """
+ all_links=set()
+ forentryinentries:
+ ifnotisinstance(entry,Transaction):
+ continue
+ ifentry.links:
+ all_links.update(entry.links)
+ returnsorted(all_links)
+
def get_all_payees(entries):
- """Return a list of all the unique payees seen in the given entries.
-
- Args:
- entries: A list of directive instances.
- Returns:
- A set of payee strings.
- """
- all_payees = set()
- for entry in entries:
- if not isinstance(entry, Transaction):
- continue
- all_payees.add(entry.payee)
- all_payees.discard(None)
- return sorted(all_payees)
-
+
defget_all_payees(entries):
+"""Return a list of all the unique payees seen in the given entries.
+
+ Args:
+ entries: A list of directive instances.
+ Returns:
+ A set of payee strings.
+ """
+ all_payees=set()
+ forentryinentries:
+ ifnotisinstance(entry,Transaction):
+ continue
+ all_payees.add(entry.payee)
+ all_payees.discard(None)
+ returnsorted(all_payees)
+
def get_all_tags(entries):
- """Return a list of all the tags seen in the given entries.
-
- Args:
- entries: A list of directive instances.
- Returns:
- A set of tag strings.
- """
- all_tags = set()
- for entry in entries:
- if not isinstance(entry, Transaction):
- continue
- if entry.tags:
- all_tags.update(entry.tags)
- return sorted(all_tags)
-
+
defget_all_tags(entries):
+"""Return a list of all the tags seen in the given entries.
+
+ Args:
+ entries: A list of directive instances.
+ Returns:
+ A set of tag strings.
+ """
+ all_tags=set()
+ forentryinentries:
+ ifnotisinstance(entry,Transaction):
+ continue
+ ifentry.tags:
+ all_tags.update(entry.tags)
+ returnsorted(all_tags)
+
create_missing – A boolean, true if you want to automatically generate
-missing commodity directives if not present in the output map.
@@ -10711,60 +13612,16 @@
Source code in beancount/core/getters.py
-
def get_commodity_map(entries, create_missing=True):
- """Create map of commodity names to Commodity entries.
-
- Args:
- entries: A list of directive instances.
- create_missing: A boolean, true if you want to automatically generate
- missing commodity directives if not present in the output map.
- Returns:
- A map of commodity name strings to Commodity directives.
- """
- if not entries:
- return {}
-
- commodities_map = {}
- for entry in entries:
- if isinstance(entry, Commodity):
- commodities_map[entry.currency] = entry
-
- elif isinstance(entry, Transaction):
- for posting in entry.postings:
-
- # Main currency.
- units = posting.units
- commodities_map.setdefault(units.currency, None)
-
- # Currency in cost.
- cost = posting.cost
- if cost:
- commodities_map.setdefault(cost.currency, None)
-
- # Currency in price.
- price = posting.price
- if price:
- commodities_map.setdefault(price.currency, None)
-
- elif isinstance(entry, Balance):
- commodities_map.setdefault(entry.amount.currency, None)
-
- elif isinstance(entry, Price):
- commodities_map.setdefault(entry.currency, None)
-
- if create_missing:
- # Create missing Commodity directives when they haven't been specified explicitly.
- # (I think it might be better to always do this from the loader.)
- date = entries[0].date
- meta = data.new_metadata('<getters>', 0)
- commodities_map = {
- commodity: (entry
- if entry is not None
- else Commodity(meta, date, commodity))
- for commodity, entry in commodities_map.items()}
-
- return commodities_map
-
+
def get_commodity_directives(entries):
+    """Create map of commodity names to Commodity entries.
+
+    Args:
+      entries: A list of directive instances.
+    Returns:
+      A map of commodity name strings to Commodity directives.
+    """
+    return {entry.currency: entry for entry in entries if isinstance(entry, Commodity)}
+
def get_entry_accounts(entry):
- """Gather all the accounts references by a single directive.
-
- Note: This should get replaced by a method on each directive eventually,
- that would be the clean way to do this.
-
- Args:
- entries: A directive instance.
- Returns:
- A set of account strings.
- """
- return _GetAccounts.get_entry_accounts(entry)
-
+
defget_entry_accounts(entry):
+"""Gather all the accounts references by a single directive.
+
+ Note: This should get replaced by a method on each directive eventually,
+ that would be the clean way to do this.
+
+ Args:
+ entries: A directive instance.
+ Returns:
+ A set of account strings.
+ """
+ return_GetAccounts.get_entry_accounts(entry)
+
def get_leveln_parent_accounts(account_names, level, nrepeats=0):
- """Return a list of all the unique leaf names at level N in an account hierarchy.
-
- Args:
- account_names: A list of account names (strings)
- level: The level to cross-cut. 0 is for root accounts.
- nrepeats: A minimum number of times a leaf is required to be present in the
- the list of unique account names in order to be returned by this function.
- Returns:
- A list of leaf node names.
- """
- leveldict = defaultdict(int)
- for account_name in set(account_names):
- components = account.split(account_name)
- if level < len(components):
- leveldict[components[level]] += 1
- levels = {level_
- for level_, count in leveldict.items()
- if count > nrepeats}
- return sorted(levels)
-
+
def get_leveln_parent_accounts(account_names, level, nrepeats=0):
+    """Return a list of all the unique leaf names at level N in an account hierarchy.
+
+    Args:
+      account_names: A list of account names (strings)
+      level: The level to cross-cut. 0 is for root accounts.
+      nrepeats: A minimum number of times a leaf is required to be present in the
+        the list of unique account names in order to be returned by this function.
+    Returns:
+      A list of leaf node names.
+    """
+    leveldict = defaultdict(int)
+    for account_name in set(account_names):
+        components = account.split(account_name)
+        if level < len(components):
+            leveldict[components[level]] += 1
+    levels = {level_ for level_, count in leveldict.items() if count > nrepeats}
+    return sorted(levels)
+
def get_min_max_dates(entries, types=None):
- """Return the minimum and maximum dates in the list of entries.
-
- Args:
- entries: A list of directive instances.
- types: An optional tuple of types to restrict the entries to.
- Returns:
- A pair of datetime.date dates, the minimum and maximum dates seen in the
- directives.
- """
- date_first = date_last = None
-
- for entry in entries:
- if types and not isinstance(entry, types):
- continue
- date_first = entry.date
- break
-
- for entry in reversed(entries):
- if types and not isinstance(entry, types):
- continue
- date_last = entry.date
- break
-
- return (date_first, date_last)
-
+
defget_min_max_dates(entries,types=None):
+"""Return the minimum and maximum dates in the list of entries.
+
+ Args:
+ entries: A list of directive instances.
+ types: An optional tuple of types to restrict the entries to.
+ Returns:
+ A pair of datetime.date dates, the minimum and maximum dates seen in the
+ directives.
+ """
+ date_first=date_last=None
+
+ forentryinentries:
+ iftypesandnotisinstance(entry,types):
+ continue
+ date_first=entry.date
+ break
+
+ forentryinreversed(entries):
+ iftypesandnotisinstance(entry,types):
+ continue
+ date_last=entry.date
+ break
+
+ return(date_first,date_last)
+
def get_values_meta(name_to_entries_map, *meta_keys, default=None):
- """Get a map of the metadata from a map of entries values.
-
- Given a dict of some key to a directive instance (or None), return a mapping
- of the key to the metadata extracted from each directive, or a default
- value. This can be used to gather a particular piece of metadata from an
- accounts map or a commodities map.
-
- Args:
- name_to_entries_map: A dict of something to an entry or None.
- meta_keys: A list of strings, the keys to fetch from the metadata.
- default: The default value to use if the metadata is not available or if
- the value/entry is None.
- Returns:
- A mapping of the keys of name_to_entries_map to the values of the 'meta_keys'
- metadata. If there are multiple 'meta_keys', each value is a tuple of them.
- On the other hand, if there is only a single one, the value itself is returned.
- """
- value_map = {}
- for key, entry in name_to_entries_map.items():
- value_list = []
- for meta_key in meta_keys:
- value_list.append(entry.meta.get(meta_key, default)
- if entry is not None
- else default)
- value_map[key] = (value_list[0]
- if len(meta_keys) == 1
- else tuple(value_list))
- return value_map
-
+
def get_values_meta(name_to_entries_map, *meta_keys, default=None):
+    """Get a map of the metadata from a map of entries values.
+
+    Given a dict of some key to a directive instance (or None), return a mapping
+    of the key to the metadata extracted from each directive, or a default
+    value. This can be used to gather a particular piece of metadata from an
+    accounts map or a commodities map.
+
+    Args:
+      name_to_entries_map: A dict of something to an entry or None.
+      meta_keys: A list of strings, the keys to fetch from the metadata.
+      default: The default value to use if the metadata is not available or if
+        the value/entry is None.
+    Returns:
+      A mapping of the keys of name_to_entries_map to the values of the 'meta_keys'
+      metadata. If there are multiple 'meta_keys', each value is a tuple of them.
+      On the other hand, if there is only a single one, the value itself is returned.
+    """
+    value_map = {}
+    for key, entry in name_to_entries_map.items():
+        value_list = []
+        for meta_key in meta_keys:
+            value_list.append(
+                entry.meta.get(meta_key, default) if entry is not None else default
+            )
+        value_map[key] = value_list[0] if len(meta_keys) == 1 else tuple(value_list)
+    return value_map
+
def compute_entries_balance(entries, prefix=None, date=None):
- """Compute the balance of all postings of a list of entries.
-
- Sum up all the positions in all the postings of all the transactions in the
- list of entries and return an inventory of it.
-
- Args:
- entries: A list of directives.
- prefix: If specified, a prefix string to restrict by account name. Only
- postings with an account that starts with this prefix will be summed up.
- date: A datetime.date instance at which to stop adding up the balance.
- The date is exclusive.
- Returns:
- An instance of Inventory.
- """
- total_balance = Inventory()
- for entry in entries:
- if not (date is None or entry.date < date):
- break
- if isinstance(entry, Transaction):
- for posting in entry.postings:
- if prefix is None or posting.account.startswith(prefix):
- total_balance.add_position(posting)
- return total_balance
-
+
defcompute_entries_balance(entries,prefix=None,date=None):
+"""Compute the balance of all postings of a list of entries.
+
+ Sum up all the positions in all the postings of all the transactions in the
+ list of entries and return an inventory of it.
+
+ Args:
+ entries: A list of directives.
+ prefix: If specified, a prefix string to restrict by account name. Only
+ postings with an account that starts with this prefix will be summed up.
+ date: A datetime.date instance at which to stop adding up the balance.
+ The date is exclusive.
+ Returns:
+ An instance of Inventory.
+ """
+ total_balance=Inventory()
+ forentryinentries:
+ ifnot(dateisNoneorentry.date<date):
+ break
+ ifisinstance(entry,Transaction):
+ forpostinginentry.postings:
+ ifprefixisNoneorposting.account.startswith(prefix):
+ total_balance.add_position(posting)
+ returntotal_balance
+
def compute_entry_context(entries, context_entry):
- """Compute the balances of all accounts referenced by entry up to entry.
-
- This provides the inventory of the accounts to which the entry is to be
- applied, before and after.
-
- Args:
- entries: A list of directives.
- context_entry: The entry for which we want to obtain the before and after
- context.
- Returns:
- Two dicts of account-name to Inventory instance, one which represents the
- context before the entry is applied, and one that represents the context
- after it has been applied.
- """
- assert context_entry is not None, "context_entry is missing."
-
- # Get the set of accounts for which to compute the context.
- context_accounts = getters.get_entry_accounts(context_entry)
-
- # Iterate over the entries until we find the target one and accumulate the
- # balance.
- context_before = collections.defaultdict(inventory.Inventory)
- for entry in entries:
- if entry is context_entry:
- break
- if isinstance(entry, Transaction):
- for posting in entry.postings:
- if not any(posting.account == account
- for account in context_accounts):
- continue
- balance = context_before[posting.account]
- balance.add_position(posting)
-
- # Compute the after context for the entry.
- context_after = copy.deepcopy(context_before)
- if isinstance(context_entry, Transaction):
- for posting in entry.postings:
- balance = context_after[posting.account]
- balance.add_position(posting)
-
- return context_before, context_after
-
+
def compute_entry_context(entries, context_entry, additional_accounts=None):
+    """Compute the balances of all accounts referenced by entry up to entry.
+
+    This provides the inventory of the accounts to which the entry is to be
+    applied, before and after.
+
+    Args:
+      entries: A list of directives.
+      context_entry: The entry for which we want to obtain the before and after
+        context.
+      additional_accounts: Additional list of accounts to include in calculating
+        the balance. This is used when invoked for debugging, in case the booked
+        & interpolated transaction doesn't have all the accounts we need because
+        it had an error (the booking code will remove invalid postings).
+    Returns:
+      Two dicts of account-name to Inventory instance, one which represents the
+      context before the entry is applied, and one that represents the context
+      after it has been applied.
+    """
+    assert context_entry is not None, "context_entry is missing."
+
+    # Get the set of accounts for which to compute the context.
+    context_accounts = getters.get_entry_accounts(context_entry)
+    if additional_accounts:
+        context_accounts.update(additional_accounts)
+
+    # Iterate over the entries until we find the target one and accumulate the
+    # balance.
+    context_before = collections.defaultdict(inventory.Inventory)
+    for entry in entries:
+        if entry is context_entry:
+            break
+        if isinstance(entry, Transaction):
+            for posting in entry.postings:
+                if not any(posting.account == account for account in context_accounts):
+                    continue
+                balance = context_before[posting.account]
+                balance.add_position(posting)
+
+    # Compute the after context for the entry.
+    context_after = copy.deepcopy(context_before)
+    if isinstance(context_entry, Transaction):
+        for posting in entry.postings:
+            balance = context_after[posting.account]
+            balance.add_position(posting)
+
+    return context_before, context_after
+
def compute_residual(postings):
- """Compute the residual of a set of complete postings, and the per-currency precision.
-
- This is used to cross-check a balanced transaction.
-
- The precision is the maximum fraction that is being used for each currency
- (a dict). We use the currency of the weight amount in order to infer the
- quantization precision for each currency. Integer amounts aren't
- contributing to the determination of precision.
-
- Args:
- postings: A list of Posting instances.
- Returns:
- An instance of Inventory, with the residual of the given list of postings.
- """
- inventory = Inventory()
- for posting in postings:
- # Skip auto-postings inserted to absorb the residual (rounding error).
- if posting.meta and posting.meta.get(AUTOMATIC_RESIDUAL, False):
- continue
- # Add to total residual balance.
- inventory.add_amount(convert.get_weight(posting))
- return inventory
-
+
defcompute_residual(postings):
+"""Compute the residual of a set of complete postings, and the per-currency precision.
+
+ This is used to cross-check a balanced transaction.
+
+ The precision is the maximum fraction that is being used for each currency
+ (a dict). We use the currency of the weight amount in order to infer the
+ quantization precision for each currency. Integer amounts aren't
+ contributing to the determination of precision.
+
+ Args:
+ postings: A list of Posting instances.
+ Returns:
+ An instance of Inventory, with the residual of the given list of postings.
+ """
+ inventory=Inventory()
+ forpostinginpostings:
+ # Skip auto-postings inserted to absorb the residual (rounding error).
+ ifposting.metaandposting.meta.get(AUTOMATIC_RESIDUAL,False):
+ continue
+ # Add to total residual balance.
+ inventory.add_amount(convert.get_weight(posting))
+ returninventory
+
def fill_residual_posting(entry, account_rounding):
- """If necessary, insert a posting to absorb the residual.
- This makes the transaction balance exactly.
-
- Note: This was developed in order to tweak transactions before exporting
- them to Ledger. A better method would be to enable the feature that
- automatically inserts these rounding postings on all transactions, and so
- maybe this method can be deprecated if we do so.
-
- Args:
- entry: An instance of a Transaction.
- account_rounding: A string, the name of the rounding account that
- absorbs residuals / rounding errors.
- Returns:
- A possibly new, modified entry with a new posting. If a residual
- was not needed - the transaction already balanced perfectly - no new
- leg is inserted.
-
- """
- residual = compute_residual(entry.postings)
- if not residual.is_empty():
- new_postings = list(entry.postings)
- new_postings.extend(get_residual_postings(residual, account_rounding))
- entry = entry._replace(postings=new_postings)
- return entry
-
+
deffill_residual_posting(entry,account_rounding):
+"""If necessary, insert a posting to absorb the residual.
+ This makes the transaction balance exactly.
+
+ Note: This was developed in order to tweak transactions before exporting
+ them to Ledger. A better method would be to enable the feature that
+ automatically inserts these rounding postings on all transactions, and so
+ maybe this method can be deprecated if we do so.
+
+ Args:
+ entry: An instance of a Transaction.
+ account_rounding: A string, the name of the rounding account that
+ absorbs residuals / rounding errors.
+ Returns:
+ A possibly new, modified entry with a new posting. If a residual
+ was not needed - the transaction already balanced perfectly - no new
+ leg is inserted.
+
+ """
+ residual=compute_residual(entry.postings)
+ ifnotresidual.is_empty():
+ new_postings=list(entry.postings)
+ new_postings.extend(get_residual_postings(residual,account_rounding))
+ entry=entry._replace(postings=new_postings)
+ returnentry
+
def get_residual_postings(residual, account_rounding):
- """Create postings to book the given residuals.
-
- Args:
- residual: An Inventory, the residual positions.
- account_rounding: A string, the name of the rounding account that
- absorbs residuals / rounding errors.
- Returns:
- A list of new postings to be inserted to reduce the given residual.
- """
- meta = {AUTOMATIC_META: True,
- AUTOMATIC_RESIDUAL: True}
- return [Posting(account_rounding, -position.units, position.cost, None, None,
- meta.copy())
- for position in residual.get_positions()]
-
+
defget_residual_postings(residual,account_rounding):
+"""Create postings to book the given residuals.
+
+ Args:
+ residual: An Inventory, the residual positions.
+ account_rounding: A string, the name of the rounding account that
+ absorbs residuals / rounding errors.
+ Returns:
+ A list of new postings to be inserted to reduce the given residual.
+ """
+ meta={AUTOMATIC_META:True,AUTOMATIC_RESIDUAL:True}
+ return[
+ Posting(account_rounding,-position.units,position.cost,None,None,meta.copy())
+ forpositioninresidual.get_positions()
+ ]
+
def has_nontrivial_balance(posting):
- """Return True if a Posting has a balance amount that would have to be calculated.
-
- Args:
- posting: A Posting instance.
- Returns:
- A boolean.
- """
- return posting.cost or posting.price
-
+
defhas_nontrivial_balance(posting):
+"""Return True if a Posting has a balance amount that would have to be calculated.
+
+ Args:
+ posting: A Posting instance.
+ Returns:
+ A boolean.
+ """
+ returnposting.costorposting.price
+
def infer_tolerances(postings, options_map, use_cost=None):
- """Infer tolerances from a list of postings.
-
- The tolerance is the maximum fraction that is being used for each currency
- (a dict). We use the currency of the weight amount in order to infer the
- quantization precision for each currency. Integer amounts aren't
- contributing to the determination of precision.
-
- The 'use_cost' option allows one to experiment with letting postings at cost
- and at price influence the maximum value of the tolerance. It's tricky to
- use and alters the definition of the tolerance in a non-trivial way, if you
- use it. The tolerance is expanded by the sum of the cost times a fraction 'M'
- of the smallest digits in the number of units for all postings held at cost.
-
- For example, in this transaction:
-
- 2006-01-17 * "Plan Contribution"
- Assets:Investments:VWELX 18.572 VWELX {30.96 USD}
- Assets:Investments:VWELX 18.572 VWELX {30.96 USD}
- Assets:Investments:Cash -1150.00 USD
-
- The tolerance for units of USD will be calculated as the MAXIMUM of:
-
- 0.01 * M = 0.005 (from the 1150.00 USD leg)
-
- The sum of
- 0.001 * M x 30.96 = 0.01548 +
- 0.001 * M x 30.96 = 0.01548
- = 0.03096
-
- So the tolerance for USD in this case is max(0.005, 0.03096) = 0.03096. Prices
- contribute similarly to the maximum tolerance allowed.
-
- Note that 'M' above is the inferred_tolerance_multiplier and its default
- value is 0.5.
-
- Args:
- postings: A list of Posting instances.
- options_map: A dict of options.
- use_cost: A boolean, true if we should be using a combination of the smallest
- digit of the number times the cost or price in order to infer the tolerance.
- If the value is left unspecified (as 'None'), the default value can be
- overridden by setting an option.
- Returns:
- A dict of currency to the tolerated difference amount to be used for it,
- e.g. 0.005.
- """
- if use_cost is None:
- use_cost = options_map["infer_tolerance_from_cost"]
-
- inferred_tolerance_multiplier = options_map["inferred_tolerance_multiplier"]
-
- default_tolerances = options_map["inferred_tolerance_default"]
- tolerances = default_tolerances.copy()
-
- cost_tolerances = collections.defaultdict(D)
- for posting in postings:
- # Skip the precision on automatically inferred postings.
- if posting.meta and AUTOMATIC_META in posting.meta:
- continue
- units = posting.units
- if not (isinstance(units, Amount) and isinstance(units.number, Decimal)):
- continue
-
- # Compute bounds on the number.
- currency = units.currency
- expo = units.number.as_tuple().exponent
- if expo < 0:
- # Note: the exponent is a negative value.
- tolerance = ONE.scaleb(expo) * inferred_tolerance_multiplier
- tolerances[currency] = max(tolerance,
- tolerances.get(currency, -1024))
-
- if not use_cost:
- continue
-
- # Compute bounds on the smallest digit of the number implied as cost.
- cost = posting.cost
- if cost is not None:
- cost_currency = cost.currency
- if isinstance(cost, Cost):
- cost_tolerance = min(tolerance * cost.number, MAXIMUM_TOLERANCE)
- else:
- assert isinstance(cost, CostSpec)
- cost_tolerance = MAXIMUM_TOLERANCE
- for cost_number in cost.number_total, cost.number_per:
- if cost_number is None or cost_number is MISSING:
- continue
- cost_tolerance = min(tolerance * cost_number, cost_tolerance)
- cost_tolerances[cost_currency] += cost_tolerance
-
- # Compute bounds on the smallest digit of the number implied as cost.
- price = posting.price
- if isinstance(price, Amount) and isinstance(price.number, Decimal):
- price_currency = price.currency
- price_tolerance = min(tolerance * price.number, MAXIMUM_TOLERANCE)
- cost_tolerances[price_currency] += price_tolerance
-
- for currency, tolerance in cost_tolerances.items():
- tolerances[currency] = max(tolerance, tolerances.get(currency, -1024))
-
- default = tolerances.pop('*', ZERO)
- return defdict.ImmutableDictWithDefault(tolerances, default=default)
-
+
definfer_tolerances(postings,options_map,use_cost=None):
+"""Infer tolerances from a list of postings.
+
+ The tolerance is the maximum fraction that is being used for each currency
+ (a dict). We use the currency of the weight amount in order to infer the
+ quantization precision for each currency. Integer amounts aren't
+ contributing to the determination of precision.
+
+ The 'use_cost' option allows one to experiment with letting postings at cost
+ and at price influence the maximum value of the tolerance. It's tricky to
+ use and alters the definition of the tolerance in a non-trivial way, if you
+ use it. The tolerance is expanded by the sum of the cost times a fraction 'M'
+ of the smallest digits in the number of units for all postings held at cost.
+
+ For example, in this transaction:
+
+ 2006-01-17 * "Plan Contribution"
+ Assets:Investments:VWELX 18.572 VWELX {30.96 USD}
+ Assets:Investments:VWELX 18.572 VWELX {30.96 USD}
+ Assets:Investments:Cash -1150.00 USD
+
+ The tolerance for units of USD will be calculated as the MAXIMUM of:
+
+ 0.01 * M = 0.005 (from the 1150.00 USD leg)
+
+ The sum of
+ 0.001 * M x 30.96 = 0.01548 +
+ 0.001 * M x 30.96 = 0.01548
+ = 0.03096
+
+ So the tolerance for USD in this case is max(0.005, 0.03096) = 0.03096. Prices
+ contribute similarly to the maximum tolerance allowed.
+
+ Note that 'M' above is the inferred_tolerance_multiplier and its default
+ value is 0.5.
+
+ Args:
+ postings: A list of Posting instances.
+ options_map: A dict of options.
+ use_cost: A boolean, true if we should be using a combination of the smallest
+ digit of the number times the cost or price in order to infer the tolerance.
+ If the value is left unspecified (as 'None'), the default value can be
+ overridden by setting an option.
+ Returns:
+ A dict of currency to the tolerated difference amount to be used for it,
+ e.g. 0.005.
+ """
+ ifuse_costisNone:
+ use_cost=options_map["infer_tolerance_from_cost"]
+
+ inferred_tolerance_multiplier=options_map["inferred_tolerance_multiplier"]
+
+ default_tolerances=options_map["inferred_tolerance_default"]
+ tolerances=default_tolerances.copy()
+
+ cost_tolerances=collections.defaultdict(D)
+ forpostinginpostings:
+ # Skip the precision on automatically inferred postings.
+ ifposting.metaandAUTOMATIC_METAinposting.meta:
+ continue
+ units=posting.units
+ ifnot(isinstance(units,Amount)andisinstance(units.number,Decimal)):
+ continue
+
+ # Compute bounds on the number.
+ currency=units.currency
+ expo=units.number.as_tuple().exponent
+ ifexpo<0:
+ # Note: the exponent is a negative value.
+ tolerance=ONE.scaleb(expo)*inferred_tolerance_multiplier
+
+ # Note that we take the max() and not the min() here because the
+ # tolerance has a dual purpose: it's used to infer the resolution
+ # for interpolation (where we might want the min()) and also for
+ # balance checks (where we favor the looser/larger tolerance).
+ tolerances[currency]=max(tolerance,tolerances.get(currency,-1024))
+
+ ifnotuse_cost:
+ continue
+
+ # Compute bounds on the smallest digit of the number implied as cost.
+ cost=posting.cost
+ ifcostisnotNone:
+ cost_currency=cost.currency
+ ifisinstance(cost,Cost):
+ cost_tolerance=min(tolerance*cost.number,MAXIMUM_TOLERANCE)
+ else:
+ assertisinstance(cost,CostSpec)
+ cost_tolerance=MAXIMUM_TOLERANCE
+ forcost_numberincost.number_total,cost.number_per:
+ ifcost_numberisNoneorcost_numberisMISSING:
+ continue
+ cost_tolerance=min(tolerance*cost_number,cost_tolerance)
+ cost_tolerances[cost_currency]+=cost_tolerance
+
+ # Compute bounds on the smallest digit of the number implied as cost.
+ price=posting.price
+ ifisinstance(price,Amount)andisinstance(price.number,Decimal):
+ price_currency=price.currency
+ price_tolerance=min(tolerance*price.number,MAXIMUM_TOLERANCE)
+ cost_tolerances[price_currency]+=price_tolerance
+
+ forcurrency,toleranceincost_tolerances.items():
+ tolerances[currency]=max(tolerance,tolerances.get(currency,-1024))
+
+ default=tolerances.pop("*",ZERO)
+ returndefdict.ImmutableDictWithDefault(tolerances,default=default)
+
def is_tolerance_user_specified(tolerance):
- """Return true if the given tolerance number was user-specified.
-
- This would allow the user to provide a tolerance like 0.1234 but not
- 0.123456. This is used to detect whether a tolerance value is input by the
- user and not inferred automatically.
-
- Args:
- tolerance: An instance of Decimal.
- Returns:
- A boolean.
- """
- return len(tolerance.as_tuple().digits) < MAX_TOLERANCE_DIGITS
-
+
defis_tolerance_user_specified(tolerance):
+"""Return true if the given tolerance number was user-specified.
+
+ This would allow the user to provide a tolerance like 0.1234 but not
+ 0.123456. This is used to detect whether a tolerance value is input by the
+ user and not inferred automatically.
+
+ Args:
+ tolerance: An instance of Decimal.
+ Returns:
+ A boolean.
+ """
+ returnlen(tolerance.as_tuple().digits)<MAX_TOLERANCE_DIGITS
+
def quantize_with_tolerance(tolerances, currency, number):
- """Quantize the units using the tolerance dict.
-
- Args:
- tolerances: A dict of currency to tolerance Decimal values.
- number: A number to quantize.
- currency: A string currency.
- Returns:
- A Decimal, the number possibly quantized.
- """
- # Applying rounding to the default tolerance, if there is one.
- tolerance = tolerances.get(currency)
- if tolerance:
- quantum = (tolerance * 2).normalize()
-
- # If the tolerance is a neat number provided by the user,
- # quantize the inferred numbers. See doc on quantize():
- #
- # Unlike other operations, if the length of the coefficient
- # after the quantize operation would be greater than
- # precision, then an InvalidOperation is signaled. This
- # guarantees that, unless there is an error condition, the
- # quantized exponent is always equal to that of the
- # right-hand operand.
- if is_tolerance_user_specified(quantum):
- number = number.quantize(quantum)
- return number
-
+
defquantize_with_tolerance(tolerances,currency,number):
+"""Quantize the units using the tolerance dict.
+
+ Args:
+ tolerances: A dict of currency to tolerance Decimal values.
+ number: A number to quantize.
+ currency: A string currency.
+ Returns:
+ A Decimal, the number possibly quantized.
+ """
+ # Applying rounding to the default tolerance, if there is one.
+ tolerance=tolerances.get(currency)
+ iftolerance:
+ quantum=(tolerance*2).normalize()
+
+ # If the tolerance is a neat number provided by the user,
+ # quantize the inferred numbers. See doc on quantize():
+ #
+ # Unlike other operations, if the length of the coefficient
+ # after the quantize operation would be greater than
+ # precision, then an InvalidOperation is signaled. This
+ # guarantees that, unless there is an error condition, the
+ # quantized exponent is always equal to that of the
+ # right-hand operand.
+ ifis_tolerance_user_specified(quantum):
+ number=number.quantize(quantum)
+ returnnumber
+
@@ -12227,6 +15093,14 @@
This is meant to accommodate both booked and non-booked amounts. The clever
trick that we pull to do this is that for positions which aren't booked, we
simply leave the 'cost' as None. This is the case for most of the transactions.
+
= Conversions =
+
It is often desired to convert this inventory into an equivalent position for
+its cost, or to just flatten all the positions with the same currency and count
+the number of units, or to compute the market value for the inventory at a
+specific date. You do these conversions using the reduce() method:
def __abs__(self):
- """Return an inventory with the absolute value of each position.
-
- Returns:
- An instance of Inventory.
- """
- return Inventory({key: abs(pos) for key, pos in self.items()})
-
+
def__abs__(self):
+"""Return an inventory with the absolute value of each position.
+
+ Returns:
+ An instance of Inventory.
+ """
+ returnInventory({key:abs(pos)forkey,posinself.items()})
+
def __add__(self, other):
- """Add another inventory to this one. This inventory is not modified.
-
- Args:
- other: An instance of Inventory.
- Returns:
- A new instance of Inventory.
- """
- new_inventory = self.__copy__()
- new_inventory.add_inventory(other)
- return new_inventory
-
+
def__add__(self,other):
+"""Add another inventory to this one. This inventory is not modified.
+
+ Args:
+ other: An instance of Inventory.
+ Returns:
+ A new instance of Inventory.
+ """
+ new_inventory=self.__copy__()
+ new_inventory.add_inventory(other)
+ returnnew_inventory
+
def __copy__(self):
- """A shallow copy of this inventory object.
-
- Returns:
- An instance of Inventory, equal to this one.
- """
- return Inventory(self)
-
+
def__copy__(self):
+"""A shallow copy of this inventory object.
+
+ Returns:
+ An instance of Inventory, equal to this one.
+ """
+ returnInventory(self)
+
def add_inventory(self, other):
- """Add all the positions of another Inventory instance to this one.
-
- Args:
- other: An instance of Inventory to add to this one.
- Returns:
- This inventory, modified.
- """
- if self.is_empty():
- # Optimization for empty inventories; if the current one is empty,
- # adopt all of the other inventory's positions without running
- # through the full aggregation checks. This should be very cheap. We
- # can do this because the positions are immutable.
- self.update(other)
- else:
- for position in other.get_positions():
- self.add_position(position)
- return self
-
+
defadd_inventory(self,other):
+"""Add all the positions of another Inventory instance to this one.
+
+ Args:
+ other: An instance of Inventory to add to this one.
+ Returns:
+ This inventory, modified.
+ """
+ ifself.is_empty():
+ # Optimization for empty inventories; if the current one is empty,
+ # adopt all of the other inventory's positions without running
+ # through the full aggregation checks. This should be very cheap. We
+ # can do this because the positions are immutable.
+ self.update(other)
+ else:
+ forpositioninother.get_positions():
+ self.add_position(position)
+ returnself
+
def __init__(self, positions=None):
- """Create a new inventory using a list of existing positions.
-
- Args:
- positions: A list of Position instances or an existing dict or
- Inventory instance.
- """
- if isinstance(positions, (dict, Inventory)):
- dict.__init__(self, positions)
- else:
- dict.__init__(self)
- if positions:
- assert isinstance(positions, Iterable)
- for position in positions:
- self.add_position(position)
-
+
def__init__(self,positions=None):
+"""Create a new inventory using a list of existing positions.
+
+ Args:
+ positions: A list of Position instances or an existing dict or
+ Inventory instance.
+ """
+ ifisinstance(positions,(dict,Inventory)):
+ dict.__init__(self,positions)
+ else:
+ dict.__init__(self)
+ ifpositions:
+ assertisinstance(positions,Iterable)
+ forpositioninpositions:
+ self.add_position(position)
+
def __mul__(self, scalar):
- """Scale/multiply the contents of the inventory.
-
- Args:
- scalar: A Decimal.
- Returns:
- An instance of Inventory.
- """
- return Inventory({key: pos * scalar for key, pos in self.items()})
-
+
def__mul__(self,scalar):
+"""Scale/multiply the contents of the inventory.
+
+ Args:
+ scalar: A Decimal.
+ Returns:
+ An instance of Inventory.
+ """
+ returnInventory({key:pos*scalarforkey,posinself.items()})
+
def __neg__(self):
- """Return an inventory with the negative of values of this one.
-
- Returns:
- An instance of Inventory.
- """
- return Inventory({key: -pos for key, pos in self.items()})
-
+
def__neg__(self):
+"""Return an inventory with the negative of values of this one.
+
+ Returns:
+ An instance of Inventory.
+ """
+ returnInventory({key:-posforkey,posinself.items()})
+
A pair of (position, booking) where 'position' is the position
-that was modified BEFORE it was modified, and where 'booking' is a
-Booking enum that hints at how the lot was booked to this inventory.
+
A pair of (position, matched) where 'position' is the position
+that was modified BEFORE it was modified, and where 'matched' is a
+MatchResult enum that hints at how the lot was booked to this inventory.
Position may be None if there is no corresponding Position object,
e.g. the position was deleted.
@@ -12972,57 +15785,61 @@
Source code in beancount/core/inventory.py
-
def add_amount(self, units, cost=None):
- """Add to this inventory using amount and cost. This adds with strict lot
- matching, that is, no partial matches are done on the arguments to the
- keys of the inventory.
-
- Args:
- units: An Amount instance to add.
- cost: An instance of Cost or None, as a key to the inventory.
- Returns:
- A pair of (position, booking) where 'position' is the position
- that was modified BEFORE it was modified, and where 'booking' is a
- Booking enum that hints at how the lot was booked to this inventory.
- Position may be None if there is no corresponding Position object,
- e.g. the position was deleted.
- """
- if ASSERTS_TYPES:
- assert isinstance(units, Amount), (
- "Internal error: {!r} (type: {})".format(units, type(units).__name__))
- assert cost is None or isinstance(cost, Cost), (
- "Internal error: {!r} (type: {})".format(cost, type(cost).__name__))
-
- # Find the position.
- key = (units.currency, cost)
- pos = self.get(key, None)
-
- if pos is not None:
- # Note: In order to augment or reduce, all the fields have to match.
-
- # Check if reducing.
- booking = (Booking.REDUCED
- if not same_sign(pos.units.number, units.number)
- else Booking.AUGMENTED)
-
- # Compute the new number of units.
- number = pos.units.number + units.number
- if number == ZERO:
- # If empty, delete the position.
- del self[key]
- else:
- # Otherwise update it.
- self[key] = Position(Amount(number, units.currency), cost)
- else:
- # If not found, create a new one.
- if units.number == ZERO:
- booking = Booking.IGNORED
- else:
- self[key] = Position(units, cost)
- booking = Booking.CREATED
-
- return pos, booking
-
+
defadd_amount(self,units,cost=None):
+"""Add to this inventory using amount and cost. This adds with strict lot
+ matching, that is, no partial matches are done on the arguments to the
+ keys of the inventory.
+
+ Args:
+ units: An Amount instance to add.
+ cost: An instance of Cost or None, as a key to the inventory.
+ Returns:
+ A pair of (position, matched) where 'position' is the position
+ that was modified BEFORE it was modified, and where 'matched' is a
+ MatchResult enum that hints at how the lot was booked to this inventory.
+ Position may be None if there is no corresponding Position object,
+ e.g. the position was deleted.
+ """
+ ifASSERTS_TYPES:
+ assertisinstance(units,Amount),"Internal error: {!r} (type: {})".format(
+ units,type(units).__name__
+ )
+ assertcostisNoneorisinstance(
+ cost,Cost
+ ),"Internal error: {!r} (type: {})".format(cost,type(cost).__name__)
+
+ # Find the position.
+ key=(units.currency,cost)
+ pos=self.get(key,None)
+
+ ifposisnotNone:
+ # Note: In order to augment or reduce, all the fields have to match.
+
+ # Check if reducing.
+ booking=(
+ MatchResult.REDUCED
+ ifnotsame_sign(pos.units.number,units.number)
+ elseMatchResult.AUGMENTED
+ )
+
+ # Compute the new number of units.
+ number=pos.units.number+units.number
+ ifnumber==ZERO:
+ # If empty, delete the position.
+ delself[key]
+ else:
+ # Otherwise update it.
+ self[key]=Position(Amount(number,units.currency),cost)
+ else:
+ # If not found, create a new one.
+ ifunits.number==ZERO:
+ booking=MatchResult.IGNORED
+ else:
+ self[key]=Position(units,cost)
+ booking=MatchResult.CREATED
+
+ returnpos,booking
+
def add_inventory(self, other):
- """Add all the positions of another Inventory instance to this one.
-
- Args:
- other: An instance of Inventory to add to this one.
- Returns:
- This inventory, modified.
- """
- if self.is_empty():
- # Optimization for empty inventories; if the current one is empty,
- # adopt all of the other inventory's positions without running
- # through the full aggregation checks. This should be very cheap. We
- # can do this because the positions are immutable.
- self.update(other)
- else:
- for position in other.get_positions():
- self.add_position(position)
- return self
-
+
defadd_inventory(self,other):
+"""Add all the positions of another Inventory instance to this one.
+
+ Args:
+ other: An instance of Inventory to add to this one.
+ Returns:
+ This inventory, modified.
+ """
+ ifself.is_empty():
+ # Optimization for empty inventories; if the current one is empty,
+ # adopt all of the other inventory's positions without running
+ # through the full aggregation checks. This should be very cheap. We
+ # can do this because the positions are immutable.
+ self.update(other)
+ else:
+ forpositioninother.get_positions():
+ self.add_position(position)
+ returnself
+
A pair of (position, booking) where 'position' is the position
-that was modified, and where 'booking' is a Booking enum that hints at
-how the lot was booked to this inventory.
+that was modified, and where 'matched' is a MatchResult enum that
+hints at how the lot was booked to this inventory.
@@ -13155,24 +15972,26 @@
Source code in beancount/core/inventory.py
-
def add_position(self, position):
- """Add using a position (with strict lot matching).
- Return True if this position was booked against and reduced another.
-
- Args:
- position: The Posting or Position to add to this inventory.
- Returns:
- A pair of (position, booking) where 'position' is the position
- that was modified, and where 'booking' is a Booking enum that hints at
- how the lot was booked to this inventory.
- """
- if ASSERTS_TYPES:
- assert hasattr(position, 'units') and hasattr(position, 'cost'), (
- "Invalid type for position: {}".format(position))
- assert isinstance(position.cost, (type(None), Cost)), (
- "Invalid type for cost: {}".format(position.cost))
- return self.add_amount(position.units, position.cost)
-
+
defadd_position(self,position):
+"""Add using a position (with strict lot matching).
+ Return True if this position was booked against and reduced another.
+
+ Args:
+ position: The Posting or Position to add to this inventory.
+ Returns:
+ A pair of (position, booking) where 'position' is the position
+ that was modified, and where 'matched' is a MatchResult enum that
+ hints at how the lot was booked to this inventory.
+ """
+ ifASSERTS_TYPES:
+ asserthasattr(position,"units")andhasattr(
+ position,"cost"
+ ),"Invalid type for position: {}".format(position)
+ assertisinstance(
+ position.cost,(type(None),Cost)
+ ),"Invalid type for cost: {}".format(position.cost)
+ returnself.add_amount(position.units,position.cost)
+
def average(self):
- """Average all lots of the same currency together.
-
- Use the minimum date from each aggregated set of lots.
-
- Returns:
- An instance of Inventory.
- """
- groups = collections.defaultdict(list)
- for position in self:
- key = (position.units.currency,
- position.cost.currency if position.cost else None)
- groups[key].append(position)
-
- average_inventory = Inventory()
- for (currency, cost_currency), positions in groups.items():
- total_units = sum(position.units.number
- for position in positions)
- # Explicitly skip aggregates when resulting in zero units.
- if total_units == ZERO:
- continue
- units_amount = Amount(total_units, currency)
-
- if cost_currency:
- total_cost = sum(convert.get_cost(position).number
- for position in positions)
- cost_number = (Decimal('Infinity')
- if total_units == ZERO
- else (total_cost / total_units))
- min_date = None
- for pos in positions:
- pos_date = pos.cost.date if pos.cost else None
- if pos_date is not None:
- min_date = (pos_date
- if min_date is None
- else min(min_date, pos_date))
- cost = Cost(cost_number, cost_currency, min_date, None)
- else:
- cost = None
-
- average_inventory.add_amount(units_amount, cost)
-
- return average_inventory
-
+
defaverage(self):
+"""Average all lots of the same currency together.
+
+ Use the minimum date from each aggregated set of lots.
+
+ Returns:
+ An instance of Inventory.
+ """
+ groups=collections.defaultdict(list)
+ forpositioninself:
+ key=(
+ position.units.currency,
+ position.cost.currencyifposition.costelseNone,
+ )
+ groups[key].append(position)
+
+ average_inventory=Inventory()
+ for(currency,cost_currency),positionsingroups.items():
+ total_units=sum(position.units.numberforpositioninpositions)
+ # Explicitly skip aggregates when resulting in zero units.
+ iftotal_units==ZERO:
+ continue
+ units_amount=Amount(total_units,currency)
+
+ ifcost_currency:
+ total_cost=sum(
+ convert.get_cost(position).numberforpositioninpositions
+ )
+ cost_number=(
+ Decimal("Infinity")
+ iftotal_units==ZERO
+ else(total_cost/total_units)
+ )
+ min_date=None
+ forposinpositions:
+ pos_date=pos.cost.dateifpos.costelseNone
+ ifpos_dateisnotNone:
+ min_date=pos_dateifmin_dateisNoneelsemin(min_date,pos_date)
+ cost=Cost(cost_number,cost_currency,min_date,None)
+ else:
+ cost=None
+
+ average_inventory.add_amount(units_amount,cost)
+
+ returnaverage_inventory
+
def cost_currencies(self):
- """Return the list of unit currencies held in this inventory.
-
- Returns:
- A set of currency strings.
- """
- return set(cost.currency
- for _, cost in self.keys()
- if cost is not None)
-
+
defcost_currencies(self):
+"""Return the list of unit currencies held in this inventory.
+
+ Returns:
+ A set of currency strings.
+ """
+ returnset(cost.currencyfor_,costinself.keys()ifcostisnotNone)
+
def currencies(self):
- """Return the list of unit currencies held in this inventory.
-
- Returns:
- A list of currency strings.
- """
- return set(currency for currency, _ in self.keys())
-
+
defcurrencies(self):
+"""Return the list of unit currencies held in this inventory.
+
+ Returns:
+ A list of currency strings.
+ """
+ returnset(currencyforcurrency,_inself.keys())
+
def currency_pairs(self):
- """Return the commodities held in this inventory.
-
- Returns:
- A set of currency strings.
- """
- return set(position.currency_pair() for position in self)
-
+
defcurrency_pairs(self):
+"""Return the commodities held in this inventory.
+
+ Returns:
+ A set of currency strings.
+ """
+ returnset(position.currency_pair()forpositioninself)
+
@staticmethod
-def from_string(string):
- """Create an Inventory from a string. This is useful for writing tests.
-
- Args:
- string: A comma-separated string of <number> <currency> with an
- optional {<number> <currency>} for the cost.
- Returns:
- A new instance of Inventory with the given balances.
- """
- new_inventory = Inventory()
- # We need to split the comma-separated positions but ignore commas
- # occurring within a {...cost...} specification.
- position_strs = re.split(
- r'([-+]?[0-9,.]+\s+[A-Z]+\s*(?:{[^}]*})?)\s*,?\s*', string)[1::2]
- for position_str in position_strs:
- new_inventory.add_position(position_from_string(position_str))
- return new_inventory
-
+
@staticmethod
+deffrom_string(string):
+"""Create an Inventory from a string. This is useful for writing tests.
+
+ Args:
+ string: A comma-separated string of <number> <currency> with an
+ optional {<number> <currency>} for the cost.
+ Returns:
+ A new instance of Inventory with the given balances.
+ """
+ new_inventory=Inventory()
+ # We need to split the comma-separated positions but ignore commas
+ # occurring within a {...cost...} specification.
+ position_strs=re.split(
+ r"([-+]?[0-9,.]+\s+[A-Z]+\s*(?:{[^}]*})?)\s*,?\s*",string
+ )[1::2]
+ forposition_strinposition_strs:
+ new_inventory.add_position(position_from_string(position_str))
+ returnnew_inventory
+
def get_currency_units(self, currency):
- """Fetch the total amount across all the position in the given currency.
- This may sum multiple lots in the same currency denomination.
-
- Args:
- currency: A string, the currency to filter the positions with.
- Returns:
- An instance of Amount, with the given currency.
- """
- total_units = ZERO
- for position in self:
- if position.units.currency == currency:
- total_units += position.units.number
- return Amount(total_units, currency)
-
+
defget_currency_units(self,currency):
+"""Fetch the total amount across all the position in the given currency.
+ This may sum multiple lots in the same currency denomination.
+
+ Args:
+ currency: A string, the currency to filter the positions with.
+ Returns:
+ An instance of Amount, with the given currency.
+ """
+ total_units=ZERO
+ forpositioninself:
+ ifposition.units.currency==currency:
+ total_units+=position.units.number
+ returnAmount(total_units,currency)
+
def get_only_position(self):
- """Return the first position and assert there are no more.
- If the inventory is empty, return None.
- """
- if len(self) > 0:
- if len(self) > 1:
- raise AssertionError("Inventory has more than one expected "
- "position: {}".format(self))
- return next(iter(self))
-
+
defget_only_position(self):
+"""Return the first position and assert there are no more.
+ If the inventory is empty, return None.
+ """
+ iflen(self)>0:
+ iflen(self)>1:
+ raiseAssertionError(
+ "Inventory has more than one expected ""position: {}".format(self)
+ )
+ returnnext(iter(self))
+
def get_positions(self):
- """Return the positions in this inventory.
-
- Returns:
- A shallow copy of the list of positions.
- """
- return list(iter(self))
-
+
defget_positions(self):
+"""Return the positions in this inventory.
+
+ Returns:
+ A shallow copy of the list of positions.
+ """
+ returnlist(iter(self))
+
def is_mixed(self):
- """Return true if the inventory contains a mix of positive and negative lots for
- at least one instrument.
-
- Returns:
- A boolean.
- """
- signs_map = {}
- for position in self:
- sign = position.units.number >= 0
- prev_sign = signs_map.setdefault(position.units.currency, sign)
- if sign != prev_sign:
- return True
- return False
-
+
defis_mixed(self):
+"""Return true if the inventory contains a mix of positive and negative lots for
+ at least one instrument.
+
+ Returns:
+ A boolean.
+ """
+ signs_map={}
+ forpositioninself:
+ sign=position.units.number>=0
+ prev_sign=signs_map.setdefault(position.units.currency,sign)
+ ifsign!=prev_sign:
+ returnTrue
+ returnFalse
+
def is_reduced_by(self, ramount):
- """Return true if the amount could reduce this inventory.
-
- Args:
- ramount: An instance of Amount.
- Returns:
- A boolean.
- """
- if ramount.number == ZERO:
- return False
- for position in self:
- units = position.units
- if (ramount.currency == units.currency and
- not same_sign(ramount.number, units.number)):
- return True
- return False
-
+
defis_reduced_by(self,ramount):
+"""Return true if the amount could reduce this inventory.
+
+ Args:
+ ramount: An instance of Amount.
+ Returns:
+ A boolean.
+ """
+ iframount.number==ZERO:
+ returnFalse
+ forpositioninself:
+ units=position.units
+ iframount.currency==units.currencyandnotsame_sign(
+ ramount.number,units.number
+ ):
+ returnTrue
+ returnFalse
+
def is_small(self, tolerances):
- """Return true if all the positions in the inventory are small.
-
- Args:
- tolerances: A Decimal, the small number of units under which a position
- is considered small, or a dict of currency to such epsilon precision.
- Returns:
- A boolean.
- """
- if isinstance(tolerances, dict):
- for position in self:
- tolerance = tolerances.get(position.units.currency, ZERO)
- if abs(position.units.number) > tolerance:
- return False
- small = True
- else:
- small = not any(abs(position.units.number) > tolerances
- for position in self)
- return small
-
+
defis_small(self,tolerances):
+"""Return true if all the positions in the inventory are small.
+
+ Args:
+ tolerances: A Decimal, the small number of units under which a position
+ is considered small, or a dict of currency to such epsilon precision.
+ Returns:
+ A boolean.
+ """
+ ifisinstance(tolerances,dict):
+ forpositioninself:
+ tolerance=tolerances.get(position.units.currency,ZERO)
+ ifabs(position.units.number)>tolerance:
+ returnFalse
+ small=True
+ else:
+ small=notany(abs(position.units.number)>tolerancesforpositioninself)
+ returnsmall
+
def reduce(self, reducer, *args):
    """Reduce an inventory using one of the conversion functions.

    See functions in beancount.core.convert.

    Returns:
      An instance of Inventory.
    """
    result = Inventory()
    for position in self:
        result.add_amount(reducer(position, *args))
    return result
+
def segregate_units(self, currencies):
    """Split up the list of positions to the given currencies.

    Args:
      currencies: A list of currency strings, the currencies to isolate.
    Returns:
      A dict of currency to Inventory instances.
    """
    # One bucket per requested currency, plus a catch-all keyed by None.
    buckets = {currency: Inventory() for currency in currencies}
    buckets[None] = Inventory()
    for position in self:
        currency = position.units.currency
        if currency not in currencies:
            currency = None
        buckets[currency].add_position(position)
    return buckets
+
Split up the list of positions to their corresponding currencies.
+
+
+
+
+
+
+
+
+
Returns:
+
+
+
A dict of currency to Inventory instances.
+
+
+
+
+
+
+ Source code in beancount/core/inventory.py
+
def split(self):
    """Split up the list of positions to their corresponding currencies.

    Returns:
      A dict of currency to Inventory instances.
    """
    by_currency = collections.defaultdict(Inventory)
    for position in self:
        by_currency[position.units.currency].add_position(position)
    return dict(by_currency)
+
@@ -14017,7 +16887,7 @@
-beancount.core.inventory.Inventory.to_string(self, dformat=<beancount.core.display_context.DisplayFormatter object at 0x78e868ae5e50>, parens=True)
+beancount.core.inventory.Inventory.to_string(self,dformat=<beancount.core.display_context.DisplayFormatterobjectat0x78fcde6b7290>,parens=True)
@@ -14061,19 +16931,18 @@
Source code in beancount/core/inventory.py
-
def to_string(self, dformat=DEFAULT_FORMATTER, parens=True):
    """Convert an Inventory instance to a printable string.

    Args:
      dformat: An instance of DisplayFormatter.
      parens: A boolean, true if we should surround the result by parentheses.
        (The original docstring documented this as "parents", a typo.)
    Returns:
      A formatted string of the quantized amounts and symbols.
    """
    # Positions are rendered in sorted order for a deterministic output.
    positions_str = ', '.join(pos.to_string(dformat) for pos in sorted(self))
    return '({})'.format(positions_str) if parens else positions_str
+
def check_invariants(inv):
    """Check the invariants of the Inventory.

    Args:
      inv: An instance of Inventory.
    Raises:
      AssertionError: If an invariant is violated. (Note: the function
        returns None on success; it does not return True.)
    """
    # Check that all the keys are unique.
    lots = set((pos.units.currency, pos.cost) for pos in inv)
    assert len(lots) == len(inv), "Invalid inventory: {}".format(inv)
    # Check that none of the amounts is zero.
    for pos in inv:
        assert pos.units.number != ZERO, "Invalid position size: {}".format(pos)
+
@staticmethod
def from_string(string):
    """Create an Inventory from a string. This is useful for writing tests.

    Args:
      string: A comma-separated string of <number> <currency> with an
        optional {<number> <currency>} for the cost.
    Returns:
      A new instance of Inventory with the given balances.
    """
    inventory = Inventory()
    # Split the comma-separated positions, but ignore any commas that occur
    # inside a {...cost...} specification.
    one_position = r'([-+]?[0-9,.]+\s+[A-Z]+\s*(?:{[^}]*})?)\s*,?\s*'
    for position_str in re.split(one_position, string)[1::2]:
        inventory.add_position(position_from_string(position_str))
    return inventory
+
def D(strord=None):
    """Convert a string into a Decimal object.

    This is used in parsing amounts from files in the importers. This is the
    main function you should use to build all numbers the system manipulates
    (never use floating-point in an accounting system). Commas are stripped
    and ignored, as they are assumed to be thousands separators (the French
    comma separator as decimal is not supported). This function just returns
    the argument if it is already a Decimal object, for convenience.

    Args:
      strord: A string or Decimal instance; int and float are also accepted,
        and None or '' yield Decimal().
    Returns:
      A Decimal instance.
    Raises:
      ValueError: If the value cannot be converted to a Decimal.
    """
    try:
        # Note: try a map lookup and optimize performance here.
        if strord is None or strord == '':
            return Decimal()
        elif isinstance(strord, str):
            return Decimal(_CLEAN_NUMBER_RE.sub('', strord))
        elif isinstance(strord, Decimal):
            return strord
        elif isinstance(strord, (int, float)):
            return Decimal(strord)
        else:
            assert strord is None, "Invalid value to convert: {}".format(strord)
    except Exception as exc:
        # Chain the original exception so the root cause is not lost
        # (the previous version re-raised without 'from exc').
        raise ValueError("Impossible to create Decimal instance from {!s}: {}".format(
            strord, exc)) from exc
+
def is_fast_decimal(decimal_module):
    "Return true if a fast C decimal implementation is installed."
    # The C implementation exposes Decimal methods as built-in functions;
    # the pure-Python implementation exposes them as regular functions.
    sqrt_method = decimal_module.Decimal().sqrt
    return isinstance(sqrt_method, types.BuiltinFunctionType)
-
+
def auto_quantize(number: Decimal, threshold: float) -> Decimal:
    """Automatically quantize the number at a given threshold.

    For example, with a threshold of 0.01, this will convert:

      20.899999618530273           -> 20.9
      20.290000000000000000000000  -> 20.29
      110.90                       -> 110.9
      11.0600004196167             -> 11.06
      10.539999961853027           -> 10.54
      134.3300018310547            -> 134.33
      253.920200000000000000000000 -> 253.9202
    """
    exponent = auto_quantized_exponent(number, threshold)
    if exponent == number.as_tuple().exponent:
        # Already at the inferred precision; return unchanged.
        return number
    quantum = TEN ** exponent
    return number.quantize(quantum).normalize()
+
Automatically infer the exponent that would be used below a given threshold.
+
+
+ Source code in beancount/core/number.py
+
def auto_quantized_exponent(number: Decimal, threshold: float) -> int:
    """Automatically infer the exponent that would be used below a given threshold."""
    dtuple = number.normalize().as_tuple()
    # Rescale to a pure fraction (all digits after the decimal point).
    norm = Decimal(dtuple._replace(sign=0, exponent=-len(dtuple.digits)))
    low_threshold = threshold
    high_threshold = 1.0 - low_threshold
    # Consume leading digits while the remaining fraction stays clear of
    # both thresholds.
    while norm != ZERO:
        if not (low_threshold <= norm <= high_threshold):
            break
        ntuple = norm.scaleb(1).as_tuple()
        norm = Decimal(ntuple._replace(digits=ntuple.digits[ntuple.exponent:]))
    return dtuple.exponent - norm.as_tuple().exponent
+
Given a list of numbers from floats, infer the common quantization.
+
For a series of numbers provided as floats, e.g., prices from a price
source, we'd like to infer the right quantization that should be used
to avoid rounding errors above some threshold.
+
The quantization is inferred from the numbers themselves. This simple
algorithm auto-quantizes all the numbers and quantizes all of them at the
maximum precision that would result in rounding under the threshold.
+
+
+
+
+
+
+
+
+
Parameters:
+
+
+
prices – A list of float or Decimal prices to infer from. If floats are
+provided, conversion is done naively.
+
threshold (float) – A fraction, the maximum error to tolerate before stopping the
+search.
+
+
+
+
+
+
+
+
+
+
+
+
+
Returns:
+
+
+
Optional[decimal.Decimal] – A decimal object to use with decimal.Decimal.quantize().
+
+
+
+
+
+
+ Source code in beancount/core/number.py
+
def infer_quantum_from_list(
        numbers: List[Decimal], threshold: float = 0.01) -> Optional[Decimal]:
    """Given a list of numbers from floats, infer the common quantization.

    For a series of numbers provided as floats, e.g., prices from a price
    source, we'd like to infer the right quantization to use to avoid
    rounding errors above some threshold. This simple algorithm
    auto-quantizes all the numbers and quantizes all of them at the maximum
    precision that would result in rounding under the threshold.

    Args:
      numbers: A list of Decimal numbers to infer from (the original
        docstring named this "prices"). If converted from floats,
        conversion is done naively.
      threshold: A fraction, the maximum error to tolerate before stopping
        the search.
    Returns:
      The negated maximum number of fractional digits across the quantized
      numbers. NOTE(review): the annotation declares Optional[Decimal] but
      the value returned is an int exponent — confirm against callers.
    """
    # Auto-quantize all the numbers, then keep the worst-case precision.
    qnumbers = [auto_quantize(number, threshold) for number in numbers]
    max_digits = max(num_fractional_digits(qnumber) for qnumber in qnumbers)
    return -max_digits
+
def round_to(number, increment):
    """Round a number towards zero to a particular increment.

    Note: despite the historical description "round down", int() truncates
    towards zero, so negative numbers are rounded *up* towards zero.

    Args:
      number: A Decimal, the number to be rounded.
      increment: A Decimal, the size of the increment.
    Returns:
      A Decimal, the rounded number.
    """
    return int(number / increment) * increment
+
def same_sign(number1, number2):
    """Return true if both numbers have the same sign.

    Zero is treated as non-negative.

    Args:
      number1: An instance of Decimal.
      number2: An instance of Decimal.
    Returns:
      A boolean.
    """
    first_nonneg = number1 >= 0
    second_nonneg = number2 >= 0
    return first_nonneg == second_nonneg
+
def __abs__(self):
    """Return the absolute value of the position.

    Returns:
      An instance of Position with the absolute units.
    """
    absolute_units = amount_abs(self.units)
    return Position(absolute_units, self.cost)
+
def __copy__(self):
    """Shallow copy, except for the lot, which can be shared. This is important
    for performance reasons; a lot of time is spent here during balancing.

    Returns:
      A shallow copy of this position.
    """
    # Note: We use Decimal() for efficiency.
    units_copy = copy.copy(self.units)
    cost_copy = copy.copy(self.cost)
    return Position(units_copy, cost_copy)
+
def __eq__(self, other):
    """Equality comparison with another Position. The objects are considered
    equal if both number and lot are matching, and if the number of units is
    zero and the other position is None, that is also okay.

    Args:
      other: An instance of Position, or None.
    Returns:
      A boolean, true if the positions are equal.
    """
    if other is None:
        # A zero position compares equal to None.
        return self.units.number == ZERO
    return self.units == other.units and self.cost == other.cost
+
def __hash__(self):
    """Compute a hash for this position.

    Returns:
      A hash of this position object.
    """
    lot_key = (self.units, self.cost)
    return hash(lot_key)
+
def __lt__(self, other):
    """A less-than comparison operator for positions.

    Args:
      other: Another instance of Position.
    Returns:
      True if this position is smaller than the other position.
    """
    self_key = self.sortkey()
    other_key = other.sortkey()
    return self_key < other_key
+
def __mul__(self, scalar):
    """Scale/multiply the contents of the position.

    Args:
      scalar: A Decimal.
    Returns:
      An instance of Position with scaled units. (The original docstring
      incorrectly said "An instance of Inventory".)
    """
    scaled_units = amount_mul(self.units, scalar)
    return Position(scaled_units, self.cost)
+
def get_negative(self):
    """Get a copy of this position but with a negative number.

    Returns:
      An instance of Position which represents the inverse of this Position.
    """
    # Note: We use Decimal() for efficiency.
    inverted_units = -self.units
    return Position(inverted_units, self.cost)
+
def __new__(cls, units, cost=None):
    """Constructor that validates the types of the units and cost before
    delegating to the underlying tuple constructor."""
    assert isinstance(units, Amount), (
        "Expected an Amount for units; received '{}'".format(units))
    assert cost is None or isinstance(cost, Position.cost_types), (
        "Expected a Cost for cost; received '{}'".format(cost))
    return _Position.__new__(cls, units, cost)
+
def __str__(self):
    """Return a string representation of the position.

    Returns:
      A string, a printable representation of the position.
    """
    # Delegate to to_string() with its default rendering options.
    return self.to_string()
+
def __str__(self):
    """Return a string representation of the position.

    Returns:
      A string, a printable representation of the position.
    """
    # Delegate to to_string() with its default rendering options.
    return self.to_string()
+
def currency_pair(self):
    """Return the currency pair associated with this position.

    Returns:
      A pair of a currency string and a cost currency string or None.
    """
    cost_currency = self.cost.currency if self.cost else None
    return (self.units.currency, cost_currency)
+
@staticmethod
def from_amounts(units, cost_amount=None):
    """Create a position from an amount and a cost.

    Args:
      units: An Amount, the number of units and the lot currency. (The
        original docstring named this parameter "amount".)
      cost_amount: If not None, an Amount that represents the cost.
    Returns:
      A Position instance.
    """
    assert cost_amount is None or isinstance(cost_amount, Amount), (
        "Invalid type for cost: {}".format(cost_amount))
    if cost_amount:
        cost = Cost(cost_amount.number, cost_amount.currency, None, None)
    else:
        cost = None
    return Position(units, cost)
+
@staticmethod
def from_string(string):
    """Create a position from a string specification.

    This is a miniature parser used for building tests.

    Args:
      string: A string of <number> <currency> with an optional {<number>
        <currency>} for the cost, similar to the parser syntax.
    Returns:
      A new instance of Position.
    Raises:
      ValueError: If the string is not a valid position specification.
    """
    position_re = (r'\s*({})\s+({})'
                   r'(?:\s+{{([^}}]*)}})?'
                   r'\s*$').format(NUMBER_RE, CURRENCY_RE)
    match = re.match(position_re, string)
    if not match:
        raise ValueError("Invalid string for position: '{}'".format(string))

    number = D(match.group(1))
    currency = match.group(2)

    # Parse the optional {...} cost expression.
    cost_number = None
    cost_currency = None
    date = None
    label = None
    cost_expression = match.group(3)
    if cost_expression:
        # Cost components are separated by commas or slashes.
        for expr in [comp.strip() for comp in re.split('[,/]', cost_expression)]:

            # Match a compound number, e.g. "2.00 USD" or "2.00 # 5.00 USD".
            match = re.match(
                r'({NUMBER_RE})\s*(?:#\s*({NUMBER_RE}))?\s+({CURRENCY_RE})$'
                .format(NUMBER_RE=NUMBER_RE, CURRENCY_RE=CURRENCY_RE),
                expr)
            if match:
                per_number, total_number, cost_currency = match.group(1, 2, 3)
                per_number = D(per_number) if per_number else ZERO
                total_number = D(total_number) if total_number else ZERO
                if total_number:
                    # Fold the total into an equivalent per-unit cost.
                    total = number * per_number + total_number
                    per_number = total / number
                cost_number = per_number
                continue

            # Match a date.
            match = re.match(r'(\d\d\d\d)[-/](\d\d)[-/](\d\d)$', expr)
            if match:
                date = datetime.date(*map(int, match.group(1, 2, 3)))
                continue

            # Match a label.
            match = re.match(r'"([^"]+)*"$', expr)
            if match:
                label = match.group(1)
                continue

            # Match a merge-cost marker.
            match = re.match(r'\*$', expr)
            if match:
                raise ValueError("Merge-code not supported in string constructor.")

            raise ValueError("Invalid cost component: '{}'".format(expr))
        cost = Cost(cost_number, cost_currency, date, label)
    else:
        cost = None

    return Position(Amount(number, currency), cost)
+
def get_negative(self):
    """Get a copy of this position but with a negative number.

    Returns:
      An instance of Position which represents the inverse of this Position.
    """
    # Note: We use Decimal() for efficiency.
    inverted_units = -self.units
    return Position(inverted_units, self.cost)
+
def is_negative_at_cost(self):
    """Return true if the position is held at cost and negative.

    Returns:
      A boolean.
    """
    held_at_cost = self.cost is not None
    return held_at_cost and self.units.number < ZERO
+
def sortkey(self):
    """Return a key to sort positions by. This key depends on the order of the
    currency of the lot (we want to order common currencies first) and the
    number of units.

    Returns:
      A tuple, used to sort lists of positions.
    """
    currency = self.units.currency
    # Unknown currencies sort after the well-known ones, ordered by the
    # length of the currency string.
    order_units = CURRENCY_ORDER.get(currency, NCURRENCIES + len(currency))
    if self.cost is None:
        cost_number = ZERO
        cost_currency = ''
    else:
        cost_number = self.cost.number
        cost_currency = self.cost.currency
    return (order_units, cost_number, cost_currency, self.units.number)
+
@@ -15855,7 +18959,7 @@
-beancount.core.position.Position.to_string(self, dformat=<beancount.core.display_context.DisplayFormatter object at 0x78e868ae5e50>, detail=True)
+beancount.core.position.Position.to_string(self,dformat=<beancount.core.display_context.DisplayFormatterobjectat0x78fcde6b7290>,detail=True)
@@ -15866,11 +18970,10 @@
Source code in beancount/core/position.py
-
def to_string(self, dformat=DEFAULT_FORMATTER, detail=True):
    """Render the position to a string. See the module-level to_string() for
    details."""
    return to_string(self, dformat, detail)
+
def cost_to_str(cost, dformat, detail=True):
    """Format an instance of Cost or a CostSpec to a string.

    Args:
      cost: An instance of Cost or CostSpec.
      dformat: A DisplayFormatter object.
      detail: A boolean, true if we should render the non-amount components.
    Returns:
      A string, suitable for formatting.
    """
    components = []

    if isinstance(cost, Cost):
        if isinstance(cost.number, Decimal):
            components.append(Amount(cost.number, cost.currency).to_string(dformat))
        if detail:
            if cost.date:
                components.append(cost.date.isoformat())
            if cost.label:
                components.append('"{}"'.format(cost.label))

    elif isinstance(cost, CostSpec):
        has_per = isinstance(cost.number_per, Decimal)
        has_total = isinstance(cost.number_total, Decimal)
        if has_per or has_total:
            amount_parts = []
            if has_per:
                amount_parts.append(dformat.format(cost.number_per))
            if has_total:
                amount_parts.append('#')
                amount_parts.append(dformat.format(cost.number_total))
            if isinstance(cost.currency, str):
                amount_parts.append(cost.currency)
            components.append(' '.join(amount_parts))
        if detail:
            if cost.date:
                components.append(cost.date.isoformat())
            if cost.label:
                components.append('"{}"'.format(cost.label))
            if cost.merge:
                components.append('*')

    return ', '.join(components)
+
@staticmethod
def from_amounts(units, cost_amount=None):
    """Create a position from an amount and a cost.

    Args:
      units: An Amount, the number of units and the lot currency. (The
        original docstring named this parameter "amount".)
      cost_amount: If not None, an Amount that represents the cost.
    Returns:
      A Position instance.
    """
    assert cost_amount is None or isinstance(cost_amount, Amount), (
        "Invalid type for cost: {}".format(cost_amount))
    if cost_amount:
        cost = Cost(cost_amount.number, cost_amount.currency, None, None)
    else:
        cost = None
    return Position(units, cost)
+
@staticmethod
def from_string(string):
    """Create a position from a string specification.

    This is a miniature parser used for building tests.

    Args:
      string: A string of <number> <currency> with an optional {<number>
        <currency>} for the cost, similar to the parser syntax.
    Returns:
      A new instance of Position.
    Raises:
      ValueError: If the string is not a valid position specification.
    """
    position_re = (r'\s*({})\s+({})'
                   r'(?:\s+{{([^}}]*)}})?'
                   r'\s*$').format(NUMBER_RE, CURRENCY_RE)
    match = re.match(position_re, string)
    if not match:
        raise ValueError("Invalid string for position: '{}'".format(string))

    number = D(match.group(1))
    currency = match.group(2)

    # Parse the optional {...} cost expression.
    cost_number = None
    cost_currency = None
    date = None
    label = None
    cost_expression = match.group(3)
    if cost_expression:
        # Cost components are separated by commas or slashes.
        for expr in [comp.strip() for comp in re.split('[,/]', cost_expression)]:

            # Match a compound number, e.g. "2.00 USD" or "2.00 # 5.00 USD".
            match = re.match(
                r'({NUMBER_RE})\s*(?:#\s*({NUMBER_RE}))?\s+({CURRENCY_RE})$'
                .format(NUMBER_RE=NUMBER_RE, CURRENCY_RE=CURRENCY_RE),
                expr)
            if match:
                per_number, total_number, cost_currency = match.group(1, 2, 3)
                per_number = D(per_number) if per_number else ZERO
                total_number = D(total_number) if total_number else ZERO
                if total_number:
                    # Fold the total into an equivalent per-unit cost.
                    total = number * per_number + total_number
                    per_number = total / number
                cost_number = per_number
                continue

            # Match a date.
            match = re.match(r'(\d\d\d\d)[-/](\d\d)[-/](\d\d)$', expr)
            if match:
                date = datetime.date(*map(int, match.group(1, 2, 3)))
                continue

            # Match a label.
            match = re.match(r'"([^"]+)*"$', expr)
            if match:
                label = match.group(1)
                continue

            # Match a merge-cost marker.
            match = re.match(r'\*$', expr)
            if match:
                raise ValueError("Merge-code not supported in string constructor.")

            raise ValueError("Invalid cost component: '{}'".format(expr))
        cost = Cost(cost_number, cost_currency, date, label)
    else:
        cost = None

    return Position(Amount(number, currency), cost)
+
def get_position(posting):
- """Build a Position instance from a Posting instance.
-
- Args:
- posting: An instance of Posting.
- Returns:
- An instance of Position.
- """
- return Position(posting.units, posting.cost)
-
+
defget_position(posting):
+"""Build a Position instance from a Posting instance.
+
+ Args:
+ posting: An instance of Posting.
+ Returns:
+ An instance of Position.
+ """
+ returnPosition(posting.units,posting.cost)
+
@@ -16262,7 +19369,7 @@
-beancount.core.position.to_string(pos, dformat=<beancount.core.display_context.DisplayFormatter object at 0x78e868ae5e50>, detail=True)
+beancount.core.position.to_string(pos,dformat=<beancount.core.display_context.DisplayFormatterobjectat0x78fcde6b7290>,detail=True)
@@ -16309,23 +19416,23 @@
Source code in beancount/core/position.py
-
def to_string(pos, dformat=DEFAULT_FORMATTER, detail=True):
- """Render the Position or Posting instance to a string.
-
- Args:
- pos: An instance of Position or Posting.
- dformat: An instance of DisplayFormatter.
- detail: A boolean, true if we should only render the lot details
- beyond the cost (lot-date, label, etc.). If false, we only render
- the cost, if present.
- Returns:
- A string, the rendered position.
- """
- pos_str = pos.units.to_string(dformat)
- if pos.cost is not None:
- pos_str = '{} {{{}}}'.format(pos_str, cost_to_str(pos.cost, dformat, detail))
- return pos_str
-
+
defto_string(pos,dformat=DEFAULT_FORMATTER,detail=True):
+"""Render the Position or Posting instance to a string.
+
+ Args:
+ pos: An instance of Position or Posting.
+ dformat: An instance of DisplayFormatter.
+ detail: A boolean, true if we should only render the lot details
+ beyond the cost (lot-date, label, etc.). If false, we only render
+ the cost, if present.
+ Returns:
+ A string, the rendered position.
+ """
+ pos_str=pos.units.to_string(dformat)
+ ifpos.costisnotNone:
+ pos_str="{} {{{}}}".format(pos_str,cost_to_str(pos.cost,dformat,detail))
+ returnpos_str
+
def build_price_map(entries):
- """Build a price map from a list of arbitrary entries.
-
- If multiple prices are found for the same (currency, cost-currency) pair at
- the same date, the latest date is kept and the earlier ones (for that day)
- are discarded.
-
- If inverse price pairs are found, e.g. USD in AUD and AUD in USD, the
- inverse that has the smallest number of price points is converted into the
- one that has the most price points. In that way they are reconciled into a
- single one.
-
- Args:
- entries: A list of directives, hopefully including some Price and/or
- Transaction entries.
- Returns:
- A dict of (currency, cost-currency) keys to sorted lists of (date, number)
- pairs, where 'date' is the date the price occurs at and 'number' a Decimal
- that represents the price, or rate, between these two
- currencies/commodities. Each date occurs only once in the sorted list of
- prices of a particular key. All of the inverses are automatically
- generated in the price map.
- """
- # Fetch a list of all the price entries seen in the ledger.
- price_entries = [entry
- for entry in entries
- if isinstance(entry, Price)]
-
- # Build a map of exchange rates between these units.
- # (base-currency, quote-currency) -> List of (date, rate).
- price_map = collections.defaultdict(list)
- for price in price_entries:
- base_quote = (price.currency, price.amount.currency)
- price_map[base_quote].append((price.date, price.amount.number))
-
- # Find pairs of inversed units.
- inversed_units = []
- for base_quote, values in price_map.items():
- base, quote = base_quote
- if (quote, base) in price_map:
- inversed_units.append(base_quote)
-
- # Find pairs of inversed units, and swallow the conversion with the smaller
- # number of rates into the other one.
- for base, quote in inversed_units:
- bq_prices = price_map[(base, quote)]
- qb_prices = price_map[(quote, base)]
- remove = ((base, quote)
- if len(bq_prices) < len(qb_prices)
- else (quote, base))
- base, quote = remove
-
- remove_list = price_map[remove]
- insert_list = price_map[(quote, base)]
- del price_map[remove]
-
- inverted_list = [(date, ONE/rate)
- for (date, rate) in remove_list
- if rate != ZERO]
- insert_list.extend(inverted_list)
-
- # Unzip and sort each of the entries and eliminate duplicates on the date.
- sorted_price_map = PriceMap({
- base_quote: list(misc_utils.sorted_uniquify(date_rates, lambda x: x[0], last=True))
- for (base_quote, date_rates) in price_map.items()})
-
- # Compute and insert all the inverted rates.
- forward_pairs = list(sorted_price_map.keys())
- for (base, quote), price_list in list(sorted_price_map.items()):
- # Note: You have to filter out zero prices for zero-cost postings, like
- # gifted options.
- sorted_price_map[(quote, base)] = [
- (date, ONE/price) for date, price in price_list
- if price != ZERO]
-
- sorted_price_map.forward_pairs = forward_pairs
- return sorted_price_map
-
+
defbuild_price_map(entries):
+"""Build a price map from a list of arbitrary entries.
+
+ If multiple prices are found for the same (currency, cost-currency) pair at
+ the same date, the latest date is kept and the earlier ones (for that day)
+ are discarded.
+
+ If inverse price pairs are found, e.g. USD in AUD and AUD in USD, the
+ inverse that has the smallest number of price points is converted into the
+ one that has the most price points. In that way they are reconciled into a
+ single one.
+
+ Args:
+ entries: A list of directives, hopefully including some Price and/or
+ Transaction entries.
+ Returns:
+ A dict of (currency, cost-currency) keys to sorted lists of (date, number)
+ pairs, where 'date' is the date the price occurs at and 'number' a Decimal
+ that represents the price, or rate, between these two
+ currencies/commodities. Each date occurs only once in the sorted list of
+ prices of a particular key. All of the inverses are automatically
+ generated in the price map.
+ """
+ # Fetch a list of all the price entries seen in the ledger.
+ price_entries=[entryforentryinentriesifisinstance(entry,Price)]
+
+ # Build a map of exchange rates between these units.
+ # (base-currency, quote-currency) -> List of (date, rate).
+ price_map=collections.defaultdict(list)
+ forpriceinprice_entries:
+ base_quote=(price.currency,price.amount.currency)
+ price_map[base_quote].append((price.date,price.amount.number))
+
+ # Find pairs of inversed units.
+ inversed_units=[]
+ forbase_quote,valuesinprice_map.items():
+ base,quote=base_quote
+ if(quote,base)inprice_map:
+ inversed_units.append(base_quote)
+
+ # Find pairs of inversed units, and swallow the conversion with the smaller
+ # number of rates into the other one.
+ forbase,quoteininversed_units:
+ bq_prices=price_map[(base,quote)]
+ qb_prices=price_map[(quote,base)]
+ remove=(base,quote)iflen(bq_prices)<len(qb_prices)else(quote,base)
+ base,quote=remove
+
+ remove_list=price_map[remove]
+ insert_list=price_map[(quote,base)]
+ delprice_map[remove]
+
+ inverted_list=[(date,ONE/rate)for(date,rate)inremove_listifrate!=ZERO]
+ insert_list.extend(inverted_list)
+
+ # Unzip and sort each of the entries and eliminate duplicates on the date.
+ sorted_price_map=PriceMap(
+ {
+ base_quote:list(
+ misc_utils.sorted_uniquify(date_rates,lambdax:x[0],last=True)
+ )
+ for(base_quote,date_rates)inprice_map.items()
+ }
+ )
+
+ # Compute and insert all the inverted rates.
+ forward_pairs=list(sorted_price_map.keys())
+ for(base,quote),price_listinlist(sorted_price_map.items()):
+ # Note: You have to filter out zero prices for zero-cost postings, like
+ # gifted options.
+ sorted_price_map[(quote,base)]=[
+ (date,ONE/price)fordate,priceinprice_listifprice!=ZERO
+ ]
+
+ sorted_price_map.forward_pairs=forward_pairs
+ returnsorted_price_map
+
def get_all_prices(price_map, base_quote):
- """Return a sorted list of all (date, number) price pairs.
-
- Args:
- price_map: A price map, which is a dict of (base, quote) -> list of (date,
- number) tuples, as created by build_price_map.
- base_quote: A pair of strings, the base currency to lookup, and the quote
- currency to lookup, which expresses which units the base currency is
- denominated in. This may also just be a string, with a '/' separator.
- Returns:
- A list of (date, Decimal) pairs, sorted by date.
- Raises:
- KeyError: If the base/quote could not be found.
- """
- base_quote = normalize_base_quote(base_quote)
- return _lookup_price_and_inverse(price_map, base_quote)
-
+
defget_all_prices(price_map,base_quote):
+"""Return a sorted list of all (date, number) price pairs.
+
+ Args:
+ price_map: A price map, which is a dict of (base, quote) -> list of (date,
+ number) tuples, as created by build_price_map.
+ base_quote: A pair of strings, the base currency to lookup, and the quote
+ currency to lookup, which expresses which units the base currency is
+ denominated in. This may also just be a string, with a '/' separator.
+ Returns:
+ A list of (date, Decimal) pairs, sorted by date.
+ Raises:
+ KeyError: If the base/quote could not be found.
+ """
+ base_quote=normalize_base_quote(base_quote)
+ return_lookup_price_and_inverse(price_map,base_quote)
+
def get_last_price_entries(entries, date):
- """Run through the entries until the given date and return the last
- Price entry encountered for each (currency, cost-currency) pair.
-
- Args:
- entries: A list of directives.
- date: An instance of datetime.date. If None, the very latest price
- is returned.
- Returns:
- A list of price entries.
- """
- price_entry_map = {}
- for entry in entries:
- if date is not None and entry.date >= date:
- break
- if isinstance(entry, Price):
- base_quote = (entry.currency, entry.amount.currency)
- price_entry_map[base_quote] = entry
- return sorted(price_entry_map.values(), key=data.entry_sortkey)
-
+
defget_last_price_entries(entries,date):
+"""Run through the entries until the given date and return the last
+ Price entry encountered for each (currency, cost-currency) pair.
+
+ Args:
+ entries: A list of directives.
+ date: An instance of datetime.date. If None, the very latest price
+ is returned.
+ Returns:
+ A list of price entries.
+ """
+ price_entry_map={}
+ forentryinentries:
+ ifdateisnotNoneandentry.date>=date:
+ break
+ ifisinstance(entry,Price):
+ base_quote=(entry.currency,entry.amount.currency)
+ price_entry_map[base_quote]=entry
+ returnsorted(price_entry_map.values(),key=data.entry_sortkey)
+
def get_latest_price(price_map, base_quote):
- """Return the latest price/rate from a price map for the given base/quote pair.
- This is often used to just get the 'current' price if you're looking at the
- entire set of entries.
-
- Args:
- price_map: A price map, which is a dict of (base, quote) -> list of (date,
- number) tuples, as created by build_price_map.
- Returns:
- A pair of (date, number), where 'date' is a datetime.date instance and
- 'number' is a Decimal of the price, or rate, at that date. The date is the
- latest date which we have an available price for in the price map.
- """
- base_quote = normalize_base_quote(base_quote)
-
- # Handle the degenerate case of a currency priced into its own.
- base, quote = base_quote
- if quote is None or base == quote:
- return (None, ONE)
-
- # Look up the list and return the latest element. The lists are assumed to
- # be sorted.
- try:
- price_list = _lookup_price_and_inverse(price_map, base_quote)
- except KeyError:
- price_list = None
- if price_list:
- return price_list[-1]
- else:
- return None, None
-
+
defget_latest_price(price_map,base_quote):
+"""Return the latest price/rate from a price map for the given base/quote pair.
+ This is often used to just get the 'current' price if you're looking at the
+ entire set of entries.
+
+ Args:
+ price_map: A price map, which is a dict of (base, quote) -> list of (date,
+ number) tuples, as created by build_price_map.
+ Returns:
+ A pair of (date, number), where 'date' is a datetime.date instance and
+ 'number' is a Decimal of the price, or rate, at that date. The date is the
+ latest date which we have an available price for in the price map.
+ """
+ base_quote=normalize_base_quote(base_quote)
+
+ # Handle the degenerate case of a currency priced into its own.
+ base,quote=base_quote
+ ifquoteisNoneorbase==quote:
+ return(None,ONE)
+
+ # Look up the list and return the latest element. The lists are assumed to
+ # be sorted.
+ try:
+ price_list=_lookup_price_and_inverse(price_map,base_quote)
+ exceptKeyError:
+ price_list=None
+ ifprice_list:
+ returnprice_list[-1]
+ else:
+ returnNone,None
+
def get_price(price_map, base_quote, date=None):
- """Return the price as of the given date.
-
- If the date is unspecified, return the latest price.
-
- Args:
- price_map: A price map, which is a dict of (base, quote) -> list of (date,
- number) tuples, as created by build_price_map.
- base_quote: A pair of strings, the base currency to lookup, and the quote
- currency to lookup, which expresses which units the base currency is
- denominated in. This may also just be a string, with a '/' separator.
- date: A datetime.date instance, the date at which we want the conversion
- rate.
- Returns:
- A pair of (datetime.date, Decimal) instance. If no price information could
- be found, return (None, None).
- """
- if date is None:
- return get_latest_price(price_map, base_quote)
-
- base_quote = normalize_base_quote(base_quote)
-
- # Handle the degenerate case of a currency priced into its own.
- base, quote = base_quote
- if quote is None or base == quote:
- return (None, ONE)
-
- try:
- price_list = _lookup_price_and_inverse(price_map, base_quote)
- index = bisect_key.bisect_right_with_key(price_list, date, key=lambda x: x[0])
- if index == 0:
- return None, None
- else:
- return price_list[index-1]
- except KeyError:
- return None, None
-
+
defget_price(price_map,base_quote,date=None):
+"""Return the price as of the given date.
+
+ If the date is unspecified, return the latest price.
+
+ Args:
+ price_map: A price map, which is a dict of (base, quote) -> list of (date,
+ number) tuples, as created by build_price_map.
+ base_quote: A pair of strings, the base currency to lookup, and the quote
+ currency to lookup, which expresses which units the base currency is
+ denominated in. This may also just be a string, with a '/' separator.
+ date: A datetime.date instance, the date at which we want the conversion
+ rate.
+ Returns:
+ A pair of (datetime.date, Decimal) instance. If no price information could
+ be found, return (None, None).
+ """
+ ifdateisNone:
+ returnget_latest_price(price_map,base_quote)
+
+ base_quote=normalize_base_quote(base_quote)
+
+ # Handle the degenerate case of a currency priced into its own.
+ base,quote=base_quote
+ ifquoteisNoneorbase==quote:
+ return(None,ONE)
+
+ try:
+ price_list=_lookup_price_and_inverse(price_map,base_quote)
+ index=bisect_key.bisect_right_with_key(price_list,date,key=lambdax:x[0])
+ ifindex==0:
+ returnNone,None
+ else:
+ returnprice_list[index-1]
+ exceptKeyError:
+ returnNone,None
+
def normalize_base_quote(base_quote):
- """Convert a slash-separated string to a pair of strings.
-
- Args:
- base_quote: A pair of strings, the base currency to lookup, and the quote
- currency to lookup, which expresses which units the base currency is
- denominated in. This may also just be a string, with a '/' separator.
- Returns:
- A pair of strings.
- """
- if isinstance(base_quote, str):
- base_quote_norm = tuple(base_quote.split('/'))
- assert len(base_quote_norm) == 2, base_quote
- base_quote = base_quote_norm
- assert isinstance(base_quote, tuple), base_quote
- return base_quote
-
+
defnormalize_base_quote(base_quote):
+"""Convert a slash-separated string to a pair of strings.
+
+ Args:
+ base_quote: A pair of strings, the base currency to lookup, and the quote
+ currency to lookup, which expresses which units the base currency is
+ denominated in. This may also just be a string, with a '/' separator.
+ Returns:
+ A pair of strings.
+ """
+ ifisinstance(base_quote,str):
+ base_quote_norm=tuple(base_quote.split("/"))
+ assertlen(base_quote_norm)==2,base_quote
+ base_quote=base_quote_norm
+ assertisinstance(base_quote,tuple),base_quote
+ returnbase_quote
+
Project all prices with a quote currency to another quote currency.
+
Say you have a price for HOOL in USD and you'd like to convert HOOL to CAD.
+If there aren't any (HOOL, CAD) price pairs in the database it will remain
+unconverted. Projecting from USD to CAD will compute combined rates and
+insert corresponding prices over all base currencies (like HOOL). In this
+example, each of the (HOOL, USD) prices would see an inserted (HOOL, CAD)
+price inserted at the same date.
+
It is common to make these projections when reducing inventories in a ledger
+that states multiple operating currency pairs, when for example, one wants
+to compute total value of a set of accounts in one of those currencies.
+
Please note that:
+
+
+
Even if the target pair has existing entries, projection will still be
+ applied. For example, if there exist some (HOOL, CAD) prices, the
+ projection in the example above will still insert some new price points to
+ it.
+
+
+
However, projected prices colliding with existing ones at the same date will
+ not override them.
+
+
+
Projection will fail to insert a new price if the conversion between to
+ and from currencies has no existing prices (e.g. before its first price
+ entry).
+
+
+
Perhaps most importantly, we only insert price points at dates where the
+ base commodity has a price point. In other words, if we have prices for
+ dates A and C and the rate changes between these dates at date B, we don't
+ synthesize a new price at date B. A more accurate method to get projected
+ prices that takes into account varying rates is to do multiple lookups.
+ We'll eventually add a method to query it via a specified list of
+ intermediate pairs. {c1bd24f8d4b7}
+
+
+
+
+
+
+
+
+
+
+
Parameters:
+
+
+
orig_price_map (PriceMap) – An existing price map.
+
from_currency (str) – The quote currency with existing project points (e.g., USD).
+
to_currency (str) – The quote currency to insert price points for (e.g., CAD).
+
base_currencies (Optional[Set[str]]) – An optional set of commodities to restrict the
+projections to (e.g., {HOOL}).
+
+
+
+
+
+
+
+
+
+
+
+
+
Returns:
+
+
+
PriceMap – A new price map, with the extra projected prices. The original price map
+is kept intact.
+
+
+
+
+
+
+ Source code in beancount/core/prices.py
+
defproject(
+ orig_price_map:PriceMap,
+ from_currency:Currency,
+ to_currency:Currency,
+ base_currencies:Optional[Set[Currency]]=None,
+)->PriceMap:
+"""Project all prices with a quote currency to another quote currency.
+
+ Say you have a price for HOOL in USD and you'd like to convert HOOL to CAD.
+ If there aren't any (HOOL, CAD) price pairs in the database it will remain
+ unconverted. Projecting from USD to CAD will compute combined rates and
+ insert corresponding prices over all base currencies (like HOOL). In this
+ example, each of the (HOOL, USD) prices would see an inserted (HOOL, CAD)
+ price inserted at the same date.
+
+ It is common to make these projections when reducing inventories in a ledger
+ that states multiple operating currency pairs, when for example, one wants
+ to compute total value of a set of accounts in one of those currencies.
+
+ Please note that:
+
+ - Even if the target pair has existing entries, projection will still be
+ applied. For example, if there exist some (HOOL, CAD) prices, the
+ projection in the example above will still insert some new price points to
+ it.
+
+ - However, projected prices colliding with existing ones at the same date will
+ not override them.
+
+ - Projection will fail to insert a new price if the conversion between to
+ and from currencies has no existing prices (e.g. before its first price
+ entry).
+
+ - Perhaps most importantly, we only insert price points at dates where the
+ base commodity has a price point. In other words, if we have prices for
+ dates A and C and the rate changes between these dates at date B, we don't
+ synthesize a new price at date B. A more accurate method to get projected
+ prices that takes into account varying rates is to do multiple lookups.
+ We'll eventually add a method to query it via a specified list of
+ intermediate pairs. {c1bd24f8d4b7}
+
+ Args:
+ orig_price_map: An existing price map.
+ from_currency: The quote currency with existing project points (e.g., USD).
+ to_currency: The quote currency to insert price points for (e.g., CAD).
+ base_currencies: An optional set of commodities to restrict the
+ projections to (e.g., {HOOL}).
+ Returns:
+ A new price map, with the extra projected prices. The original price map
+ is kept intact.
+ """
+ # If nothing is requested, return the original map.
+ iffrom_currency==to_currency:
+ returnorig_price_map
+
+ # Avoid mutating the input map.
+ price_map={key:list(value)forkey,valueinorig_price_map.items()}
+
+ # Process the entire database (it's not indexed by quote currency).
+ currency_pair=(from_currency,to_currency)
+ forbase_quote,pricesinlist(price_map.items()):
+ # Filter just the currencies to convert.
+ base,quote=base_quote
+ ifquote!=from_currency:
+ continue
+
+ # Skip currencies not requested if a constraint has been provided.
+ # {4bb702d82c8a}
+ ifbase_currenciesandbasenotinbase_currencies:
+ continue
+
+ # Create a mapping of existing prices so we can avoid date collisions.
+ existing_prices=(
+ {datefordate,_inprice_map[(base,to_currency)]}
+ if(base,to_currency)inprice_map
+ elseset()
+ )
+
+ # Project over each of the prices.
+ new_projected=[]
+ fordate,priceinprices:
+ rate_date,rate=get_price(price_map,currency_pair,date)
+ ifrateisNone:
+ # There is no conversion rate at this time; skip projection.
+ # {b2b23353275d}.
+ continue
+ ifrate_dateinexisting_prices:
+ # Skip collisions in date. {97a5703ac517}
+ continue
+
+ # Append the new rate.
+ new_price=price*rate
+ new_projected.append((date,new_price))
+
+ # Make sure the resulting lists are sorted.
+ ifnew_projected:
+ projected=price_map.setdefault((base,to_currency),[])
+ projected.extend(new_projected)
+ projected.sort()
+
+ inverted=price_map.setdefault((to_currency,base),[])
+ inverted.extend(
+ (date,ZEROifrate==ZEROelseONE/rate)fordate,rateinnew_projected
+ )
+ inverted.sort()
+
+ returnprice_map
+
def __eq__(self, other):
- """Equality predicate. All attributes are compared.
-
- Args:
- other: Another instance of RealAccount.
- Returns:
- A boolean, True if the two real accounts are equal.
- """
- return (dict.__eq__(self, other) and
- self.account == other.account and
- self.balance == other.balance and
- self.txn_postings == other.txn_postings)
-
+
def__eq__(self,other):
+"""Equality predicate. All attributes are compared.
+
+ Args:
+ other: Another instance of RealAccount.
+ Returns:
+ A boolean, True if the two real accounts are equal.
+ """
+ return(
+ dict.__eq__(self,other)
+ andself.account==other.account
+ andself.balance==other.balance
+ andself.txn_postings==other.txn_postings
+ )
+
def __init__(self, account_name, *args, **kwargs):
- """Create a RealAccount instance.
-
- Args:
- account_name: a string, the name of the account. May not be None.
- """
- super().__init__(*args, **kwargs)
- assert isinstance(account_name, str)
- self.account = account_name
- self.txn_postings = []
- self.balance = inventory.Inventory()
-
+
def__init__(self,account_name,*args,**kwargs):
+"""Create a RealAccount instance.
+
+ Args:
+ account_name: a string, the name of the account. May not be None.
+ """
+ super().__init__(*args,**kwargs)
+ assertisinstance(account_name,str)
+ self.account=account_name
+ self.txn_postings=[]
+ self.balance=inventory.Inventory()
+
def __ne__(self, other):
- """Not-equality predicate. See __eq__.
-
- Args:
- other: Another instance of RealAccount.
- Returns:
- A boolean, True if the two real accounts are not equal.
- """
- return not self.__eq__(other)
-
+
def__ne__(self,other):
+"""Not-equality predicate. See __eq__.
+
+ Args:
+ other: Another instance of RealAccount.
+ Returns:
+ A boolean, True if the two real accounts are not equal.
+ """
+ returnnotself.__eq__(other)
+
def __setitem__(self, key, value):
- """Prevent the setting of non-string or non-empty keys on this dict.
-
- Args:
- key: The dictionary key. Must be a string.
- value: The value, must be a RealAccount instance.
- Raises:
- KeyError: If the key is not a string, or is invalid.
- ValueError: If the value is not a RealAccount instance.
- """
- if not isinstance(key, str) or not key:
- raise KeyError("Invalid RealAccount key: '{}'".format(key))
- if not isinstance(value, RealAccount):
- raise ValueError("Invalid RealAccount value: '{}'".format(value))
- if not value.account.endswith(key):
- raise ValueError("RealAccount name '{}' inconsistent with key: '{}'".format(
- value.account, key))
- return super().__setitem__(key, value)
-
+
def__setitem__(self,key,value):
+"""Prevent the setting of non-string or non-empty keys on this dict.
+
+ Args:
+ key: The dictionary key. Must be a string.
+ value: The value, must be a RealAccount instance.
+ Raises:
+ KeyError: If the key is not a string, or is invalid.
+ ValueError: If the value is not a RealAccount instance.
+ """
+ ifnotisinstance(key,str)ornotkey:
+ raiseKeyError("Invalid RealAccount key: '{}'".format(key))
+ ifnotisinstance(value,RealAccount):
+ raiseValueError("Invalid RealAccount value: '{}'".format(value))
+ ifnotvalue.account.endswith(key):
+ raiseValueError(
+ "RealAccount name '{}' inconsistent with key: '{}'".format(
+ value.account,key
+ )
+ )
+ returnsuper().__setitem__(key,value)
+
def copy(self):
- """Override dict.copy() to clone a RealAccount.
-
- This is only necessary to correctly implement the copy method.
- Otherwise, calling .copy() on a RealAccount instance invokes the base
- class' method, which return just a dict.
-
- Returns:
- A cloned instance of RealAccount, with all members shallow-copied.
- """
- return copy.copy(self)
-
+
defcopy(self):
+"""Override dict.copy() to clone a RealAccount.
+
+ This is only necessary to correctly implement the copy method.
+ Otherwise, calling .copy() on a RealAccount instance invokes the base
+ class' method, which return just a dict.
+
+ Returns:
+ A cloned instance of RealAccount, with all members shallow-copied.
+ """
+ returncopy.copy(self)
+
def compute_balance(real_account, leaf_only=False):
- """Compute the total balance of this account and all its subaccounts.
-
- Args:
- real_account: A RealAccount instance.
- leaf_only: A boolean flag, true if we should yield only leaves.
- Returns:
- An Inventory.
- """
- return functools.reduce(operator.add, [
- ra.balance for ra in iter_children(real_account, leaf_only)])
-
+
defcompute_balance(real_account,leaf_only=False):
+"""Compute the total balance of this account and all its subaccounts.
+
+ Args:
+ real_account: A RealAccount instance.
+ leaf_only: A boolean flag, true if we should yield only leaves.
+ Returns:
+ An Inventory.
+ """
+ returnfunctools.reduce(
+ operator.add,[ra.balanceforrainiter_children(real_account,leaf_only)]
+ )
+
def compute_postings_balance(txn_postings):
- """Compute the balance of a list of Postings's or TxnPosting's positions.
-
- Args:
- postings: A list of Posting instances and other directives (which are
- skipped).
- Returns:
- An Inventory.
- """
- final_balance = inventory.Inventory()
- for txn_posting in txn_postings:
- if isinstance(txn_posting, Posting):
- final_balance.add_position(txn_posting)
- elif isinstance(txn_posting, TxnPosting):
- final_balance.add_position(txn_posting.posting)
- return final_balance
-
+
defcompute_postings_balance(txn_postings):
+"""Compute the balance of a list of Postings's or TxnPosting's positions.
+
+ Args:
+ postings: A list of Posting instances and other directives (which are
+ skipped).
+ Returns:
+ An Inventory.
+ """
+ final_balance=inventory.Inventory()
+ fortxn_postingintxn_postings:
+ ifisinstance(txn_posting,Posting):
+ final_balance.add_position(txn_posting)
+ elifisinstance(txn_posting,TxnPosting):
+ final_balance.add_position(txn_posting.posting)
+ returnfinal_balance
+
def contains(real_account, account_name):
- """True if the given account node contains the subaccount name.
-
- Args:
- account_name: A string, the name of a direct or indirect subaccount of
- this node.
- Returns:
- A boolean, true the name is a child of this node.
- """
- return get(real_account, account_name) is not None
-
+
defcontains(real_account,account_name):
+"""True if the given account node contains the subaccount name.
+
+ Args:
+ account_name: A string, the name of a direct or indirect subaccount of
+ this node.
+ Returns:
+ A boolean, true the name is a child of this node.
+ """
+ returnget(real_account,account_name)isnotNone
+
def dump(root_account):
- """Convert a RealAccount node to a line of lines.
-
- Note: this is not meant to be used to produce text reports; the reporting
- code should produce an intermediate object that contains the structure of
- it, which can then be rendered to ASCII, HTML or CSV formats. This is
- intended as a convenient little function for dumping trees of data for
- debugging purposes.
-
- Args:
- root_account: A RealAccount instance.
- Returns:
- A list of tuples of (first_line, continuation_line, real_account) where
- first_line: A string, the first line to render, which includes the
- account name.
- continuation_line: A string, further line to render if necessary.
- real_account: The RealAccount instance which corresponds to this
- line.
- """
- # Compute all the lines ahead of time in order to calculate the width.
- lines = []
-
- # Start with the root node. We push the constant prefix before this node,
- # the account name, and the RealAccount instance. We will maintain a stack
- # of children nodes to render.
- stack = [('', root_account.account, root_account, True)]
- while stack:
- prefix, name, real_account, is_last = stack.pop(-1)
-
- if real_account is root_account:
- # For the root node, we don't want to render any prefix.
- first = cont = ''
- else:
- # Compute the string that precedes the name directly and the one below
- # that for the continuation lines.
- # |
- # @@@ Bank1 <----------------
- # @@@ |
- # | |-- Checking
- if is_last:
- first = prefix + PREFIX_LEAF_1
- cont = prefix + PREFIX_LEAF_C
- else:
- first = prefix + PREFIX_CHILD_1
- cont = prefix + PREFIX_CHILD_C
-
- # Compute the name to render for continuation lines.
- # |
- # |-- Bank1
- # | @@@ <----------------
- # | |-- Checking
- if len(real_account) > 0:
- cont_name = PREFIX_CHILD_C
- else:
- cont_name = PREFIX_LEAF_C
-
- # Add a line for this account.
- if not (real_account is root_account and not name):
- lines.append((first + name,
- cont + cont_name,
- real_account))
-
- # Push the children onto the stack, being careful with ordering and
- # marking the last node as such.
- child_items = sorted(real_account.items(), reverse=True)
- if child_items:
- child_iter = iter(child_items)
- child_name, child_account = next(child_iter)
- stack.append((cont, child_name, child_account, True))
- for child_name, child_account in child_iter:
- stack.append((cont, child_name, child_account, False))
-
- if not lines:
- return lines
-
- # Compute the maximum width of the lines and convert all of them to the same
- # maximal width. This makes it easy on the client.
- max_width = max(len(first_line) for first_line, _, __ in lines)
- line_format = '{{:{width}}}'.format(width=max_width)
- return [(line_format.format(first_line),
- line_format.format(cont_line),
- real_node)
- for (first_line, cont_line, real_node) in lines]
-
+
defdump(root_account):
+"""Convert a RealAccount node to a line of lines.
+
+ Note: this is not meant to be used to produce text reports; the reporting
+ code should produce an intermediate object that contains the structure of
+ it, which can then be rendered to ASCII, HTML or CSV formats. This is
+ intended as a convenient little function for dumping trees of data for
+ debugging purposes.
+
+ Args:
+ root_account: A RealAccount instance.
+ Returns:
+ A list of tuples of (first_line, continuation_line, real_account) where
+ first_line: A string, the first line to render, which includes the
+ account name.
+ continuation_line: A string, further line to render if necessary.
+ real_account: The RealAccount instance which corresponds to this
+ line.
+ """
+ # Compute all the lines ahead of time in order to calculate the width.
+ lines=[]
+
+ # Start with the root node. We push the constant prefix before this node,
+ # the account name, and the RealAccount instance. We will maintain a stack
+ # of children nodes to render.
+ stack=[("",root_account.account,root_account,True)]
+ whilestack:
+ prefix,name,real_account,is_last=stack.pop(-1)
+
+ ifreal_accountisroot_account:
+ # For the root node, we don't want to render any prefix.
+ first=cont=""
+ else:
+ # Compute the string that precedes the name directly and the one below
+ # that for the continuation lines.
+ # |
+ # @@@ Bank1 <----------------
+ # @@@ |
+ # | |-- Checking
+ ifis_last:
+ first=prefix+PREFIX_LEAF_1
+ cont=prefix+PREFIX_LEAF_C
+ else:
+ first=prefix+PREFIX_CHILD_1
+ cont=prefix+PREFIX_CHILD_C
+
+ # Compute the name to render for continuation lines.
+ # |
+ # |-- Bank1
+ # | @@@ <----------------
+ # | |-- Checking
+ iflen(real_account)>0:
+ cont_name=PREFIX_CHILD_C
+ else:
+ cont_name=PREFIX_LEAF_C
+
+ # Add a line for this account.
+ ifnot(real_accountisroot_accountandnotname):
+ lines.append((first+name,cont+cont_name,real_account))
+
+ # Push the children onto the stack, being careful with ordering and
+ # marking the last node as such.
+ child_items=sorted(real_account.items(),reverse=True)
+ ifchild_items:
+ child_iter=iter(child_items)
+ child_name,child_account=next(child_iter)
+ stack.append((cont,child_name,child_account,True))
+ forchild_name,child_accountinchild_iter:
+ stack.append((cont,child_name,child_account,False))
+
+ ifnotlines:
+ returnlines
+
+ # Compute the maximum width of the lines and convert all of them to the same
+ # maximal width. This makes it easy on the client.
+ max_width=max(len(first_line)forfirst_line,_,__inlines)
+ line_format="{{:{width}}}".format(width=max_width)
+ return[
+ (line_format.format(first_line),line_format.format(cont_line),real_node)
+ for(first_line,cont_line,real_node)inlines
+ ]
+
def dump_balances(real_root, dformat, at_cost=False, fullnames=False, file=None):
- """Dump a realization tree with balances.
-
- Args:
- real_root: An instance of RealAccount.
- dformat: An instance of DisplayFormatter to format the numbers with.
- at_cost: A boolean, if true, render the values at cost.
- fullnames: A boolean, if true, don't render a tree of accounts and
- render the full account names.
- file: A file object to dump the output to. If not specified, we
- return the output as a string.
- Returns:
- A string, the rendered tree, or nothing, if 'file' was provided.
- """
- if fullnames:
- # Compute the maximum account name length;
- maxlen = max(len(real_child.account)
- for real_child in iter_children(real_root, leaf_only=True))
- line_format = '{{:{width}}} {{}}\n'.format(width=maxlen)
- else:
- line_format = '{} {}\n'
-
- output = file or io.StringIO()
- for first_line, cont_line, real_account in dump(real_root):
- if not real_account.balance.is_empty():
- if at_cost:
- rinv = real_account.balance.reduce(convert.get_cost)
- else:
- rinv = real_account.balance.reduce(convert.get_units)
- amounts = [position.units for position in rinv.get_positions()]
- positions = [amount_.to_string(dformat)
- for amount_ in sorted(amounts, key=amount.sortkey)]
- else:
- positions = ['']
-
- if fullnames:
- for position in positions:
- if not position and len(real_account) > 0:
- continue # Skip parent accounts with no position to render.
- output.write(line_format.format(real_account.account, position))
- else:
- line = first_line
- for position in positions:
- output.write(line_format.format(line, position))
- line = cont_line
-
- if file is None:
- return output.getvalue()
-
+
defdump_balances(real_root,dformat,at_cost=False,fullnames=False,file=None):
+"""Dump a realization tree with balances.
+
+ Args:
+ real_root: An instance of RealAccount.
+ dformat: An instance of DisplayFormatter to format the numbers with.
+ at_cost: A boolean, if true, render the values at cost.
+ fullnames: A boolean, if true, don't render a tree of accounts and
+ render the full account names.
+ file: A file object to dump the output to. If not specified, we
+ return the output as a string.
+ Returns:
+ A string, the rendered tree, or nothing, if 'file' was provided.
+ """
+ iffullnames:
+ # Compute the maximum account name length;
+ maxlen=max(
+ len(real_child.account)
+ forreal_childiniter_children(real_root,leaf_only=True)
+ )
+ line_format="{{:{width}}} {{}}\n".format(width=maxlen)
+ else:
+        line_format = "{} {}\n"
+
+ output=fileorio.StringIO()
+ forfirst_line,cont_line,real_accountindump(real_root):
+ ifnotreal_account.balance.is_empty():
+ ifat_cost:
+ rinv=real_account.balance.reduce(convert.get_cost)
+ else:
+ rinv=real_account.balance.reduce(convert.get_units)
+ amounts=[position.unitsforpositioninrinv.get_positions()]
+ positions=[
+ amount_.to_string(dformat)
+ foramount_insorted(amounts,key=amount.sortkey)
+ ]
+ else:
+ positions=[""]
+
+ iffullnames:
+ forpositioninpositions:
+ ifnotpositionandlen(real_account)>0:
+ continue# Skip parent accounts with no position to render.
+ output.write(line_format.format(real_account.account,position))
+ else:
+ line=first_line
+ forpositioninpositions:
+ output.write(line_format.format(line,position))
+ line=cont_line
+
+ iffileisNone:
+ returnoutput.getvalue()
+
predicate – A callable/function which accepts a real_account and returns
+
predicate – A callable/function which accepts a RealAccount and returns
a boolean. If the function returns True, the node is kept.
@@ -17990,36 +21308,36 @@
Source code in beancount/core/realization.py
-
def filter(real_account, predicate):
- """Filter a RealAccount tree of nodes by the predicate.
-
- This function visits the tree and applies the predicate on each node. It
- returns a partial clone of RealAccount whereby on each node
- - either the predicate is true, or
- - for at least one child of the node the predicate is true.
- All the leaves have the predicate be true.
-
- Args:
- real_account: An instance of RealAccount.
- predicate: A callable/function which accepts a real_account and returns
- a boolean. If the function returns True, the node is kept.
- Returns:
- A shallow clone of RealAccount is always returned.
- """
- assert isinstance(real_account, RealAccount)
-
- real_copy = RealAccount(real_account.account)
- real_copy.balance = real_account.balance
- real_copy.txn_postings = real_account.txn_postings
-
- for child_name, real_child in real_account.items():
- real_child_copy = filter(real_child, predicate)
- if real_child_copy is not None:
- real_copy[child_name] = real_child_copy
-
- if len(real_copy) > 0 or predicate(real_account):
- return real_copy
-
+
def filter(real_account, predicate):
+    """Filter a RealAccount tree of nodes by the predicate.
+
+    This function visits the tree and applies the predicate on each node. It
+    returns a partial clone of RealAccount whereby on each node
+    - either the predicate is true, or
+    - for at least one child of the node the predicate is true.
+    All the leaves have the predicate be true.
+
+    Args:
+      real_account: An instance of RealAccount.
+      predicate: A callable/function which accepts a RealAccount and returns
+        a boolean. If the function returns True, the node is kept.
+    Returns:
+      A shallow clone of RealAccount is always returned.
+    """
+    assert isinstance(real_account, RealAccount)
+
+    real_copy = RealAccount(real_account.account)
+    real_copy.balance = real_account.balance
+    real_copy.txn_postings = real_account.txn_postings
+
+    for child_name, real_child in real_account.items():
+        real_child_copy = filter(real_child, predicate)
+        if real_child_copy is not None:
+            real_copy[child_name] = real_child_copy
+
+    if len(real_copy) > 0 or predicate(real_account):
+        return real_copy
+
def find_last_active_posting(txn_postings):
- """Look at the end of the list of postings, and find the last
- posting or entry that is not an automatically added directive.
- Note that if the account is closed, the last posting is assumed
- to be a Close directive (this is the case if the input is valid
- and checks without errors.
-
- Args:
- txn_postings: a list of postings or entries.
- Returns:
- An entry, or None, if the input list was empty.
- """
- for txn_posting in reversed(txn_postings):
- assert not isinstance(txn_posting, Posting)
-
- if not isinstance(txn_posting, (TxnPosting, Open, Close, Pad, Balance, Note)):
- continue
-
- # pylint: disable=bad-continuation
- if (isinstance(txn_posting, TxnPosting) and
- txn_posting.txn.flag == flags.FLAG_UNREALIZED):
- continue
- return txn_posting
-
+
def find_last_active_posting(txn_postings):
+    """Look at the end of the list of postings, and find the last
+    posting or entry that is not an automatically added directive.
+    Note that if the account is closed, the last posting is assumed
+    to be a Close directive (this is the case if the input is valid
+    and checks without errors).
+
+    Args:
+      txn_postings: a list of postings or entries.
+    Returns:
+      An entry, or None, if the input list was empty.
+    """
+    for txn_posting in reversed(txn_postings):
+        assert not isinstance(txn_posting, Posting)
+        if not isinstance(txn_posting, (TxnPosting, Open, Close, Pad, Balance, Note)):
+            continue
+        return txn_posting
+
def get(real_account, account_name, default=None):
- """Fetch the subaccount name from the real_account node.
-
- Args:
- real_account: An instance of RealAccount, the parent node to look for
- children of.
- account_name: A string, the name of a possibly indirect child leaf
- found down the tree of 'real_account' nodes.
- default: The default value that should be returned if the child
- subaccount is not found.
- Returns:
- A RealAccount instance for the child, or the default value, if the child
- is not found.
- """
- if not isinstance(account_name, str):
- raise ValueError
- components = account.split(account_name)
- for component in components:
- real_child = real_account.get(component, default)
- if real_child is default:
- return default
- real_account = real_child
- return real_account
-
+
def get(real_account, account_name, default=None):
+    """Fetch the subaccount name from the real_account node.
+
+    Args:
+      real_account: An instance of RealAccount, the parent node to look for
+        children of.
+      account_name: A string, the name of a possibly indirect child leaf
+        found down the tree of 'real_account' nodes.
+      default: The default value that should be returned if the child
+        subaccount is not found.
+    Returns:
+      A RealAccount instance for the child, or the default value, if the child
+      is not found.
+    """
+    if not isinstance(account_name, str):
+        raise ValueError
+    components = account.split(account_name)
+    for component in components:
+        real_child = real_account.get(component, default)
+        if real_child is default:
+            return default
+        real_account = real_child
+    return real_account
+
def get_or_create(real_account, account_name):
- """Fetch the subaccount name from the real_account node.
-
- Args:
- real_account: An instance of RealAccount, the parent node to look for
- children of, or create under.
- account_name: A string, the name of the direct or indirect child leaf
- to get or create.
- Returns:
- A RealAccount instance for the child, or the default value, if the child
- is not found.
- """
- if not isinstance(account_name, str):
- raise ValueError
- components = account.split(account_name)
- path = []
- for component in components:
- path.append(component)
- real_child = real_account.get(component, None)
- if real_child is None:
- real_child = RealAccount(account.join(*path))
- real_account[component] = real_child
- real_account = real_child
- return real_account
-
+
def get_or_create(real_account, account_name):
+    """Fetch the subaccount name from the real_account node.
+
+    Args:
+      real_account: An instance of RealAccount, the parent node to look for
+        children of, or create under.
+      account_name: A string, the name of the direct or indirect child leaf
+        to get or create.
+    Returns:
+      A RealAccount instance for the child, or the default value, if the child
+      is not found.
+    """
+    if not isinstance(account_name, str):
+        raise ValueError
+    components = account.split(account_name)
+    path = []
+    for component in components:
+        path.append(component)
+        real_child = real_account.get(component, None)
+        if real_child is None:
+            real_child = RealAccount(account.join(*path))
+            real_account[component] = real_child
+        real_account = real_child
+    return real_account
+
def get_postings(real_account):
- """Return a sorted list a RealAccount's postings and children.
-
- Args:
- real_account: An instance of RealAccount.
- Returns:
- A list of Posting or directories.
- """
- # We accumulate all the postings at once here instead of incrementally
- # because we need to return them sorted.
- accumulator = []
- for real_child in iter_children(real_account):
- accumulator.extend(real_child.txn_postings)
- accumulator.sort(key=data.posting_sortkey)
- return accumulator
-
+
def get_postings(real_account):
+    """Return a sorted list of a RealAccount's postings and children.
+
+    Args:
+      real_account: An instance of RealAccount.
+    Returns:
+      A list of Posting or directories.
+    """
+    # We accumulate all the postings at once here instead of incrementally
+    # because we need to return them sorted.
+    accumulator = []
+    for real_child in iter_children(real_account):
+        accumulator.extend(real_child.txn_postings)
+    accumulator.sort(key=data.posting_sortkey)
+    return accumulator
+
def index_key(sequence, value, key, cmp):
- """Find the index of the first element in 'sequence' which is equal to 'value'.
- If 'key' is specified, the value compared to the value returned by this
- function. If the value is not found, return None.
-
- Args:
- sequence: The sequence to search.
- value: The value to search for.
- key: A predicate to call to obtain the value to compare against.
- cmp: A comparison predicate.
- Returns:
- The index of the first element found, or None, if the element was not found.
- """
- for index, element in enumerate(sequence):
- if cmp(key(element), value):
- return index
- return
-
+
def index_key(sequence, value, key, cmp):
+    """Find the index of the first element in 'sequence' which is equal to 'value'.
+    If 'key' is specified, the value is compared to the value returned by this
+    function. If the value is not found, return None.
+
+    Args:
+      sequence: The sequence to search.
+      value: The value to search for.
+      key: A predicate to call to obtain the value to compare against.
+      cmp: A comparison predicate.
+    Returns:
+      The index of the first element found, or None, if the element was not found.
+    """
+    for index, element in enumerate(sequence):
+        if cmp(key(element), value):
+            return index
+    return
+
def iter_children(real_account, leaf_only=False):
- """Iterate this account node and all its children, depth-first.
-
- Args:
- real_account: An instance of RealAccount.
- leaf_only: A boolean flag, true if we should yield only leaves.
- Yields:
- Instances of RealAccount, beginning with this account. The order is
- undetermined.
- """
- if leaf_only:
- if len(real_account) == 0:
- yield real_account
- else:
- for key, real_child in sorted(real_account.items()):
- for real_subchild in iter_children(real_child, leaf_only):
- yield real_subchild
- else:
- yield real_account
- for key, real_child in sorted(real_account.items()):
- for real_subchild in iter_children(real_child):
- yield real_subchild
-
+
def iter_children(real_account, leaf_only=False):
+    """Iterate this account node and all its children, depth-first.
+
+    Args:
+      real_account: An instance of RealAccount.
+      leaf_only: A boolean flag, true if we should yield only leaves.
+    Yields:
+      Instances of RealAccount, beginning with this account. The order is
+      undetermined.
+    """
+    if leaf_only:
+        if len(real_account) == 0:
+            yield real_account
+        else:
+            for key, real_child in sorted(real_account.items()):
+                for real_subchild in iter_children(real_child, leaf_only):
+                    yield real_subchild
+    else:
+        yield real_account
+        for key, real_child in sorted(real_account.items()):
+            for real_subchild in iter_children(real_child):
+                yield real_subchild
+
def iterate_with_balance(txn_postings):
- """Iterate over the entries, accumulating the running balance.
-
- For each entry, this yields tuples of the form:
-
- (entry, postings, change, balance)
-
- entry: This is the directive for this line. If the list contained Posting
- instance, this yields the corresponding Transaction object.
- postings: A list of postings on this entry that affect the balance. Only the
- postings encountered in the input list are included; only those affect the
- balance. If 'entry' is not a Transaction directive, this should always be
- an empty list. We preserve the original ordering of the postings as they
- appear in the input list.
- change: An Inventory object that reflects the total change due to the
- postings from this entry that appear in the list. For example, if a
- Transaction has three postings and two are in the input list, the sum of
- the two postings will be in the 'change' Inventory object. However, the
- position for the transactions' third posting--the one not included in the
- input list--will not be in this inventory.
- balance: An Inventory object that reflects the balance *after* adding the
- 'change' inventory due to this entry's transaction. The 'balance' yielded
- is never None, even for entries that do not affect the balance, that is,
- with an empty 'change' inventory. It's up to the caller, the one rendering
- the entry, to decide whether to render this entry's change for a
- particular entry type.
-
- Note that if the input list of postings-or-entries is not in sorted date
- order, two postings for the same entry appearing twice with a different date
- in between will cause the entry appear twice. This is correct behavior, and
- it is expected that in practice this should never happen anyway, because the
- list of postings or entries should always be sorted. This method attempts to
- detect this and raises an assertion if this is seen.
-
- Args:
- txn_postings: A list of postings or directive instances.
- Postings affect the balance; other entries do not.
- Yields:
- Tuples of (entry, postings, change, balance) as described above.
- """
-
- # The running balance.
- running_balance = inventory.Inventory()
-
- # Previous date.
- prev_date = None
-
- # A list of entries at the current date.
- date_entries = []
-
- first = lambda pair: pair[0]
- for txn_posting in txn_postings:
-
- # Get the posting if we are dealing with one.
- assert not isinstance(txn_posting, Posting)
- if isinstance(txn_posting, TxnPosting):
- posting = txn_posting.posting
- entry = txn_posting.txn
- else:
- posting = None
- entry = txn_posting
-
- if entry.date != prev_date:
- assert prev_date is None or entry.date > prev_date, (
- "Invalid date order for postings: {} > {}".format(prev_date, entry.date))
- prev_date = entry.date
-
- # Flush the dated entries.
- for date_entry, date_postings in date_entries:
- change = inventory.Inventory()
- if date_postings:
- # Compute the change due to this transaction and update the
- # total balance at the same time.
- for date_posting in date_postings:
- change.add_position(date_posting)
- running_balance.add_position(date_posting)
- yield date_entry, date_postings, change, running_balance
-
- date_entries.clear()
- assert not date_entries
-
- if posting is not None:
- # De-dup multiple postings on the same transaction entry by
- # grouping their positions together.
- index = index_key(date_entries, entry, first, operator.is_)
- if index is None:
- date_entries.append((entry, [posting]))
- else:
- # We are indeed de-duping!
- postings = date_entries[index][1]
- postings.append(posting)
- else:
- # This is a regular entry; nothing to add/remove.
- date_entries.append((entry, []))
-
- # Flush the final dated entries if any, same as above.
- for date_entry, date_postings in date_entries:
- change = inventory.Inventory()
- if date_postings:
- for date_posting in date_postings:
- change.add_position(date_posting)
- running_balance.add_position(date_posting)
- yield date_entry, date_postings, change, running_balance
- date_entries.clear()
-
+
defiterate_with_balance(txn_postings):
+"""Iterate over the entries, accumulating the running balance.
+
+ For each entry, this yields tuples of the form:
+
+ (entry, postings, change, balance)
+
+ entry: This is the directive for this line. If the list contained Posting
+ instance, this yields the corresponding Transaction object.
+ postings: A list of postings on this entry that affect the balance. Only the
+ postings encountered in the input list are included; only those affect the
+ balance. If 'entry' is not a Transaction directive, this should always be
+ an empty list. We preserve the original ordering of the postings as they
+ appear in the input list.
+ change: An Inventory object that reflects the total change due to the
+ postings from this entry that appear in the list. For example, if a
+ Transaction has three postings and two are in the input list, the sum of
+ the two postings will be in the 'change' Inventory object. However, the
+ position for the transactions' third posting--the one not included in the
+ input list--will not be in this inventory.
+ balance: An Inventory object that reflects the balance *after* adding the
+ 'change' inventory due to this entry's transaction. The 'balance' yielded
+ is never None, even for entries that do not affect the balance, that is,
+ with an empty 'change' inventory. It's up to the caller, the one rendering
+ the entry, to decide whether to render this entry's change for a
+ particular entry type.
+
+ Note that if the input list of postings-or-entries is not in sorted date
+ order, two postings for the same entry appearing twice with a different date
+ in between will cause the entry appear twice. This is correct behavior, and
+ it is expected that in practice this should never happen anyway, because the
+ list of postings or entries should always be sorted. This method attempts to
+ detect this and raises an assertion if this is seen.
+
+ Args:
+ txn_postings: A list of postings or directive instances.
+ Postings affect the balance; other entries do not.
+ Yields:
+ Tuples of (entry, postings, change, balance) as described above.
+ """
+
+ # The running balance.
+ running_balance=inventory.Inventory()
+
+ # Previous date.
+ prev_date=None
+
+ # A list of entries at the current date.
+ date_entries=[]
+
+ first=lambdapair:pair[0]
+ fortxn_postingintxn_postings:
+ # Get the posting if we are dealing with one.
+ assertnotisinstance(txn_posting,Posting)
+ ifisinstance(txn_posting,TxnPosting):
+ posting=txn_posting.posting
+ entry=txn_posting.txn
+ else:
+ posting=None
+ entry=txn_posting
+
+ ifentry.date!=prev_date:
+ assert(
+ prev_dateisNoneorentry.date>prev_date
+ ),"Invalid date order for postings: {} > {}".format(prev_date,entry.date)
+ prev_date=entry.date
+
+ # Flush the dated entries.
+ fordate_entry,date_postingsindate_entries:
+ change=inventory.Inventory()
+ ifdate_postings:
+ # Compute the change due to this transaction and update the
+ # total balance at the same time.
+ fordate_postingindate_postings:
+ change.add_position(date_posting)
+ running_balance.add_position(date_posting)
+ yielddate_entry,date_postings,change,running_balance
+
+ date_entries.clear()
+ assertnotdate_entries
+
+ ifpostingisnotNone:
+ # De-dup multiple postings on the same transaction entry by
+ # grouping their positions together.
+ index=index_key(date_entries,entry,first,operator.is_)
+ ifindexisNone:
+ date_entries.append((entry,[posting]))
+ else:
+ # We are indeed de-duping!
+ postings=date_entries[index][1]
+ postings.append(posting)
+ else:
+ # This is a regular entry; nothing to add/remove.
+ date_entries.append((entry,[]))
+
+ # Flush the final dated entries if any, same as above.
+ fordate_entry,date_postingsindate_entries:
+ change=inventory.Inventory()
+ ifdate_postings:
+ fordate_postingindate_postings:
+ change.add_position(date_posting)
+ running_balance.add_position(date_posting)
+ yielddate_entry,date_postings,change,running_balance
+ date_entries.clear()
+
def postings_by_account(entries):
- """Create lists of postings and balances by account.
-
- This routine aggregates postings and entries grouping them by account name.
- The resulting lists contain postings in-lieu of Transaction directives, but
- the other directives are stored as entries. This yields a list of postings
- or other entries by account. All references to accounts are taken into
- account.
-
- Args:
- entries: A list of directives.
- Returns:
- A mapping of account name to list of TxnPosting instances or
- non-Transaction directives, sorted in the same order as the entries.
- """
- txn_postings_map = collections.defaultdict(list)
- for entry in entries:
-
- if isinstance(entry, Transaction):
- # Insert an entry for each of the postings.
- for posting in entry.postings:
- txn_postings_map[posting.account].append(
- TxnPosting(entry, posting))
-
- elif isinstance(entry, (Open, Close, Balance, Note, Document)):
- # Append some other entries in the realized list.
- txn_postings_map[entry.account].append(entry)
-
- elif isinstance(entry, Pad):
- # Insert the pad entry in both realized accounts.
- txn_postings_map[entry.account].append(entry)
- txn_postings_map[entry.source_account].append(entry)
-
- elif isinstance(entry, Custom):
- # Insert custom entry for each account in its values.
- for custom_value in entry.values:
- if custom_value.dtype == account.TYPE:
- txn_postings_map[custom_value.value].append(entry)
-
- return txn_postings_map
-
+
defpostings_by_account(entries):
+"""Create lists of postings and balances by account.
+
+ This routine aggregates postings and entries grouping them by account name.
+ The resulting lists contain postings in-lieu of Transaction directives, but
+ the other directives are stored as entries. This yields a list of postings
+ or other entries by account. All references to accounts are taken into
+ account.
+
+ Args:
+ entries: A list of directives.
+ Returns:
+ A mapping of account name to list of TxnPosting instances or
+ non-Transaction directives, sorted in the same order as the entries.
+ """
+ txn_postings_map=collections.defaultdict(list)
+ forentryinentries:
+ ifisinstance(entry,Transaction):
+ # Insert an entry for each of the postings.
+ forpostinginentry.postings:
+ txn_postings_map[posting.account].append(TxnPosting(entry,posting))
+
+ elifisinstance(entry,(Open,Close,Balance,Note,Document)):
+ # Append some other entries in the realized list.
+ txn_postings_map[entry.account].append(entry)
+
+ elifisinstance(entry,Pad):
+ # Insert the pad entry in both realized accounts.
+ txn_postings_map[entry.account].append(entry)
+ txn_postings_map[entry.source_account].append(entry)
+
+ elifisinstance(entry,Custom):
+ # Insert custom entry for each account in its values.
+ forcustom_valueinentry.values:
+ ifcustom_value.dtype==account.TYPE:
+ txn_postings_map[custom_value.value].append(entry)
+
+ returntxn_postings_map
+
def realize(entries, min_accounts=None, compute_balance=True):
- r"""Group entries by account, into a "tree" of realized accounts. RealAccount's
- are essentially containers for lists of postings and the final balance of
- each account, and may be non-leaf accounts (used strictly for organizing
- accounts into a hierarchy). This is then used to issue reports.
-
- The lists of postings in each account my be any of the entry types, except
- for Transaction, whereby Transaction entries are replaced by the specific
- Posting legs that belong to the account. Here's a simple diagram that
- summarizes this seemingly complex, but rather simple data structure:
-
- +-------------+ postings +------+
- | RealAccount |---------->| Open |
- +-------------+ +------+
- |
- v
- +------------+ +-------------+
- | TxnPosting |---->| Transaction |
- +------------+ +-------------+
- | \ \\\
- v `\.__ +---------+
- +-----+ `-------->| Posting |
- | Pad | +---------+
- +-----+
- |
- v
- +---------+
- | Balance |
- +---------+
- |
- v
- +-------+
- | Close |
- +-------+
- |
- .
-
- Args:
- entries: A list of directives.
- min_accounts: A list of strings, account names to ensure we create,
- regardless of whether there are postings on those accounts or not.
- This can be used to ensure the root accounts all exist.
- compute_balance: A boolean, true if we should compute the final
- balance on the realization.
- Returns:
- The root RealAccount instance.
- """
- # Create lists of the entries by account.
- txn_postings_map = postings_by_account(entries)
-
- # Create a RealAccount tree and compute the balance for each.
- real_root = RealAccount('')
- for account_name, txn_postings in txn_postings_map.items():
- real_account = get_or_create(real_root, account_name)
- real_account.txn_postings = txn_postings
- if compute_balance:
- real_account.balance = compute_postings_balance(txn_postings)
-
- # Ensure a minimum set of accounts that should exist. This is typically
- # called with an instance of AccountTypes to make sure that those exist.
- if min_accounts:
- for account_name in min_accounts:
- get_or_create(real_root, account_name)
-
- return real_root
-
+
+def realize(entries, min_accounts=None, compute_balance=True):
+    r"""Group entries by account, into a "tree" of realized accounts. RealAccount's
+    are essentially containers for lists of postings and the final balance of
+    each account, and may be non-leaf accounts (used strictly for organizing
+    accounts into a hierarchy). This is then used to issue reports.
+
+    The lists of postings in each account my be any of the entry types, except
+    for Transaction, whereby Transaction entries are replaced by the specific
+    Posting legs that belong to the account. Here's a simple diagram that
+    summarizes this seemingly complex, but rather simple data structure:
+
+       +-------------+ postings  +------+
+       | RealAccount |---------->| Open |
+       +-------------+           +------+
+                                     |
+                                     v
+                              +------------+     +-------------+
+                              | TxnPosting |---->| Transaction |
+                              +------------+     +-------------+
+                                     |       \          \\\
+                                     v        `\.__    +---------+
+                                  +-----+          `-->| Posting |
+                                  | Pad |              +---------+
+                                  +-----+
+                                     |
+                                     v
+                                +---------+
+                                | Balance |
+                                +---------+
+                                     |
+                                     v
+                                 +-------+
+                                 | Close |
+                                 +-------+
+                                     |
+                                     .
+
+    Args:
+      entries: A list of directives.
+      min_accounts: A list of strings, account names to ensure we create,
+        regardless of whether there are postings on those accounts or not.
+        This can be used to ensure the root accounts all exist.
+      compute_balance: A boolean, true if we should compute the final
+        balance on the realization.
+    Returns:
+      The root RealAccount instance.
+    """
+    # Create lists of the entries by account.
+    txn_postings_map = postings_by_account(entries)
+
+    # Create a RealAccount tree and compute the balance for each.
+    real_root = RealAccount("")
+    for account_name, txn_postings in txn_postings_map.items():
+        real_account = get_or_create(real_root, account_name)
+        real_account.txn_postings = txn_postings
+        if compute_balance:
+            real_account.balance = compute_postings_balance(txn_postings)
+
+    # Ensure a minimum set of accounts that should exist. This is typically
+    # called with an instance of AccountTypes to make sure that those exist.
+    if min_accounts:
+        for account_name in min_accounts:
+            get_or_create(real_root, account_name)
+
+    return real_root
+
Code to help identify, extract, and file external downloads.
-
This package contains code to help you build importers and drive the process of
-identifying which importer to run on an externally downloaded file, extract
-transactions from them and file away these files under a clean and rigidly named
-hierarchy for preservation.
A file wrapper which acts as a cache for on-demand evaluation of conversions.
-
This object is used in lieu of a file in order to allow the various importers to
-reuse each others' conversion results. Converting file contents, e.g. PDF to
-text, can be expensive.
A converter that just reads the entire contents of a file.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
num_bytes – The number of bytes to read.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A converter function.
-
-
-
-
-
-
- Source code in beancount/ingest/cache.py
-
def contents(filename):
- """A converter that just reads the entire contents of a file.
-
- Args:
- num_bytes: The number of bytes to read.
- Returns:
- A converter function.
- """
- # Attempt to detect the input encoding automatically, using chardet and a
- # decent amount of input.
- rawdata = open(filename, 'rb').read(HEAD_DETECT_MAX_BYTES)
- detected = chardet.detect(rawdata)
- encoding = detected['encoding']
-
- # Ignore encoding errors for reading the contents because input files
- # routinely break this assumption.
- errors = 'ignore'
-
- with open(filename, encoding=encoding, errors=errors) as file:
- return file.read()
-
Create or reuse a globally registered instance of a FileMemo.
-
Note: the FileMemo objects' lifetimes are reused for the duration of the
-process. This is usually the intended behavior. Always create them by
-calling this constructor.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
filename – A path string, the absolute name of the file whose memo to create.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A FileMemo instance.
-
-
-
-
-
-
- Source code in beancount/ingest/cache.py
-
def get_file(filename):
- """Create or reuse a globally registered instance of a FileMemo.
-
- Note: the FileMemo objects' lifetimes are reused for the duration of the
- process. This is usually the intended behavior. Always create them by
- calling this constructor.
-
- Args:
- filename: A path string, the absolute name of the file whose memo to create.
- Returns:
- A FileMemo instance.
-
- """
- assert path.isabs(filename), (
- "Path should be absolute in order to guarantee a single call.")
- return _CACHE[filename]
-
A converter that just reads the first bytes of a file.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
num_bytes – The number of bytes to read.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A converter function.
-
-
-
-
-
-
- Source code in beancount/ingest/cache.py
-
def head(num_bytes=8192):
- """A converter that just reads the first bytes of a file.
-
- Args:
- num_bytes: The number of bytes to read.
- Returns:
- A converter function.
- """
- def head_reader(filename):
- with open(filename, 'rb') as file:
- rawdata = file.read(num_bytes)
- detected = chardet.detect(rawdata)
- encoding = detected['encoding']
- return rawdata.decode(encoding)
- return head_reader
-
A converter that computes the MIME type of the file.
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A converter function.
-
-
-
-
-
-
- Source code in beancount/ingest/cache.py
-
def mimetype(filename):
- """A converter that computes the MIME type of the file.
-
- Returns:
- A converter function.
- """
- return file_type.guess_file_type(filename)
-
Read an import script and a list of downloaded filenames or directories of
-downloaded files, and for each of those files, extract transactions from it.
Given an importer configuration, search for files that can be imported in the
-list of files or directories, run the signature checks on them, and if it
-succeeds, run the importer on the file.
-
A list of entries for an existing ledger can be provided in order to perform
-de-duplication and a minimum date can be provided to filter out old entries.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
importer_config – A list of (regexps, importer) pairs, the configuration.
-
files_or_directories – A list of strings, filenames or directories to be processed.
-
output – A file object, to be written to.
-
entries – A list of directives loaded from the existing file for the newly
-extracted entries to be merged in.
-
options_map – The options parsed from existing file.
-
mindate – Optional minimum date to output transactions for.
-
ascending – A boolean, true to print entries in ascending order, false if
-descending is desired.
-
hooks – An optional list of hook functions to apply to the list of extract
-(filename, entries) pairs, in order. If not specified, find_duplicate_entries()
-is used, automatically.
-
-
-
-
-
-
- Source code in beancount/ingest/extract.py
-
def extract(importer_config,
- files_or_directories,
- output,
- entries=None,
- options_map=None,
- mindate=None,
- ascending=True,
- hooks=None):
- """Given an importer configuration, search for files that can be imported in the
- list of files or directories, run the signature checks on them, and if it
- succeeds, run the importer on the file.
-
- A list of entries for an existing ledger can be provided in order to perform
- de-duplication and a minimum date can be provided to filter out old entries.
-
- Args:
- importer_config: A list of (regexps, importer) pairs, the configuration.
- files_or_directories: A list of strings, filenames or directories to be processed.
- output: A file object, to be written to.
- entries: A list of directives loaded from the existing file for the newly
- extracted entries to be merged in.
- options_map: The options parsed from existing file.
- mindate: Optional minimum date to output transactions for.
- ascending: A boolean, true to print entries in ascending order, false if
- descending is desired.
- hooks: An optional list of hook functions to apply to the list of extract
- (filename, entries) pairs, in order. If not specified, find_duplicate_entries()
- is used, automatically.
- """
- allow_none_for_tags_and_links = (
- options_map and options_map["allow_deprecated_none_for_tags_and_links"])
-
- # Run all the importers and gather their result sets.
- new_entries_list = []
- for filename, importers in identify.find_imports(importer_config,
- files_or_directories):
- for importer in importers:
- # Import and process the file.
- try:
- new_entries = extract_from_file(
- filename,
- importer,
- existing_entries=entries,
- min_date=mindate,
- allow_none_for_tags_and_links=allow_none_for_tags_and_links)
- new_entries_list.append((filename, new_entries))
- except Exception as exc:
- logging.exception("Importer %s.extract() raised an unexpected error: %s",
- importer.name(), exc)
- continue
-
- # Find potential duplicate entries in the result sets, either against the
- # list of existing ones, or against each other. A single call to this
- # function is made on purpose, so that the function be able to merge
- # entries.
- if hooks is None:
- hooks = [find_duplicate_entries]
- for hook_fn in hooks:
- new_entries_list = hook_fn(new_entries_list, entries)
- assert isinstance(new_entries_list, list)
- assert all(isinstance(new_entries, tuple) for new_entries in new_entries_list)
- assert all(isinstance(new_entries[0], str) for new_entries in new_entries_list)
- assert all(isinstance(new_entries[1], list) for new_entries in new_entries_list)
-
- # Print out the results.
- output.write(HEADER)
- for key, new_entries in new_entries_list:
- output.write(identify.SECTION.format(key))
- output.write('\n')
- if not ascending:
- new_entries.reverse()
- print_extracted_entries(new_entries, output)
-
Import entries from file 'filename' with the given matches,
-
Also cross-check against a list of provided 'existing_entries' entries,
-de-duplicating and possibly auto-categorizing.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
filename – The name of the file to import.
-
importer – An importer object that matched the file.
-
existing_entries – A list of existing entries parsed from a ledger, used to
-detect duplicates and automatically complete or categorize transactions.
-
min_date – A date before which entries should be ignored. This is useful
-when an account has a valid check/assert; we could just ignore whatever
-comes before, if desired.
-
allow_none_for_tags_and_links – A boolean, whether to allow plugins to
-generate Transaction objects with None as value for the 'tags' or 'links'
-attributes.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A list of new imported entries.
-
-
-
-
-
-
-
-
-
-
-
-
-
Exceptions:
-
-
-
Exception – If there is an error in the importer's extract() method.
-
-
-
-
-
-
- Source code in beancount/ingest/extract.py
-
def extract_from_file(filename, importer,
- existing_entries=None,
- min_date=None,
- allow_none_for_tags_and_links=False):
- """Import entries from file 'filename' with the given matches,
-
- Also cross-check against a list of provided 'existing_entries' entries,
- de-duplicating and possibly auto-categorizing.
-
- Args:
- filename: The name of the file to import.
- importer: An importer object that matched the file.
- existing_entries: A list of existing entries parsed from a ledger, used to
- detect duplicates and automatically complete or categorize transactions.
- min_date: A date before which entries should be ignored. This is useful
- when an account has a valid check/assert; we could just ignore whatever
- comes before, if desired.
- allow_none_for_tags_and_links: A boolean, whether to allow plugins to
- generate Transaction objects with None as value for the 'tags' or 'links'
- attributes.
- Returns:
- A list of new imported entries.
- Raises:
- Exception: If there is an error in the importer's extract() method.
- """
- # Extract the entries.
- file = cache.get_file(filename)
-
- # Note: Let the exception through on purpose. This makes developing
- # importers much easier by rendering the details of the exceptions.
- #
- # Note: For legacy support, support calling without the existing entries.
- kwargs = {}
- if 'existing_entries' in inspect.signature(importer.extract).parameters:
- kwargs['existing_entries'] = existing_entries
- new_entries = importer.extract(file, **kwargs)
- if not new_entries:
- return []
-
- # Make sure the newly imported entries are sorted; don't trust the importer.
- new_entries.sort(key=data.entry_sortkey)
-
- # Ensure that the entries are typed correctly.
- for entry in new_entries:
- data.sanity_check_types(entry, allow_none_for_tags_and_links)
-
- # Filter out entries with dates before 'min_date'.
- if min_date:
- new_entries = list(itertools.dropwhile(lambda x: x.date < min_date,
- new_entries))
-
- return new_entries
-
new_entries_list – A list of pairs of (key, lists of imported entries), one
-for each importer. The key identifies the filename and/or importer that
-yielded those new entries.
-
existing_entries – A list of previously existing entries from the target
-ledger.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A list of lists of modified new entries (like new_entries_list),
-potentially with modified metadata to indicate those which are duplicated.
-
-
-
-
-
-
- Source code in beancount/ingest/extract.py
-
def find_duplicate_entries(new_entries_list, existing_entries):
- """Flag potentially duplicate entries.
-
- Args:
- new_entries_list: A list of pairs of (key, lists of imported entries), one
- for each importer. The key identifies the filename and/or importer that
- yielded those new entries.
- existing_entries: A list of previously existing entries from the target
- ledger.
- Returns:
- A list of lists of modified new entries (like new_entries_list),
- potentially with modified metadata to indicate those which are duplicated.
- """
- mod_entries_list = []
- for key, new_entries in new_entries_list:
- # Find similar entries against the existing ledger only.
- duplicate_pairs = similar.find_similar_entries(new_entries, existing_entries)
-
- # Add a metadata marker to the extracted entries for duplicates.
- duplicate_set = set(id(entry) for entry, _ in duplicate_pairs)
- mod_entries = []
- for entry in new_entries:
- if id(entry) in duplicate_set:
- marked_meta = entry.meta.copy()
- marked_meta[DUPLICATE_META] = True
- entry = entry._replace(meta=marked_meta)
- mod_entries.append(entry)
- mod_entries_list.append((key, mod_entries))
- return mod_entries_list
-
def print_extracted_entries(entries, file):
- """Print a list of entries.
-
- Args:
- entries: A list of extracted entries.
- file: A file object to write to.
- """
- # Print the filename and which modules matched.
- # pylint: disable=invalid-name
- pr = lambda *args: print(*args, file=file)
- pr('')
-
- # Print out the entries.
- for entry in entries:
- # Check if this entry is a dup, and if so, comment it out.
- if DUPLICATE_META in entry.meta:
- meta = entry.meta.copy()
- meta.pop(DUPLICATE_META)
- entry = entry._replace(meta=meta)
- entry_string = textwrap.indent(printer.format_entry(entry), '; ')
- else:
- entry_string = printer.format_entry(entry)
- pr(entry_string)
-
- pr('')
-
Read an import script and a list of downloaded filenames or directories of
-downloaded files, and for each of those files, move the file under an account
-corresponding to the filing directory.
def add_arguments(parser):
- """Add arguments for the extract command."""
-
- parser.add_argument('-o', '--output', '--output-dir', '--destination',
- dest='output_dir', action='store',
- help="The root of the documents tree to move the files to.")
-
- parser.add_argument('-n', '--dry-run', action='store_true',
- help=("Just print where the files would be moved; "
- "don't actually move them."))
-
- parser.add_argument('--no-overwrite', dest='overwrite',
- action='store_false', default=True,
- help="Don't overwrite destination files with the same name.")
-
File importable files under a destination directory.
-
Given an importer configuration object, search for files that can be
-imported under the given list of files or directories and moved them under
-the given destination directory with the date computed by the module
-prepended to the filename. If the date cannot be extracted, use a reasonable
-default for the date (e.g. the last modified time of the file itself).
-
If 'mkdirs' is True, create the destination directories before moving the
-files.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
importer_config – A list of importer instances that define the config.
-
files_or_directories – a list of files of directories to walk recursively and
-hunt for files to import.
-
destination – A string, the root destination directory where the files are
-to be filed. The files are organized there under a hierarchy mirroring
-that of the chart of accounts.
-
dry_run – A flag, if true, don't actually move the files.
-
mkdirs – A flag, if true, make all the intervening directories; otherwise,
-fail to move files to non-existing dirs.
-
overwrite – A flag, if true, overwrite an existing destination file.
-
idify – A flag, if true, remove whitespace and funky characters in the destination
-filename.
-
logfile – A file object to write log entries to, or None, in which case no log is
-written out.
-
-
-
-
-
-
- Source code in beancount/ingest/file.py
-
def file(importer_config,
- files_or_directories,
- destination,
- dry_run=False,
- mkdirs=False,
- overwrite=False,
- idify=False,
- logfile=None):
- """File importable files under a destination directory.
-
- Given an importer configuration object, search for files that can be
- imported under the given list of files or directories and moved them under
- the given destination directory with the date computed by the module
- prepended to the filename. If the date cannot be extracted, use a reasonable
- default for the date (e.g. the last modified time of the file itself).
-
- If 'mkdirs' is True, create the destination directories before moving the
- files.
-
- Args:
- importer_config: A list of importer instances that define the config.
- files_or_directories: a list of files of directories to walk recursively and
- hunt for files to import.
- destination: A string, the root destination directory where the files are
- to be filed. The files are organized there under a hierarchy mirroring
- that of the chart of accounts.
- dry_run: A flag, if true, don't actually move the files.
- mkdirs: A flag, if true, make all the intervening directories; otherwise,
- fail to move files to non-existing dirs.
- overwrite: A flag, if true, overwrite an existing destination file.
- idify: A flag, if true, remove whitespace and funky characters in the destination
- filename.
- logfile: A file object to write log entries to, or None, in which case no log is
- written out.
- """
- jobs = []
- has_errors = False
- for filename, importers in identify.find_imports(importer_config,
- files_or_directories,
- logfile):
- # If we're debugging, print out the match text.
- # This option is useful when we're building our importer configuration,
- # to figure out which patterns to create as unique signatures.
- if not importers:
- continue
-
- # Process a single file.
- new_fullname = file_one_file(filename, importers, destination, idify, logfile)
- if new_fullname is None:
- continue
-
- # Check if the destination directory exists.
- new_dirname = path.dirname(new_fullname)
- if not path.exists(new_dirname) and not mkdirs:
- logging.error("Destination directory '{}' does not exist.".format(new_dirname))
- has_errors = True
- continue
-
- # Check if the destination file already exists; we don't want to clobber
- # it by accident.
- if not overwrite and path.exists(new_fullname):
- logging.error("Destination file '{}' already exists.".format(new_fullname))
- has_errors = True
- continue
-
- jobs.append((filename, new_fullname))
-
- # Check if any two imported files would be colliding in their destination
- # name, before we move anything.
- destmap = collections.defaultdict(list)
- for src, dest in jobs:
- destmap[dest].append(src)
- for dest, sources in destmap.items():
- if len(sources) != 1:
- logging.error("Collision in destination filenames '{}': from {}.".format(
- dest, ", ".join(["'{}'".format(source) for source in sources])))
- has_errors = True
-
- # If there are any errors, just don't do anything at all. This is a nicer
- # behaviour than moving just *some* files.
- if dry_run or has_errors:
- return
-
- # Actually carry out the moving job.
- for old_filename, new_filename in jobs:
- move_xdev_file(old_filename, new_filename, mkdirs)
-
- return jobs
-
Move a single filename using its matched importers.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
filename – A string, the name of the downloaded file to be processed.
-
importers – A list of importer instances that handle this file.
-
destination – A string, the root destination directory where the files are
-to be filed. The files are organized there under a hierarchy mirroring
-that of the chart of accounts.
-
idify – A flag, if true, remove whitespace and funky characters in the destination
-filename.
-
logfile – A file object to write log entries to, or None, in which case no log is
-written out.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
The full new destination filename on success, and None if there was an error.
-
-
-
-
-
-
- Source code in beancount/ingest/file.py
-
def file_one_file(filename, importers, destination, idify=False, logfile=None):
- """Move a single filename using its matched importers.
-
- Args:
- filename: A string, the name of the downloaded file to be processed.
- importers: A list of importer instances that handle this file.
- destination: A string, the root destination directory where the files are
- to be filed. The files are organized there under a hierarchy mirroring
- that of the chart of accounts.
- idify: A flag, if true, remove whitespace and funky characters in the destination
- filename.
- logfile: A file object to write log entries to, or None, in which case no log is
- written out.
- Returns:
- The full new destination filename on success, and None if there was an error.
- """
- # Create an object to cache all the conversions between the importers
- # and phases and what-not.
- file = cache.get_file(filename)
-
- # Get the account corresponding to the file.
- file_accounts = []
- for index, importer in enumerate(importers):
- try:
- account_ = importer.file_account(file)
- except Exception as exc:
- account_ = None
- logging.exception("Importer %s.file_account() raised an unexpected error: %s",
- importer.name(), exc)
- if account_ is not None:
- file_accounts.append(account_)
-
- file_accounts_set = set(file_accounts)
- if not file_accounts_set:
- logging.error("No account provided by importers: {}".format(
- ", ".join(imp.name() for imp in importers)))
- return None
-
- if len(file_accounts_set) > 1:
- logging.warning("Ambiguous accounts from many importers: {}".format(
- ', '.join(file_accounts_set)))
- # Note: Don't exit; select the first matching importer's account.
-
- file_account = file_accounts.pop(0)
-
- # Given multiple importers, select the first one that was yielded to
- # obtain the date and process the filename.
- importer = importers[0]
-
- # Compute the date from the last modified time.
- mtime = path.getmtime(filename)
- mtime_date = datetime.datetime.fromtimestamp(mtime).date()
-
- # Try to get the file's date by calling a module support function. The
- # module may be able to extract the date from the filename, from the
- # contents of the file itself (e.g. scraping some text from the PDF
- # contents, or grabbing the last line of a CSV file).
- try:
- date = importer.file_date(file)
- except Exception as exc:
- logging.exception("Importer %s.file_date() raised an unexpected error: %s",
- importer.name(), exc)
- date = None
- if date is None:
- # Fallback on the last modified time of the file.
- date = mtime_date
- date_source = 'mtime'
- else:
- date_source = 'contents'
-
- # Apply filename renaming, if implemented.
- # Otherwise clean up the filename.
- try:
- clean_filename = importer.file_name(file)
-
- # Warn the importer implementor if a name is returned and it's an
- # absolute filename.
- if clean_filename and (path.isabs(clean_filename) or os.sep in clean_filename):
- logging.error(("The importer '%s' file_name() method should return a relative "
- "filename; the filename '%s' is absolute or contains path "
- "separators"),
- importer.name(), clean_filename)
- except Exception as exc:
- logging.exception("Importer %s.file_name() raised an unexpected error: %s",
- importer.name(), exc)
- clean_filename = None
- if clean_filename is None:
- # If no filename has been provided, use the basename.
- clean_filename = path.basename(file.name)
- elif re.match(r'\d\d\d\d-\d\d-\d\d', clean_filename):
- logging.error("The importer '%s' file_name() method should not date the "
- "returned filename. Implement file_date() instead.")
-
- # We need a simple filename; remove the directory part if there is one.
- clean_basename = path.basename(clean_filename)
-
- # Remove whitespace if requested.
- if idify:
- clean_basename = misc_utils.idify(clean_basename)
-
- # Prepend the date prefix.
- new_filename = '{0:%Y-%m-%d}.{1}'.format(date, clean_basename)
-
- # Prepend destination directory.
- new_fullname = path.normpath(path.join(destination,
- file_account.replace(account.sep, os.sep),
- new_filename))
-
- # Print the filename and which modules matched.
- if logfile is not None:
- logfile.write('Importer: {}\n'.format(importer.name() if importer else '-'))
- logfile.write('Account: {}\n'.format(file_account))
- logfile.write('Date: {} (from {})\n'.format(date, date_source))
- logfile.write('Destination: {}\n'.format(new_fullname))
- logfile.write('\n')
-
- return new_fullname
-
src_filename – A string, the name of the file to copy.
-
dst_filename – A string, where to copy the file.
-
mkdirs – A flag, true if we should create a non-existing destination directory.
-
-
-
-
-
-
- Source code in beancount/ingest/file.py
-
def move_xdev_file(src_filename, dst_filename, mkdirs=False):
- """Move a file, potentially across devices.
-
- Args:
- src_filename: A string, the name of the file to copy.
- dst_filename: A string, where to copy the file.
- mkdirs: A flag, true if we should create a non-existing destination directory.
- """
- # Create missing directory if required.
- dst_dirname = path.dirname(dst_filename)
- if mkdirs:
- if not path.exists(dst_dirname):
- os.makedirs(dst_dirname)
- else:
- if not path.exists(dst_dirname):
- raise OSError("Destination directory '{}' does not exist.".format(dst_dirname))
-
- # Copy the file to its new name.
- shutil.copyfile(src_filename, dst_filename)
-
- # Remove the old file. Note that we copy and remove to support
- # cross-device moves, because it's sensible that the destination might
- # be on an encrypted device.
- os.remove(src_filename)
-
def run(args, parser, importers_list, files_or_directories, hooks=None):
- """Run the subcommand."""
-
- # If the output directory is not specified, move the files at the root where
- # the import configuration file is located. (Providing this default seems
- # better than using a required option.)
- if args.output_dir is None:
- if hasattr(args, 'config'):
- args.output_dir = path.dirname(path.abspath(args.config))
- else:
- import __main__ # pylint: disable=import-outside-toplevel
- args.output_dir = path.dirname(path.abspath(__main__.__file__))
-
- # Make sure the output directory exists.
- if not path.exists(args.output_dir):
- parser.error('Output directory "{}" does not exist.'.format(args.output_dir))
-
- file(importers_list, files_or_directories, args.output_dir,
- dry_run=args.dry_run,
- mkdirs=True,
- overwrite=args.overwrite,
- idify=True,
- logfile=sys.stdout)
- return 0
-
Read an import script and a list of downloaded filenames or directories of
-2downloaded files, and for each of those files, identify which importer it should
-be associated with.
Given an importer configuration, search for files that can be imported in the
-list of files or directories, run the signature checks on them and return a list
-of (filename, importers), where 'importers' is a list of importers that matched
-the file.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
importer_config – a list of importer instances that define the config.
-
files_or_directories – a list of files of directories to walk recursively and
- hunt for files to import.
-
logfile – A file object to write log entries to, or None, in which case no log is
-written out.
-
-
-
-
-
Yields:
- Triples of filename found, textified contents of the file, and list of
- importers matching this file.
-
-
- Source code in beancount/ingest/identify.py
-
def find_imports(importer_config, files_or_directories, logfile=None):
- """Given an importer configuration, search for files that can be imported in the
- list of files or directories, run the signature checks on them and return a list
- of (filename, importers), where 'importers' is a list of importers that matched
- the file.
-
- Args:
- importer_config: a list of importer instances that define the config.
- files_or_directories: a list of files of directories to walk recursively and
- hunt for files to import.
- logfile: A file object to write log entries to, or None, in which case no log is
- written out.
- Yields:
- Triples of filename found, textified contents of the file, and list of
- importers matching this file.
- """
- # Iterate over all files found; accumulate the entries by identification.
- for filename in file_utils.find_files(files_or_directories):
- if logfile is not None:
- logfile.write(SECTION.format(filename))
- logfile.write('\n')
-
- # Skip files that are simply too large.
- size = path.getsize(filename)
- if size > FILE_TOO_LARGE_THRESHOLD:
- logging.warning("File too large: '{}' ({} bytes); skipping.".format(
- filename, size))
- continue
-
- # For each of the sources the user has declared, identify which
- # match the text.
- file = cache.get_file(filename)
- matching_importers = []
- for importer in importer_config:
- try:
- matched = importer.identify(file)
- if matched:
- matching_importers.append(importer)
- except Exception as exc:
- logging.exception("Importer %s.identify() raised an unexpected error: %s",
- importer.name(), exc)
-
- yield (filename, matching_importers)
-
files_or_directories – A list of strings, files or directories.
-
-
-
-
-
-
- Source code in beancount/ingest/identify.py
-
def identify(importers_list, files_or_directories):
- """Run the identification loop.
-
- Args:
- importers_list: A list of importer instances.
- files_or_directories: A list of strings, files or directories.
- """
- logfile = sys.stdout
- for filename, importers in find_imports(importers_list, files_or_directories,
- logfile=logfile):
- file = cache.get_file(filename)
- for importer in importers:
- logfile.write('Importer: {}\n'.format(importer.name() if importer else '-'))
- logfile.write('Account: {}\n'.format(importer.file_account(file)))
- logfile.write('\n')
-
All importers must comply with this interface and implement at least some of its
-methods. A configuration consists of a simple list of such importer instances.
-The importer processes run through the importers, calling some of its methods in
-order to identify, extract and file the downloaded files.
-
Each of the methods accept a cache.FileMemo object which has a 'name' attribute
-with the filename to process, but which also provides a place to cache
-conversions. Use its convert() method whenever possible to avoid carrying out
-the same conversion multiple times. See beancount.ingest.cache for more details.
-
Synopsis:
-
name(): Return a unique identifier for the importer instance.
- identify(): Return true if the identifier is able to process the file.
- extract(): Extract directives from a file's contents and return of list of entries.
- file_account(): Return an account name associated with the given file for this importer.
- file_date(): Return a date associated with the downloaded file (e.g., the statement date).
- file_name(): Return a cleaned up filename for storage (optional).
-
Just to be clear: Although this importer will not raise NotImplementedError
-exceptions (it returns default values for each method), you NEED to derive from
-it in order to do anything meaningful. Simply instantiating this importer will
-not match nor provide any useful information. It just defines the protocol for
-all importers.
If the importer would like to flag a returned transaction as a known
-duplicate, it may opt to set the special flag "__duplicate__" to True,
-and the transaction should be treated as a duplicate by the extraction
-code. This is a way to let the importer use particular information about
-previously imported transactions in order to flag them as duplicates.
-For example, if an importer has a way to get a persistent unique id for
-each of the imported transactions. (See this discussion for context:
-https://groups.google.com/d/msg/beancount/0iV-ipBJb8g/-uk4wsH2AgAJ)
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
file – A cache.FileMemo instance.
-
existing_entries – An optional list of existing directives loaded from
-the ledger which is intended to contain the extracted entries. This
-is only provided if the user provides them via a flag in the
-extractor program.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A list of new, imported directives (usually mostly Transactions)
-extracted from the file.
-
-
-
-
-
-
- Source code in beancount/ingest/importer.py
-
def extract(self, file, existing_entries=None):
- """Extract transactions from a file.
-
- If the importer would like to flag a returned transaction as a known
- duplicate, it may opt to set the special flag "__duplicate__" to True,
- and the transaction should be treated as a duplicate by the extraction
- code. This is a way to let the importer use particular information about
- previously imported transactions in order to flag them as duplicates.
- For example, if an importer has a way to get a persistent unique id for
- each of the imported transactions. (See this discussion for context:
- https://groups.google.com/d/msg/beancount/0iV-ipBJb8g/-uk4wsH2AgAJ)
-
- Args:
- file: A cache.FileMemo instance.
- existing_entries: An optional list of existing directives loaded from
- the ledger which is intended to contain the extracted entries. This
- is only provided if the user provides them via a flag in the
- extractor program.
- Returns:
- A list of new, imported directives (usually mostly Transactions)
- extracted from the file.
- """
-
Note: If you don't implement this method you won't be able to move the
-files into its preservation hierarchy; the bean-file command won't
-work.
-
Also, normally the returned account is not a function of the input
-file--just of the importer--but it is provided anyhow.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
file – A cache.FileMemo instance.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
The name of the account that corresponds to this importer.
-
-
-
-
-
-
- Source code in beancount/ingest/importer.py
-
def file_account(self, file):
- """Return an account associated with the given file.
-
- Note: If you don't implement this method you won't be able to move the
- files into its preservation hierarchy; the bean-file command won't
- work.
-
- Also, normally the returned account is not a function of the input
- file--just of the importer--but it is provided anyhow.
-
- Args:
- file: A cache.FileMemo instance.
- Returns:
- The name of the account that corresponds to this importer.
- """
-
Attempt to obtain a date that corresponds to the given file.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
file – A cache.FileMemo instance.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A date object, if successful, or None if a date could not be extracted.
-(If no date is returned, the file creation time is used. This is the
-default.)
-
-
-
-
-
-
- Source code in beancount/ingest/importer.py
-
def file_date(self, file):
- """Attempt to obtain a date that corresponds to the given file.
-
- Args:
- file: A cache.FileMemo instance.
- Returns:
- A date object, if successful, or None if a date could not be extracted.
- (If no date is returned, the file creation time is used. This is the
- default.)
- """
-
A filter that optionally renames a file before filing.
-
This is used to make tidy filenames for filed/stored document files. If
-you don't implement this and return None, the same filename is used.
-Note that if you return a filename, a simple, RELATIVE filename must be
-returned, not an absolute filename.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
file – A cache.FileMemo instance.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
The tidied up, new filename to store it as.
-
-
-
-
-
-
- Source code in beancount/ingest/importer.py
-
def file_name(self, file):
- """A filter that optionally renames a file before filing.
-
- This is used to make tidy filenames for filed/stored document files. If
- you don't implement this and return None, the same filename is used.
- Note that if you return a filename, a simple, RELATIVE filename must be
- returned, not an absolute filename.
-
- Args:
- file: A cache.FileMemo instance.
- Returns:
- The tidied up, new filename to store it as.
- """
-
Return true if this importer matches the given file.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
file – A cache.FileMemo instance.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A boolean, true if this importer can handle this file.
-
-
-
-
-
-
- Source code in beancount/ingest/importer.py
-
def identify(self, file):
- """Return true if this importer matches the given file.
-
- Args:
- file: A cache.FileMemo instance.
- Returns:
- A boolean, true if this importer can handle this file.
- """
-
Mixin to add support for configuring importers with multiple accounts.
-
This importer implements some simple common functionality to create importers
-which accept a long number of account names or regular expressions on the set of
-account names. This is inspired by functionality in the importers in the
-previous iteration of the ingest code, which used to be its own project.
A mixin class which supports configuration of account names.
-
-Mix this into the implementation of an importer.ImporterProtocol.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-beancount.ingest.importers.config.ConfigImporterMixin.__init__(self, config)
-
-
- special
-
-
-
-
-
-
-
Provide a list of accounts and regexps as configuration to the importer.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
config – A dict of configuration accounts, that must match the values
-declared in the class' REQUIRED_CONFIG.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/config.py
-
def __init__(self, config):
- """Provide a list of accounts and regexps as configuration to the importer.
-
- Args:
- config: A dict of configuration accounts, that must match the values
- declared in the class' REQUIRED_CONFIG.
- """
- super().__init__()
-
- # Check that the required configuration values are present.
- assert isinstance(config, dict), "Configuration must be a dict type"
- if not self._verify_config(config):
- raise ValueError("Invalid config {}, requires {}".format(
- config, self.REQUIRED_CONFIG))
- self.config = config
-
config – A dict of Col enum types to the names or indexes of the columns.
-
account – An account string, the account to post this to.
-
currency – A currency string, the currency of this account.
-
regexps – A list of regular expression strings.
-
skip_lines (int) – Skip first x (garbage) lines of file.
-
last4_map (Optional[Dict]) – A dict that maps last 4 digits of the card to a friendly string.
-
categorizer (Optional[Callable]) – A callable that attaches the other posting (usually expenses)
-to a transaction with only single posting.
-
institution (Optional[str]) – An optional name of an institution to rename the files to.
-
debug (bool) – Whether or not to print debug information
-
csv_dialect (Union[str, csv.Dialect]) – A csv dialect given either as string or as instance or
-subclass of csv.Dialect.
-
dateutil_kwds (Optional[Dict]) – An optional dict defining the dateutil parser kwargs.
-
narration_sep (str) – A string, a separator to use for splitting up the payee and
-narration fields of a source field.
-
encoding (Optional[str]) – An optional encoding for the file. Typically useful for files
-encoded in 'latin1' instead of 'utf-8' (the default).
-
invert_sign (Optional[bool]) – If true, invert the amount's sign unconditionally.
-
**kwds – Extra keyword arguments to provide to the base mixins.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/csv.py
-
def __init__(self, config, account, currency,
- regexps=None,
- skip_lines: int = 0,
- last4_map: Optional[Dict] = None,
- categorizer: Optional[Callable] = None,
- institution: Optional[str] = None,
- debug: bool = False,
- csv_dialect: Union[str, csv.Dialect] = 'excel',
- dateutil_kwds: Optional[Dict] = None,
- narration_sep: str = '; ',
- encoding: Optional[str] = None,
- invert_sign: Optional[bool] = False,
- **kwds):
- """Constructor.
-
- Args:
- config: A dict of Col enum types to the names or indexes of the columns.
- account: An account string, the account to post this to.
- currency: A currency string, the currency of this account.
- regexps: A list of regular expression strings.
- skip_lines: Skip first x (garbage) lines of file.
- last4_map: A dict that maps last 4 digits of the card to a friendly string.
- categorizer: A callable that attaches the other posting (usually expenses)
- to a transaction with only single posting.
- institution: An optional name of an institution to rename the files to.
- debug: Whether or not to print debug information
- csv_dialect: A `csv` dialect given either as string or as instance or
- subclass of `csv.Dialect`.
- dateutil_kwds: An optional dict defining the dateutil parser kwargs.
- narration_sep: A string, a separator to use for splitting up the payee and
- narration fields of a source field.
- encoding: An optional encoding for the file. Typically useful for files
- encoded in 'latin1' instead of 'utf-8' (the default).
- invert_sign: If true, invert the amount's sign unconditionally.
- **kwds: Extra keyword arguments to provide to the base mixins.
- """
- assert isinstance(config, dict), "Invalid type: {}".format(config)
- self.config = config
-
- self.currency = currency
- assert isinstance(skip_lines, int)
- self.skip_lines = skip_lines
- self.last4_map = last4_map or {}
- self.debug = debug
- self.dateutil_kwds = dateutil_kwds
- self.csv_dialect = csv_dialect
- self.narration_sep = narration_sep
- self.encoding = encoding
- self.invert_sign = invert_sign
-
- self.categorizer = categorizer
-
- # Prepare kwds for filing mixin.
- kwds['filing'] = account
- if institution:
- prefix = kwds.get('prefix', None)
- assert prefix is None
- kwds['prefix'] = institution
-
- # Prepare kwds for identifier mixin.
- if isinstance(regexps, str):
- regexps = [regexps]
- matchers = kwds.setdefault('matchers', [])
- matchers.append(('mime', 'text/csv'))
- if regexps:
- for regexp in regexps:
- matchers.append(('content', regexp))
-
- super().__init__(**kwds)
-
If the importer would like to flag a returned transaction as a known
-duplicate, it may opt to set the special flag "__duplicate__" to True,
-and the transaction should be treated as a duplicate by the extraction
-code. This is a way to let the importer use particular information about
-previously imported transactions in order to flag them as duplicates.
-For example, if an importer has a way to get a persistent unique id for
-each of the imported transactions. (See this discussion for context:
-https://groups.google.com/d/msg/beancount/0iV-ipBJb8g/-uk4wsH2AgAJ)
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
file – A cache.FileMemo instance.
-
existing_entries – An optional list of existing directives loaded from
-the ledger which is intended to contain the extracted entries. This
-is only provided if the user provides them via a flag in the
-extractor program.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A list of new, imported directives (usually mostly Transactions)
-extracted from the file.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/csv.py
-
def extract(self, file, existing_entries=None):
- account = self.file_account(file)
- entries = []
-
- # Normalize the configuration to fetch by index.
- iconfig, has_header = normalize_config(
- self.config, file.head(), self.csv_dialect, self.skip_lines)
-
- reader = iter(csv.reader(open(file.name, encoding=self.encoding),
- dialect=self.csv_dialect))
-
- # Skip garbage lines
- for _ in range(self.skip_lines):
- next(reader)
-
- # Skip header, if one was detected.
- if has_header:
- next(reader)
-
- def get(row, ftype):
- try:
- return row[iconfig[ftype]] if ftype in iconfig else None
- except IndexError: # FIXME: this should not happen
- return None
-
- # Parse all the transactions.
- first_row = last_row = None
- for index, row in enumerate(reader, 1):
- if not row:
- continue
- if row[0].startswith('#'):
- continue
-
- # If debugging, print out the rows.
- if self.debug:
- print(row)
-
- if first_row is None:
- first_row = row
- last_row = row
-
- # Extract the data we need from the row, based on the configuration.
- date = get(row, Col.DATE)
- txn_date = get(row, Col.TXN_DATE)
- txn_time = get(row, Col.TXN_TIME)
-
- payee = get(row, Col.PAYEE)
- if payee:
- payee = payee.strip()
-
- fields = filter(None, [get(row, field)
- for field in (Col.NARRATION1,
- Col.NARRATION2,
- Col.NARRATION3)])
- narration = self.narration_sep.join(
- field.strip() for field in fields).replace('\n', '; ')
-
- tag = get(row, Col.TAG)
- tags = {tag} if tag is not None else data.EMPTY_SET
-
- link = get(row, Col.REFERENCE_ID)
- links = {link} if link is not None else data.EMPTY_SET
-
- last4 = get(row, Col.LAST4)
-
- balance = get(row, Col.BALANCE)
-
- # Create a transaction
- meta = data.new_metadata(file.name, index)
- if txn_date is not None:
- meta['date'] = parse_date_liberally(txn_date,
- self.dateutil_kwds)
- if txn_time is not None:
- meta['time'] = str(dateutil.parser.parse(txn_time).time())
- if balance is not None:
- meta['balance'] = D(balance)
- if last4:
- last4_friendly = self.last4_map.get(last4.strip())
- meta['card'] = last4_friendly if last4_friendly else last4
- date = parse_date_liberally(date, self.dateutil_kwds)
- txn = data.Transaction(meta, date, self.FLAG, payee, narration,
- tags, links, [])
-
- # Attach one posting to the transaction
- amount_debit, amount_credit = self.get_amounts(iconfig, row)
-
- # Skip empty transactions
- if amount_debit is None and amount_credit is None:
- continue
-
- for amount in [amount_debit, amount_credit]:
- if amount is None:
- continue
- if self.invert_sign:
- amount = -amount
- units = Amount(amount, self.currency)
- txn.postings.append(
- data.Posting(account, units, None, None, None, None))
-
- # Attach the other posting(s) to the transaction.
- if isinstance(self.categorizer, collections.abc.Callable):
- txn = self.categorizer(txn)
-
- # Add the transaction to the output list
- entries.append(txn)
-
- # Figure out if the file is in ascending or descending order.
- first_date = parse_date_liberally(get(first_row, Col.DATE),
- self.dateutil_kwds)
- last_date = parse_date_liberally(get(last_row, Col.DATE),
- self.dateutil_kwds)
- is_ascending = first_date < last_date
-
- # Reverse the list if the file is in descending order
- if not is_ascending:
- entries = list(reversed(entries))
-
- # Add a balance entry if possible
- if Col.BALANCE in iconfig and entries:
- entry = entries[-1]
- date = entry.date + datetime.timedelta(days=1)
- balance = entry.meta.get('balance', None)
- if balance is not None:
- meta = data.new_metadata(file.name, index)
- entries.append(
- data.Balance(meta, date,
- account, Amount(balance, self.currency),
- None, None))
-
- # Remove the 'balance' metadata.
- for entry in entries:
- entry.meta.pop('balance', None)
-
- return entries
-
-
-
- Source code in beancount/ingest/importers/csv.py
-
def file_date(self, file):
- "Get the maximum date from the file."
- iconfig, has_header = normalize_config(
- self.config, file.head(), self.csv_dialect, self.skip_lines)
- if Col.DATE in iconfig:
- reader = iter(csv.reader(open(file.name), dialect=self.csv_dialect))
- for _ in range(self.skip_lines):
- next(reader)
- if has_header:
- next(reader)
- max_date = None
- for row in reader:
- if not row:
- continue
- if row[0].startswith('#'):
- continue
- date_str = row[iconfig[Col.DATE]]
- date = parse_date_liberally(date_str, self.dateutil_kwds)
- if max_date is None or date > max_date:
- max_date = date
- return max_date
-
This method is present to allow clients to override it in order to deal
-with special cases, e.g., columns with currency symbols in them.
-
-
- Source code in beancount/ingest/importers/csv.py
-
def get_amounts(self, iconfig, row, allow_zero_amounts=False):
- """See function get_amounts() for details.
-
- This method is present to allow clients to override it in order to deal
- with special cases, e.g., columns with currency symbols in them.
- """
- return get_amounts(iconfig, row, allow_zero_amounts)
-
row – A row array containing the values of the given row.
-
allow_zero_amounts – Is a transaction with amount D('0.00') okay? If not,
-return (None, None).
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A pair of (debit-amount, credit-amount), both of which are either an
-instance of Decimal or None, or not available.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/csv.py
-
def get_amounts(iconfig, row, allow_zero_amounts=False):
- """Get the amount columns of a row.
-
- Args:
- iconfig: A dict of Col to row index.
- row: A row array containing the values of the given row.
- allow_zero_amounts: Is a transaction with amount D('0.00') okay? If not,
- return (None, None).
- Returns:
- A pair of (debit-amount, credit-amount), both of which are either an
- instance of Decimal or None, or not available.
- """
- debit, credit = None, None
- if Col.AMOUNT in iconfig:
- credit = row[iconfig[Col.AMOUNT]]
- else:
- debit, credit = [row[iconfig[col]] if col in iconfig else None
- for col in [Col.AMOUNT_DEBIT, Col.AMOUNT_CREDIT]]
-
- # If zero amounts aren't allowed, return null value.
- is_zero_amount = ((credit is not None and D(credit) == ZERO) and
- (debit is not None and D(debit) == ZERO))
- if not allow_zero_amounts and is_zero_amount:
- return (None, None)
-
- return (-D(debit) if debit else None,
- D(credit) if credit else None)
-
Using the header line, convert the configuration field name lookups to int indexes.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
config – A dict of Col types to string or indexes.
-
head – A string, some decent number of bytes of the head of the file.
-
dialect – A dialect definition to parse the header
-
skip_lines (int) – Skip first x (garbage) lines of file.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A pair of
- A dict of Col types to integer indexes of the fields, and
- a boolean, true if the file has a header.
-
-
-
-
-
-
-
-
-
-
-
-
-
Exceptions:
-
-
-
ValueError – If there is no header and the configuration does not consist
-entirely of integer indexes.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/csv.py
-
def normalize_config(config, head, dialect='excel', skip_lines: int = 0):
- """Using the header line, convert the configuration field name lookups to int indexes.
-
- Args:
- config: A dict of Col types to string or indexes.
- head: A string, some decent number of bytes of the head of the file.
- dialect: A dialect definition to parse the header
- skip_lines: Skip first x (garbage) lines of file.
- Returns:
- A pair of
- A dict of Col types to integer indexes of the fields, and
- a boolean, true if the file has a header.
- Raises:
- ValueError: If there is no header and the configuration does not consist
- entirely of integer indexes.
- """
- # Skip garbage lines before sniffing the header
- assert isinstance(skip_lines, int)
- assert skip_lines >= 0
- for _ in range(skip_lines):
- head = head[head.find('\n')+1:]
-
- has_header = csv.Sniffer().has_header(head)
- if has_header:
- header = next(csv.reader(io.StringIO(head), dialect=dialect))
- field_map = {field_name.strip(): index
- for index, field_name in enumerate(header)}
- index_config = {}
- for field_type, field in config.items():
- if isinstance(field, str):
- field = field_map[field]
- index_config[field_type] = field
- else:
- if any(not isinstance(field, int)
- for field_type, field in config.items()):
- raise ValueError("CSV config without header has non-index fields: "
- "{}".format(config))
- index_config = config
- return index_config, has_header
-
Check the configuration account provided by the user against the accounts
-required by the source importer.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
config – A config dict of actual values on an importer.
-
schema – A dict of declarations of required values.
-
-
-
-
-
-
-
-
-
-
-
-
-
Exceptions:
-
-
-
ValueError – If the configuration is invalid.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A validated configuration dict.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/mixins/config.py
-
def validate_config(config, schema, importer):
- """Check the configuration account provided by the user against the accounts
- required by the source importer.
-
- Args:
- config: A config dict of actual values on an importer.
- schema: A dict of declarations of required values.
- Raises:
- ValueError: If the configuration is invalid.
- Returns:
- A validated configuration dict.
- """
- provided_options = set(config)
- required_options = set(schema)
-
- for option in (required_options - provided_options):
- raise ValueError("Missing value from user configuration for importer {}: {}".format(
- importer.__class__.__name__, option))
-
- for option in (provided_options - required_options):
- raise ValueError("Unknown value in user configuration for importer {}: {}".format(
- importer.__class__.__name__, option))
-
- # FIXME: Validate types as well, including account type as a default.
-
- # FIXME: Here we could validate account names by looking them up from the
- # existing ledger.
-
- return config
-
It also sports an optional prefix to prepend to the renamed filename. Typically
-you can put the name of the institution there, so you get a renamed filename
-like this:
-beancount.ingest.importers.mixins.filing.FilingMixin.__init__(self, **kwds)
-
-
- special
-
-
-
-
-
-
-
Pull 'filing' and 'prefix' from kwds.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
filing – The name of the account to file to.
-
prefix – The name of the institution prefix to insert.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/mixins/filing.py
-
def __init__(self, **kwds):
- """Pull 'filing' and 'prefix' from kwds.
-
- Args:
- filing: The name of the account to file to.
- prefix: The name of the institution prefix to insert.
- """
-
- self.filing_account = kwds.pop('filing', None)
- assert account.is_valid(self.filing_account)
-
- self.prefix = kwds.pop('prefix', None)
-
- super().__init__(**kwds)
-
remap – A dict of 'part' to list-of-compiled-regexp objects, where each item is
-a specification to match against its part. The 'part' can be one of 'mime',
-'filename' or 'content'.
-
converter – A
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A boolean, true if the file is not rejected by the constraints.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/mixins/identifier.py
-
def identify(remap, converter, file):
- """Identify the contents of a file.
-
- Args:
- remap: A dict of 'part' to list-of-compiled-regexp objects, where each item is
- a specification to match against its part. The 'part' can be one of 'mime',
- 'filename' or 'content'.
- converter: A
- Returns:
- A boolean, true if the file is not rejected by the constraints.
- """
- if remap.get('mime', None):
- mimetype = file.convert(cache.mimetype)
- if not all(regexp.search(mimetype)
- for regexp in remap['mime']):
- return False
-
- if remap.get('filename', None):
- if not all(regexp.search(file.name)
- for regexp in remap['filename']):
- return False
-
- if remap.get('content', None):
- # If this is a text file, read the whole thing in memory.
- text = file.convert(converter or cache.contents)
- if not all(regexp.search(text)
- for regexp in remap['content']):
- return False
-
- return True
-
This importer will parse a single account in the OFX file. Instantiate it
-multiple times with different accounts if it has many accounts. It makes more
-sense to do it this way so that you can define your importer configuration
-account by account.
-
Note that this importer is provided as an example and with no guarantees. It's
-not really super great. On the other hand, I've been using it for more than five
-years over multiple accounts, so it has been useful to me (it works, by some
-measure of "works"). If you need a more powerful or compliant OFX importer
-please consider either writing one or contributing changes. Also, this importer
-does its own very basic parsing; a better one would probably use (and depend on)
-the ofxparse module (see https://sites.google.com/site/ofxparse/).
Create a new importer posting to the given account.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
account – An account string, the account onto which to post all the
-amounts parsed.
-
acctid_regexp – A regexp, to match against the <ACCTID> tag of the OFX file.
-
basename – An optional string, the name of the new files.
-
balance_type – An enum of type BalanceType.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/ofx.py
-
def __init__(self, acctid_regexp, account, basename=None,
- balance_type=BalanceType.DECLARED):
- """Create a new importer posting to the given account.
-
- Args:
- account: An account string, the account onto which to post all the
- amounts parsed.
- acctid_regexp: A regexp, to match against the <ACCTID> tag of the OFX file.
- basename: An optional string, the name of the new files.
- balance_type: An enum of type BalanceType.
- """
- self.acctid_regexp = acctid_regexp
- self.account = account
- self.basename = basename
- self.balance_type = balance_type
-
Return true if this importer matches the given file.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
file – A cache.FileMemo instance.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A boolean, true if this importer can handle this file.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/ofx.py
-
def identify(self, file):
- # Match for a compatible MIME type.
- if file.mimetype() not in {'application/x-ofx',
- 'application/vnd.intu.qbo',
- 'application/vnd.intu.qfx'}:
- return False
-
- # Match the account id.
- return any(re.match(self.acctid_regexp, acctid)
- for acctid in find_acctids(file.contents()))
-
account – An account string, the account to insert.
-
currency – A currency string.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A Transaction instance.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/ofx.py
-
def build_transaction(stmttrn, flag, account, currency):
- """Build a single transaction.
-
- Args:
- stmttrn: A <STMTTRN> bs4.element.Tag.
- flag: A single-character string.
- account: An account string, the account to insert.
- currency: A currency string.
- Returns:
- A Transaction instance.
- """
- # Find the date.
- date = parse_ofx_time(find_child(stmttrn, 'dtposted')).date()
-
- # There's no distinct payee.
- payee = None
-
- # Construct a description that represents all the text content in the node.
- name = find_child(stmttrn, 'name', saxutils.unescape)
- memo = find_child(stmttrn, 'memo', saxutils.unescape)
-
- # Remove memos duplicated from the name.
- if memo == name:
- memo = None
-
- # Add the transaction type to the description, unless it's not useful.
- trntype = find_child(stmttrn, 'trntype', saxutils.unescape)
- if trntype in ('DEBIT', 'CREDIT'):
- trntype = None
-
- narration = ' / '.join(filter(None, [name, memo, trntype]))
-
- # Create a single posting for it; the user will have to manually categorize
- # the other side.
- number = find_child(stmttrn, 'trnamt', D)
- units = amount.Amount(number, currency)
- posting = data.Posting(account, units, None, None, None, None)
-
- # Build the transaction with a single leg.
- fileloc = data.new_metadata('<build_transaction>', 0)
- return data.Transaction(fileloc, date, flag, payee, narration,
- data.EMPTY_SET, data.EMPTY_SET, [posting])
-
acctid_regexp – A regular expression string matching the account we're interested in.
-
account – An account string onto which to post the amounts found in the file.
-
flag – A single-character string.
-
balance_type – An enum of type BalanceType.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A sorted list of entries.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/ofx.py
-
def extract(soup, filename, acctid_regexp, account, flag, balance_type):
- """Extract transactions from an OFX file.
-
- Args:
- soup: A BeautifulSoup root node.
- acctid_regexp: A regular expression string matching the account we're interested in.
- account: An account string onto which to post the amounts found in the file.
- flag: A single-character string.
- balance_type: An enum of type BalanceType.
- Returns:
- A sorted list of entries.
- """
- new_entries = []
- counter = itertools.count()
- for acctid, currency, transactions, balance in find_statement_transactions(soup):
- if not re.match(acctid_regexp, acctid):
- continue
-
- # Create Transaction directives.
- stmt_entries = []
- for stmttrn in transactions:
- entry = build_transaction(stmttrn, flag, account, currency)
- entry = entry._replace(meta=data.new_metadata(filename, next(counter)))
- stmt_entries.append(entry)
- stmt_entries = data.sorted(stmt_entries)
- new_entries.extend(stmt_entries)
-
- # Create a Balance directive.
- if balance and balance_type is not BalanceType.NONE:
- date, number = balance
- if balance_type is BalanceType.LAST and stmt_entries:
- date = stmt_entries[-1].date
-
- # The Balance assertion occurs at the beginning of the date, so move
- # it to the following day.
- date += datetime.timedelta(days=1)
-
- meta = data.new_metadata(filename, next(counter))
- balance_entry = data.Balance(meta, date, account,
- amount.Amount(number, currency),
- None, None)
- new_entries.append(balance_entry)
-
- return data.sorted(new_entries)
-
contents – A string, the contents of the OFX file.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A list of strings, the contents of the <ACCTID> tags.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/ofx.py
-
def find_acctids(contents):
- """Find the list of <ACCTID> tags.
-
- Args:
- contents: A string, the contents of the OFX file.
- Returns:
- A list of strings, the contents of the <ACCTID> tags.
- """
- # Match the account id. Don't bother parsing the entire thing as XML, just
- # match the tag for this purpose. This'll work fine enough.
- for match in re.finditer('<ACCTID>([^<]*)', contents):
- yield match.group(1)
-
Find a child under the given node and return its value.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
node – A <STMTTRN> bs4.element.Tag.
-
name – A string, the name of the child node.
-
conversion – A callable object used to convert the value to a new data type.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A string, or None.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/ofx.py
-
def find_child(node, name, conversion=None):
- """Find a child under the given node and return its value.
-
- Args:
- node: A <STMTTRN> bs4.element.Tag.
- name: A string, the name of the child node.
- conversion: A callable object used to convert the value to a new data type.
- Returns:
- A string, or None.
- """
- child = node.find(name)
- if not child:
- return None
- value = child.contents[0].strip()
- if conversion:
- value = conversion(value)
- return value
-
A string, the first currency found in the file. Returns None if no currency
-is found.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/ofx.py
-
def find_currency(soup):
- """Find the first currency in the XML tree.
-
- Args:
- soup: A BeautifulSoup root node.
- Returns:
- A string, the first currency found in the file. Returns None if no currency
- is found.
- """
- for stmtrs in soup.find_all(re.compile('.*stmtrs$')):
- for currency_node in stmtrs.find_all('curdef'):
- currency = currency_node.contents[0]
- if currency is not None:
- return currency
-
Find the statement transaction sections in the file.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
soup – A BeautifulSoup root node.
-
-
-
-
-
Yields:
- A triple of
- An account id string,
- A currency string,
- A list of transaction nodes (<STMTTRN> BeautifulSoup tags), and
- A (date, balance amount) for the <LEDGERBAL>.
-
-
- Source code in beancount/ingest/importers/ofx.py
-
def find_statement_transactions(soup):
- """Find the statement transaction sections in the file.
-
- Args:
- soup: A BeautifulSoup root node.
- Yields:
- A trip of
- An account id string,
- A currency string,
- A list of transaction nodes (<STMTTRN> BeautifulSoup tags), and
- A (date, balance amount) for the <LEDGERBAL>.
- """
- # Process STMTTRNRS and CCSTMTTRNRS tags.
- for stmtrs in soup.find_all(re.compile('.*stmtrs$')):
- # For each CURDEF tag.
- for currency_node in stmtrs.find_all('curdef'):
- currency = currency_node.contents[0].strip()
-
- # Extract ACCTID account information.
- acctid_node = stmtrs.find('acctid')
- if acctid_node:
- acctid = next(acctid_node.children).strip()
- else:
- acctid = ''
-
- # Get the LEDGERBAL node. There appears to be a single one for all
- # transaction lists.
- ledgerbal = stmtrs.find('ledgerbal')
- balance = None
- if ledgerbal:
- dtasof = find_child(ledgerbal, 'dtasof', parse_ofx_time).date()
- balamt = find_child(ledgerbal, 'balamt', D)
- balance = (dtasof, balamt)
-
- # Process transaction lists (regular or credit-card).
- for tranlist in stmtrs.find_all(re.compile('(|bank|cc)tranlist')):
- yield acctid, currency, tranlist.find_all('stmttrn'), balance
-
Parse an OFX time string and return a datetime object.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
date_str – A string, the date to be parsed.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A datetime.datetime instance.
-
-
-
-
-
-
- Source code in beancount/ingest/importers/ofx.py
-
def parse_ofx_time(date_str):
- """Parse an OFX time string and return a datetime object.
-
- Args:
- date_str: A string, the date to be parsed.
- Returns:
- A datetime.datetime instance.
- """
- if len(date_str) < 14:
- return datetime.datetime.strptime(date_str[:8], '%Y%m%d')
- else:
- return datetime.datetime.strptime(date_str[:14], '%Y%m%d%H%M%S')
-
Support for implementing regression tests on sample files using nose.
-
NOTE: This itself is not a regression test. It's a library used to create
-regression tests for your importers. Use it like this in your own importer code:
WARNING: This is deprecated. Nose itself has been deprecated for a while and
-Beancount is now using only pytest. Ignore this and use
-beancount.ingest.regression_pytest instead.
Extract entries from a test file and compare against expected output.
-
If an expected file (as <filename>.extract) is not present, we issue a
-warning. Missing expected files can be written out by removing them
-before running the tests.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
filename – A string, the name of the file to import using self.importer.
-
-
-
-
-
-
-
-
-
-
-
-
-
Exceptions:
-
-
-
AssertionError – If the contents differ from the expected file.
-
-
-
-
-
-
- Source code in beancount/ingest/regression.py
-
@test_utils.skipIfRaises(ToolNotInstalled)
-def test_expect_extract(self, filename, msg):
- """Extract entries from a test file and compare against expected output.
-
- If an expected file (as <filename>.extract) is not present, we issue a
- warning. Missing expected files can be written out by removing them
- before running the tests.
-
- Args:
- filename: A string, the name of the file to import using self.importer.
- Raises:
- AssertionError: If the contents differ from the expected file.
-
- """
- # Import the file.
- entries = extract.extract_from_file(filename, self.importer, None, None)
-
- # Render the entries to a string.
- oss = io.StringIO()
- printer.print_entries(entries, file=oss)
- string = oss.getvalue()
-
- expect_filename = '{}.extract'.format(filename)
- if path.exists(expect_filename):
- expect_string = open(expect_filename, encoding='utf-8').read()
- self.assertEqual(expect_string.strip(), string.strip())
- else:
- # Write out the expected file for review.
- open(expect_filename, 'w', encoding='utf-8').write(string)
- self.skipTest("Expected file not present; generating '{}'".format(
- expect_filename))
-
Compute the imported file date and compare to an expected output.
-
If an expected file (as <filename>.file_date) is not present, we issue a
-warning. Missing expected files can be written out by removing them
-before running the tests.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
filename – A string, the name of the file to import using self.importer.
-
-
-
-
-
-
-
-
-
-
-
-
-
Exceptions:
-
-
-
AssertionError – If the contents differ from the expected file.
-
-
-
-
-
-
- Source code in beancount/ingest/regression.py
-
@test_utils.skipIfRaises(ToolNotInstalled)
-def test_expect_file_date(self, filename, msg):
- """Compute the imported file date and compare to an expected output.
-
- If an expected file (as <filename>.file_date) is not present, we issue a
- warning. Missing expected files can be written out by removing them
- before running the tests.
-
- Args:
- filename: A string, the name of the file to import using self.importer.
- Raises:
- AssertionError: If the contents differ from the expected file.
- """
- # Import the date.
- file = cache.get_file(filename)
- date = self.importer.file_date(file)
- if date is None:
- self.fail("No date produced from {}".format(file.name))
-
- expect_filename = '{}.file_date'.format(file.name)
- if path.exists(expect_filename) and path.getsize(expect_filename) > 0:
- expect_date_str = open(expect_filename, encoding='utf-8').read().strip()
- expect_date = datetime.datetime.strptime(expect_date_str, '%Y-%m-%d').date()
- self.assertEqual(expect_date, date)
- else:
- # Write out the expected file for review.
- with open(expect_filename, 'w', encoding='utf-8') as outfile:
- print(date.strftime('%Y-%m-%d'), file=outfile)
- self.skipTest("Expected file not present; generating '{}'".format(
- expect_filename))
-
Compute the imported file name and compare to an expected output.
-
If an expected file (as <filename>.file_name) is not present, we issue a
-warning. Missing expected files can be written out by removing them
-before running the tests.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
filename – A string, the name of the file to import using self.importer.
-
-
-
-
-
-
-
-
-
-
-
-
-
Exceptions:
-
-
-
AssertionError – If the contents differ from the expected file.
-
-
-
-
-
-
- Source code in beancount/ingest/regression.py
-
@test_utils.skipIfRaises(ToolNotInstalled)
-def test_expect_file_name(self, filename, msg):
- """Compute the imported file name and compare to an expected output.
-
- If an expected file (as <filename>.file_name) is not present, we issue a
- warning. Missing expected files can be written out by removing them
- before running the tests.
-
- Args:
- filename: A string, the name of the file to import using self.importer.
- Raises:
- AssertionError: If the contents differ from the expected file.
- """
- # Import the date.
- file = cache.get_file(filename)
- generated_basename = self.importer.file_name(file)
- if generated_basename is None:
- self.fail("No filename produced from {}".format(filename))
-
- # Check that we're getting a non-null relative simple filename.
- self.assertFalse(path.isabs(generated_basename), generated_basename)
- self.assertNotRegex(generated_basename, os.sep)
-
- expect_filename = '{}.file_name'.format(file.name)
- if path.exists(expect_filename) and path.getsize(expect_filename) > 0:
- expect_filename = open(expect_filename, encoding='utf-8').read().strip()
- self.assertEqual(expect_filename, generated_basename)
- else:
- # Write out the expected file for review.
- with open(expect_filename, 'w', encoding='utf-8') as file:
- print(generated_basename, file=file)
- self.skipTest("Expected file not present; generating '{}'".format(
- expect_filename))
-
Attempt to identify a file and expect results to be true.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
filename – A string, the name of the file to import using self.importer.
-
-
-
-
-
-
-
-
-
-
-
-
-
Exceptions:
-
-
-
AssertionError – If the contents differ from the expected file.
-
-
-
-
-
-
- Source code in beancount/ingest/regression.py
-
@test_utils.skipIfRaises(ToolNotInstalled)
-def test_expect_identify(self, filename, msg):
- """Attempt to identify a file and expect results to be true.
-
- Args:
- filename: A string, the name of the file to import using self.importer.
- Raises:
- AssertionError: If the contents differ from the expected file.
- """
- file = cache.get_file(filename)
- matched = self.importer.identify(file)
- self.assertTrue(matched)
-
An error to be used by converters when necessary software isn't there.
-
Raising this exception from your converter code when the tool is not
-installed will make the tests defined in this file skipped instead of
-failing. This will happen when you test your converters on different
-computers and/or platforms.
directory – A string, the directory to scour for sample files or a filename
- in that directory. If a directory is not provided, the directory of
- the file from which the importer class is defined is used.
-
ignore_cls – An optional base class of the importer whose methods should
-not trigger the addition of a test. For example, if you are deriving
-from a base class which is already well-tested, you may not want to have
-a regression test case generated for those methods. This was used to
-ignore methods provided from a common backwards compatibility support
-class.
-
-
-
-
-
Yields:
- Generated tests as per nose's requirements (a callable and arguments for
- it).
-
-
- Source code in beancount/ingest/regression.py
-
@deprecated("Use beancount.ingest.regression_pytest instead")
-def compare_sample_files(importer, directory=None, ignore_cls=None):
- """Compare the sample files under a directory.
-
- Args:
- importer: An instance of an Importer.
- directory: A string, the directory to scour for sample files or a filename
- in that directory. If a directory is not provided, the directory of
- the file from which the importer class is defined is used.
- ignore_cls: An optional base class of the importer whose methods should
- not trigger the addition of a test. For example, if you are deriving
- from a base class which is already well-tested, you may not want to have
- a regression test case generated for those methods. This was used to
- ignore methods provided from a common backwards compatibility support
- class.
- Yields:
- Generated tests as per nose's requirements (a callable and arguments for
- it).
- """
- # If the directory is not specified, use the directory where the importer
- # class was defined.
- if not directory:
- directory = sys.modules[type(importer).__module__].__file__
- if path.isfile(directory):
- directory = path.dirname(directory)
-
- for filename in find_input_files(directory):
- # For each of the methods to be tested, check if there is an actual
- # implementation and if so, run a comparison with an expected file.
- for name in ['identify',
- 'extract',
- 'file_date',
- 'file_name']:
- # Check if the method has been overridden from the protocol
- # interface. If so, even if it's provided by concretely inherited
- # method, we want to require a test against that method.
- func = getattr(importer, name).__func__
- if (func is not getattr(ImporterProtocol, name) and
- (ignore_cls is None or (func is not getattr(ignore_cls, name, None)))):
- method = getattr(ImportFileTestCase(importer),
- 'test_expect_{}'.format(name))
- yield (method, filename, name)
-
Find the input files in the module where the class is defined.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
directory – A string, the path to a root directory to check for.
-
-
-
-
-
Yields:
- Strings, the absolute filenames of sample input and expected files.
-
-
- Source code in beancount/ingest/regression.py
-
def find_input_files(directory):
- """Find the input files in the module where the class is defined.
-
- Args:
- directory: A string, the path to a root directory to check for.
- Yields:
- Strings, the absolute filenames of sample input and expected files.
- """
- for sroot, dirs, files in os.walk(directory):
- for filename in files:
- if re.match(r'.*\.(extract|file_date|file_name|py|pyc|DS_Store)$', filename):
- continue
- yield path.join(sroot, filename)
-
Support for implementing regression tests on sample files using pytest.
-
This module provides definitions for testing a custom importer against a set of
-existing downloaded files, running the various importer interface methods on it,
-and comparing the output to an expected text file. (Expected test files can be
-auto-generated using the --generate option). You use it like this:
# Create your importer instance used for testing.
- importer = mymodule.Importer(...)
-
# Select a directory where your test files are to be located.
- directory = ...
-
# Create a test case using the base in this class.
-
@regression_pytest.with_importer(importer)
- @regression_pytest.with_testdir(directory)
- class TestImporter(regtest.ImporterTestBase):
- pass
-
Also, to add the --generate option to 'pytest', you must create a conftest.py
-somewhere in one of the roots above your importers with this module as a plugin:
See beancount/example/ingest for a full working example.
-
How to invoke the tests:
-
Via pytest. First run your test with the --generate option to generate all the
-expected files. Then inspect them visually for correctness. Finally, check them
-in to preserve them. You should be able to regress against those correct outputs
-in the future. Use version control to your advantage to visualize the
-differences.
Compute the imported file date and compare to an expected output.
-
-
- Source code in beancount/ingest/regression_pytest.py
-
def test_file_date(self, importer, file, pytestconfig):
- """Compute the imported file date and compare to an expected output."""
- date = importer.file_date(file)
- string = date.isoformat() if date else ''
- compare_contents_or_generate(string, '{}.file_date'.format(file.name),
- pytestconfig.getoption("generate", False))
-
Compute the imported file name and compare to an expected output.
-
-
- Source code in beancount/ingest/regression_pytest.py
-
def test_file_name(self, importer, file, pytestconfig):
- """Compute the imported file name and compare to an expected output."""
- filename = importer.file_name(file) or ''
- compare_contents_or_generate(filename, '{}.file_name'.format(file.name),
- pytestconfig.getoption("generate", False))
-
Attempt to identify a file and expect results to be true.
-
This method does not need to check against an existing expect file. It
-is just assumed it should return True if your test is setup well (the
-importer should always identify the test file).
-
-
- Source code in beancount/ingest/regression_pytest.py
-
def test_identify(self, importer, file):
- """Attempt to identify a file and expect results to be true.
-
- This method does not need to check against an existing expect file. It
- is just assumed it should return True if your test is setup well (the
- importer should always identify the test file).
- """
- assert importer.identify(file)
-
Compare a string to the contents of an expect file.
-
Assert if different; auto-generate otherwise.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
actual_string – The expected string contents.
-
expect_fn – The filename whose contents to read and compare against.
-
generate – A boolean, true if we are to generate the tests.
-
-
-
-
-
-
- Source code in beancount/ingest/regression_pytest.py
-
def compare_contents_or_generate(actual_string, expect_fn, generate):
- """Compare a string to the contents of an expect file.
-
- Assert if different; auto-generate otherwise.
-
- Args:
- actual_string: The expected string contents.
- expect_fn: The filename whose contents to read and compare against.
- generate: A boolean, true if we are to generate the tests.
- """
- if generate:
- with open(expect_fn, 'w', encoding='utf-8') as expect_file:
- expect_file.write(actual_string)
- if actual_string and not actual_string.endswith('\n'):
- expect_file.write('\n')
- pytest.skip("Generated '{}'".format(expect_fn))
- else:
- # Run the test on an existing expected file.
- assert path.exists(expect_fn), (
- "Expected file '{}' is missing. Generate it?".format(expect_fn))
- with open(expect_fn, encoding='utf-8') as infile:
- expect_string = infile.read()
- assert expect_string.strip() == actual_string.strip()
-
Find the input files in the module where the class is defined.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
directory – A string, the path to a root directory to check for.
-
-
-
-
-
Yields:
- Strings, the absolute filenames of sample input and expected files.
-
-
- Source code in beancount/ingest/regression_pytest.py
-
def find_input_files(directory):
- """Find the input files in the module where the class is defined.
-
- Args:
- directory: A string, the path to a root directory to check for.
- Yields:
- Strings, the absolute filenames of sample input and expected files.
- """
- for sroot, dirs, files in os.walk(directory):
- for filename in files:
- if re.match(r'.*\.(extract|file_date|file_name|file_account|py|pyc|DS_Store)$',
- filename):
- continue
- yield path.join(sroot, filename)
-
Add an option to generate the expected files for the tests.
-
-
- Source code in beancount/ingest/regression_pytest.py
-
def pytest_addoption(parser):
- """Add an option to generate the expected files for the tests."""
- group = parser.getgroup("beancount")
- group.addoption("--generate", "--gen", action="store_true",
- help="Don't test; rather, generate the expected files")
-
Parametrizing fixture that provides files from a directory.
-
-
- Source code in beancount/ingest/regression_pytest.py
-
def with_testdir(directory):
- """Parametrizing fixture that provides files from a directory."""
- return pytest.mark.parametrize(
- "file", [cache.get_file(fn) for fn in find_input_files(directory)])
-
Create an arguments parser for all the ingestion bean-tools.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
description (str) – The program description string.
-
run_func – A callable function to run the particular command.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
An argparse.ArgumentParser instance, configured with the common ingestion arguments.
-
-
-
-
-
-
- Source code in beancount/ingest/scripts_utils.py
-
def create_legacy_arguments_parser(description: str, run_func: callable):
- """Create an arguments parser for all the ingestion bean-tools.
-
- Args:
- description: The program description string.
- func: A callable function to run the particular command.
- Returns:
- An argparse.Namespace instance with the rest of arguments in 'rest'.
- """
- parser = version.ArgumentParser(description=description)
-
- parser.add_argument('config', action='store', metavar='CONFIG_FILENAME',
- help=('Importer configuration file. '
- 'This is a Python file with a data structure that '
- 'is specific to your accounts'))
-
- parser.add_argument('downloads', nargs='+', metavar='DIR-OR-FILE',
- default=[],
- help='Filenames or directories to search for files to import')
-
- parser.set_defaults(command=run_func)
-
- return parser
-
This more explicit way of invoking the ingestion is now the preferred way to
-invoke the various tools, and replaces calling the bean-identify,
-bean-extract, bean-file tools with a --config argument. When you call the
-import script itself (as a program) it will parse the arguments, expecting
-a subcommand ('identify', 'extract' or 'file') and corresponding
-subcommand-specific arguments.
-
Here you can override some importer values, such as installing a custom
-duplicate finding hook, and eventually more. Note that this newer invocation
-method is optional and if it is not present, a call to ingest() is generated
-implicitly, and it functions as it used to. Future configurable
-customization of the ingestion process will be implemented by inserting new
-arguments to this function, this is the motivation behind doing this.
-
Note that invocation by the three bean-* ingestion tools is still supported,
-and calling ingest() explicitly from your import configuration file will not
-break these tools either, if you invoke them on it; the values you provide
-to this function will be used by those tools.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
importers_list – A list of importer instances. This is used as a
-chain-of-responsibility, called on each file.
-
detect_duplicates_func – (DEPRECATED) An optional function which accepts a
-list of lists of imported entries and a list of entries already existing
-in the user's ledger. See function find_duplicate_entries(), which is
-the default implementation for this. Use 'filter_funcs' instead.
-
hooks – An optional list of hook functions to apply to the list of extract
-(filename, entries) pairs, in order. This replaces
-'detect_duplicates_func'.
-
-
-
-
-
-
- Source code in beancount/ingest/scripts_utils.py
-
def ingest(importers_list, detect_duplicates_func=None, hooks=None):
- """Driver function that calls all the ingestion tools.
-
- Put a call to this function at the end of your importer configuration to
- make your import script; this should be its main function, like this:
-
- from beancount.ingest.scripts_utils import ingest
- my_importers = [ ... ]
- ingest(my_importers)
-
- This more explicit way of invoking the ingestion is now the preferred way to
- invoke the various tools, and replaces calling the bean-identify,
- bean-extract, bean-file tools with a --config argument. When you call the
- import script itself (as as program) it will parse the arguments, expecting
- a subcommand ('identify', 'extract' or 'file') and corresponding
- subcommand-specific arguments.
-
- Here you can override some importer values, such as installing a custom
- duplicate finding hook, and eventually more. Note that this newer invocation
- method is optional and if it is not present, a call to ingest() is generated
- implicitly, and it functions as it used to. Future configurable
- customization of the ingestion process will be implemented by inserting new
- arguments to this function, this is the motivation behind doing this.
-
- Note that invocation by the three bean-* ingestion tools is still supported,
- and calling ingest() explicitly from your import configuration file will not
- break these tools either, if you invoke them on it; the values you provide
- to this function will be used by those tools.
-
- Args:
- importers_list: A list of importer instances. This is used as a
- chain-of-responsibility, called on each file.
- detect_duplicates_func: (DEPRECATED) An optional function which accepts a
- list of lists of imported entries and a list of entries already existing
- in the user's ledger. See function find_duplicate_entries(), which is
- the default implementation for this. Use 'filter_funcs' instead.
- hooks: An optional list of hook functions to apply to the list of extract
- (filename, entries) pairs, in order. This replaces
- 'detect_duplicates_func'.
- """
- if detect_duplicates_func is not None:
- warnings.warn("Argument 'detect_duplicates_func' is deprecated.")
- # Fold it in hooks.
- if hooks is None:
- hooks = []
- hooks.insert(0, detect_duplicates_func)
- del detect_duplicates_func
-
- if ingest.args is not None:
- # The script has been called from one of the bean-* ingestion tools.
- # 'ingest.args' is only set when we're being invoked from one of the
- # bean-xxx tools (see below).
-
- # Mark this function as called, so that if it is called from an import
- # triggered by one of the ingestion tools, it won't be called again
- # afterwards.
- ingest.was_called = True
-
- # Use those args rather than to try to parse the command-line arguments
- # from a naked ingest() call as a script. {39c7af4f6af5}
- args, parser = ingest.args
- else:
- # The script is called directly. This is the main program of the import
- # script itself. This is the new invocation method.
- parser = version.ArgumentParser(description=DESCRIPTION)
-
- # Use required on subparsers.
- # FIXME: Remove this when we require version 3.7 or above.
- kwargs = {}
- if sys.version_info >= (3, 7):
- kwargs['required'] = True
- subparsers = parser.add_subparsers(dest='command', **kwargs)
-
- parser.add_argument('--downloads', '-d', metavar='DIR-OR-FILE',
- action='append', default=[],
- help='Filenames or directories to search for files to import')
-
- for cmdname, module in [('identify', identify),
- ('extract', extract),
- ('file', file)]:
- parser_cmd = subparsers.add_parser(cmdname, help=module.DESCRIPTION)
- parser_cmd.set_defaults(command=module.run)
- module.add_arguments(parser_cmd)
-
- args = parser.parse_args()
-
- if not args.downloads:
- args.downloads.append(os.getcwd())
-
- # Implement required ourselves.
- # FIXME: Remove this when we require version 3.7 or above.
- if not (sys.version_info >= (3, 7)):
- if not hasattr(args, 'command'):
- parser.error("Subcommand is required.")
-
- abs_downloads = list(map(path.abspath, args.downloads))
- args.command(args, parser, importers_list, abs_downloads, hooks=hooks)
- return 0
-
Run the import script and optionally call ingest().
-
This path is only called when trampolined by one of the bean-* ingestion
-tools.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
parser – The parser instance, used only to report errors.
-
importers_attr_name – The name of the special attribute in the module which
-defines the importers list.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
An execution return code.
-
-
-
-
-
-
- Source code in beancount/ingest/scripts_utils.py
-
def run_import_script_and_ingest(parser, argv=None, importers_attr_name='CONFIG'):
- """Run the import script and optionally call ingest().
-
- This path is only called when trampolined by one of the bean-* ingestion
- tools.
-
- Args:
- parser: The parser instance, used only to report errors.
- importers_attr_name: The name of the special attribute in the module which
- defines the importers list.
- Returns:
- An execution return code.
- """
- args = parser.parse_args(args=argv)
-
- # Check the existence of the config.
- if not path.exists(args.config) or path.isdir(args.config):
- parser.error("File does not exist: '{}'".format(args.config))
-
- # Check the existence of all specified files.
- for filename in args.downloads:
- if not path.exists(filename):
- parser.error("File does not exist: '{}'".format(filename))
-
- # Reset the state of ingest() being called (for unit tests, which use the
- # same runtime with run_with_args).
- ingest.was_called = False
-
- # Save the arguments parsed from the command-line as default for
- # {39c7af4f6af5}.
- ingest.args = args, parser
-
- # Evaluate the importer script/module.
- mod = runpy.run_path(args.config)
-
- # If the importer script has already called ingest() within itself, don't
- # call it again. We're done. This allows the use to insert an explicit call
- # to ingest() while still running the bean-* ingestion tools on the file.
- if ingest.was_called:
- return 0
-
- # ingest() hasn't been called by the script so we assume it isn't
- # present in it. So we now run the ingestion by ourselves here, without
- # specifying any of the newer optional arguments.
- importers_list = mod[importers_attr_name]
- return ingest(importers_list)
-
Parse arguments for bean tool, import config script and ingest.
-
This function is called by the three bean-* tools to support the older
-import files, which only required a CONFIG object to be defined in them.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
module – One of the identify, extract or file module objects.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
An execution return code.
-
-
-
-
-
-
- Source code in beancount/ingest/scripts_utils.py
-
def trampoline_to_ingest(module):
- """Parse arguments for bean tool, import config script and ingest.
-
- This function is called by the three bean-* tools to support the older
- import files, which only required a CONFIG object to be defined in them.
-
- Args:
- module: One of the identify, extract or file module objects.
- Returns:
- An execution return code.
- """
- # Disable debugging logging which is turned on by default in chardet.
- logging.getLogger('chardet.charsetprober').setLevel(logging.INFO)
- logging.getLogger('chardet.universaldetector').setLevel(logging.INFO)
-
- parser = create_legacy_arguments_parser(module.DESCRIPTION, module.run)
- module.add_arguments(parser)
- return run_import_script_and_ingest(parser)
-
This comparator needs to be able to handle Transaction instances which are
-incomplete on one side, which have slightly different dates, or potentially
-slightly different numbers.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-beancount.ingest.similar.SimilarityComparator.__call__(self, entry1, entry2)
-
-
- special
-
-
-
-
-
-
-
Compare two entries, return true if they are deemed similar.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
entry1 – A first Transaction directive.
-
entry2 – A second Transaction directive.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A boolean.
-
-
-
-
-
-
- Source code in beancount/ingest/similar.py
-
def __call__(self, entry1, entry2):
- """Compare two entries, return true if they are deemed similar.
-
- Args:
- entry1: A first Transaction directive.
- entry2: A second Transaction directive.
- Returns:
- A boolean.
- """
- # Check the date difference.
- if self.max_date_delta is not None:
- delta = ((entry1.date - entry2.date)
- if entry1.date > entry2.date else
- (entry2.date - entry1.date))
- if delta > self.max_date_delta:
- return False
-
- try:
- amounts1 = self.cache[id(entry1)]
- except KeyError:
- amounts1 = self.cache[id(entry1)] = amounts_map(entry1)
- try:
- amounts2 = self.cache[id(entry2)]
- except KeyError:
- amounts2 = self.cache[id(entry2)] = amounts_map(entry2)
-
- # Look for amounts on common accounts.
- common_keys = set(amounts1) & set(amounts2)
- for key in sorted(common_keys):
- # Compare the amounts.
- number1 = amounts1[key]
- number2 = amounts2[key]
- if number1 == ZERO and number2 == ZERO:
- break
- diff = abs((number1 / number2)
- if number2 != ZERO
- else (number2 / number1))
- if diff == ZERO:
- return False
- if diff < ONE:
- diff = ONE/diff
- if (diff - ONE) < self.EPSILON:
- break
- else:
- return False
-
- # Here, we have found at least one common account with a close
- # amount. Now, we require that the set of accounts are equal or that
- # one be a subset of the other.
- accounts1 = set(posting.account for posting in entry1.postings)
- accounts2 = set(posting.account for posting in entry2.postings)
- return accounts1.issubset(accounts2) or accounts2.issubset(accounts1)
-
-
-
-
-
-
-
-
-
-
-
-
-
-beancount.ingest.similar.SimilarityComparator.__init__(self, max_date_delta=None)
-
-
- special
-
-
-
-
-
-
-
Construct a comparator of entries.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
max_date_delta – A datetime.timedelta instance of the max tolerated
-distance between dates.
-
-
-
-
-
-
- Source code in beancount/ingest/similar.py
-
def __init__(self, max_date_delta=None):
- """Construct a comparator of entries.
- Args:
- max_date_delta: A datetime.timedelta instance of the max tolerated
- distance between dates.
- """
- self.cache = {}
- self.max_date_delta = max_date_delta
-
Find which entries from a list are potential duplicates of a set.
-
Note: If there are multiple entries from 'source_entries' matching an entry
-in 'entries', only the first match is returned. Note that this function
-could in theory decide to merge some of the imported entries with each
-other.
-
-
-
-
-
-
-
-
-
Parameters:
-
-
-
entries – The list of entries to classify as duplicate or not.
-
source_entries – The list of entries against which to match. This is the
-previous, or existing set of entries to compare against. This may be null
-or empty.
-
comparator – A functor used to establish the similarity of two entries.
-
window_days – The number of days (inclusive) before or after to scan the
-entries to classify against.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A list of pairs of entries (entry, source_entry) where entry is from
-'entries' and is deemed to be a duplicate of source_entry, from
-'source_entries'.
-
-
-
-
-
-
- Source code in beancount/ingest/similar.py
-
def find_similar_entries(entries, source_entries, comparator=None, window_days=2):
- """Find which entries from a list are potential duplicates of a set.
-
- Note: If there are multiple entries from 'source_entries' matching an entry
- in 'entries', only the first match is returned. Note that this function
- could in theory decide to merge some of the imported entries with each
- other.
-
- Args:
- entries: The list of entries to classify as duplicate or not.
- source_entries: The list of entries against which to match. This is the
- previous, or existing set of entries to compare against. This may be null
- or empty.
- comparator: A functor used to establish the similarity of two entries.
- window_days: The number of days (inclusive) before or after to scan the
- entries to classify against.
- Returns:
- A list of pairs of entries (entry, source_entry) where entry is from
- 'entries' and is deemed to be a duplicate of source_entry, from
- 'source_entries'.
- """
- window_head = datetime.timedelta(days=window_days)
- window_tail = datetime.timedelta(days=window_days + 1)
-
- if comparator is None:
- comparator = SimilarityComparator()
-
- # For each of the new entries, look at existing entries at a nearby date.
- duplicates = []
- if source_entries is not None:
- for entry in data.filter_txns(entries):
- for source_entry in data.filter_txns(
- data.iter_entry_dates(source_entries,
- entry.date - window_head,
- entry.date + window_tail)):
- if comparator(entry, source_entry):
- duplicates.append((entry, source_entry))
- break
- return duplicates
-
options_map – The target map in which we want to aggregate attributes.
Note: This value is mutated in-place.
-
src_options_map – A source map whose values we'd like to see aggregated.
+
other_options_map – A list of other options maps, some of whose values
+we'd like to see aggregated.
@@ -440,23 +436,32 @@
Source code in beancount/loader.py
-
def aggregate_options_map(options_map, src_options_map):
- """Aggregate some of the attributes of options map.
-
- Args:
- options_map: The target map in which we want to aggregate attributes.
- Note: This value is mutated in-place.
- src_options_map: A source map whose values we'd like to see aggregated.
- """
- op_currencies = options_map["operating_currency"]
- for currency in src_options_map["operating_currency"]:
- if currency not in op_currencies:
- op_currencies.append(currency)
-
- commodities = options_map["commodities"]
- for currency in src_options_map["commodities"]:
- commodities.add(currency)
-
+
defaggregate_options_map(options_map,other_options_map):
+"""Aggregate some of the attributes of options map.
+
+ Args:
+ options_map: The target map in which we want to aggregate attributes.
+ Note: This value is mutated in-place.
+ other_options_map: A list of other options maps, some of whose values
+ we'd like to see aggregated.
+ """
+ options_map=copy.copy(options_map)
+
+ currencies=list(options_map["operating_currency"])
+ foromapinother_options_map:
+ currencies.extend(omap["operating_currency"])
+ options_map["dcontext"].update_from(omap["dcontext"])
+ options_map["operating_currency"]=list(misc_utils.uniquify(currencies))
+
+ # Produce a 'pythonpath' value for transformers.
+ pythonpath=set()
+ foromapinitertools.chain((options_map,),other_options_map):
+ ifomap.get("insert_pythonpath",False):
+ pythonpath.add(path.dirname(omap["filename"]))
+ options_map["pythonpath"]=sorted(pythonpath)
+
+ returnoptions_map
+
def combine_plugins(*plugin_modules):
- """Combine the plugins from the given plugin modules.
-
- This is used to create plugins of plugins.
- Args:
- *plugins_modules: A sequence of module objects.
- Returns:
- A list that can be assigned to the new module's __plugins__ attribute.
- """
- modules = []
- for module in plugin_modules:
- modules.extend([getattr(module, name)
- for name in module.__plugins__])
- return modules
-
+
defcombine_plugins(*plugin_modules):
+"""Combine the plugins from the given plugin modules.
+
+ This is used to create plugins of plugins.
+ Args:
+ *plugins_modules: A sequence of module objects.
+ Returns:
+ A list that can be assigned to the new module's __plugins__ attribute.
+ """
+ modules=[]
+ formoduleinplugin_modules:
+ modules.extend([getattr(module,name)fornameinmodule.__plugins__])
+ returnmodules
+
def compute_input_hash(filenames):
- """Compute a hash of the input data.
-
- Args:
- filenames: A list of input files. Order is not relevant.
- """
- md5 = hashlib.md5()
- for filename in sorted(filenames):
- md5.update(filename.encode('utf8'))
- if not path.exists(filename):
- continue
- stat = os.stat(filename)
- md5.update(struct.pack('dd', stat.st_mtime_ns, stat.st_size))
- return md5.hexdigest()
-
+
defcompute_input_hash(filenames):
+"""Compute a hash of the input data.
+
+ Args:
+ filenames: A list of input files. Order is not relevant.
+ """
+ md5=hashlib.md5()
+ forfilenameinsorted(filenames):
+ md5.update(filename.encode("utf8"))
+ ifnotpath.exists(filename):
+ continue
+ stat=os.stat(filename)
+ md5.update(struct.pack("dd",stat.st_mtime_ns,stat.st_size))
+ returnmd5.hexdigest()
+
def delete_cache_function(cache_getter, function):
- """A wrapper that removes the cached filename.
-
- Args:
- cache_getter: A function of one argument, the top-level filename, which
- will return the name of the corresponding cache file.
- function: A function object to decorate for caching.
- Returns:
- A decorated function which will delete the cached filename, if it exists.
- """
- @functools.wraps(function)
- def wrapped(toplevel_filename, *args, **kw):
- # Delete the cache.
- cache_filename = cache_getter(toplevel_filename)
- if path.exists(cache_filename):
- os.remove(cache_filename)
-
- # Invoke the original function.
- return function(toplevel_filename, *args, **kw)
- return wrapped
-
+
defdelete_cache_function(cache_getter,function):
+"""A wrapper that removes the cached filename.
+
+ Args:
+ cache_getter: A function of one argument, the top-level filename, which
+ will return the name of the corresponding cache file.
+ function: A function object to decorate for caching.
+ Returns:
+ A decorated function which will delete the cached filename, if it exists.
+ """
+
+ @functools.wraps(function)
+ defwrapped(toplevel_filename,*args,**kw):
+ # Delete the cache.
+ cache_filename=cache_getter(toplevel_filename)
+ ifpath.exists(cache_filename):
+ os.remove(cache_filename)
+
+ # Invoke the original function.
+ returnfunction(toplevel_filename,*args,**kw)
+
+ returnwrapped
+
def get_cache_filename(pattern: str, filename: str) -> str:
- """Compute the cache filename from a given pattern and the top-level filename.
-
- Args:
- pattern: A cache filename or pattern. If the pattern contains '{filename}' this
- will get replaced by the top-level filename. This may be absolute or relative.
- filename: The top-level filename.
- Returns:
- The resolved cache filename.
- """
- abs_filename = path.abspath(filename)
- if path.isabs(pattern):
- abs_pattern = pattern
- else:
- abs_pattern = path.join(path.dirname(abs_filename), pattern)
- return abs_pattern.format(filename=path.basename(filename))
-
+
defget_cache_filename(pattern:str,filename:str)->str:
+"""Compute the cache filename from a given pattern and the top-level filename.
+
+ Args:
+ pattern: A cache filename or pattern. If the pattern contains '{filename}' this
+ will get replaced by the top-level filename. This may be absolute or relative.
+ filename: The top-level filename.
+ Returns:
+ The resolved cache filename.
+ """
+ abs_filename=path.abspath(filename)
+ ifpath.isabs(pattern):
+ abs_pattern=pattern
+ else:
+ abs_pattern=path.join(path.dirname(abs_filename),pattern)
+ returnabs_pattern.format(filename=path.basename(filename))
+
def initialize(use_cache: bool, cache_filename: Optional[str] = None):
- """Initialize the loader."""
-
- # Unless an environment variable disables it, use the pickle load cache
- # automatically. Note that this works across all Python programs running the
- # loader which is why it's located here.
- # pylint: disable=invalid-name
- global _load_file
-
- # Make a function to compute the cache filename.
- cache_pattern = (cache_filename or
- os.getenv('BEANCOUNT_LOAD_CACHE_FILENAME') or
- PICKLE_CACHE_FILENAME)
- cache_getter = functools.partial(get_cache_filename, cache_pattern)
-
- if use_cache:
- _load_file = pickle_cache_function(cache_getter, PICKLE_CACHE_THRESHOLD,
- _uncached_load_file)
- else:
- if cache_filename is not None:
- logging.warning("Cache disabled; "
- "Explicitly overridden cache filename %s will be ignored.",
- cache_filename)
- _load_file = delete_cache_function(cache_getter,
- _uncached_load_file)
-
+
definitialize(use_cache:bool,cache_filename:Optional[str]=None):
+"""Initialize the loader."""
+
+ # Unless an environment variable disables it, use the pickle load cache
+ # automatically. Note that this works across all Python programs running the
+ # loader which is why it's located here.
+
+ global_load_file
+
+ # Make a function to compute the cache filename.
+ cache_pattern=(
+ cache_filename
+ oros.getenv("BEANCOUNT_LOAD_CACHE_FILENAME")
+ orPICKLE_CACHE_FILENAME
+ )
+ cache_getter=functools.partial(get_cache_filename,cache_pattern)
+
+ ifuse_cache:
+ _load_file=pickle_cache_function(
+ cache_getter,PICKLE_CACHE_THRESHOLD,_uncached_load_file
+ )
+ else:
+ ifcache_filenameisnotNone:
+ logging.warning(
+ "Cache disabled; "
+ "Explicitly overridden cache filename %s will be ignored.",
+ cache_filename,
+ )
+ _load_file=delete_cache_function(cache_getter,_uncached_load_file)
+
def load_doc(expect_errors=False):
- """A factory of decorators that loads the docstring and calls the function with entries.
-
- This is an incredibly convenient tool to write lots of tests. Write a
- unittest using the standard TestCase class and put the input entries in the
- function's docstring.
-
- Args:
- expect_errors: A boolean or None, with the following semantics,
- True: Expect errors and fail if there are none.
- False: Expect no errors and fail if there are some.
- None: Do nothing, no check.
- Returns:
- A wrapped method that accepts a single 'self' argument.
- """
- def decorator(fun):
- """A decorator that parses the function's docstring as an argument.
-
- Args:
- fun: A callable method, that accepts the three return arguments that
- load() returns.
- Returns:
- A decorated test function.
- """
- @functools.wraps(fun)
- def wrapper(self):
- entries, errors, options_map = load_string(fun.__doc__, dedent=True)
-
- if expect_errors is not None:
- if expect_errors is False and errors:
- oss = io.StringIO()
- printer.print_errors(errors, file=oss)
- self.fail("Unexpected errors found:\n{}".format(oss.getvalue()))
- elif expect_errors is True and not errors:
- self.fail("Expected errors, none found:")
-
- # Note: Even if we expected no errors, we call this function with an
- # empty 'errors' list. This is so that the interface does not change
- # based on the arguments to the decorator, which would be somewhat
- # ugly and which would require explanation.
- return fun(self, entries, errors, options_map)
-
- wrapper.__input__ = wrapper.__doc__
- wrapper.__doc__ = None
- return wrapper
-
- return decorator
-
+
defload_doc(expect_errors=False):
+"""A factory of decorators that loads the docstring and calls the function with entries.
+
+ This is an incredibly convenient tool to write lots of tests. Write a
+ unittest using the standard TestCase class and put the input entries in the
+ function's docstring.
+
+ Args:
+ expect_errors: A boolean or None, with the following semantics,
+ True: Expect errors and fail if there are none.
+ False: Expect no errors and fail if there are some.
+ None: Do nothing, no check.
+ Returns:
+ A wrapped method that accepts a single 'self' argument.
+ """
+
+ defdecorator(fun):
+"""A decorator that parses the function's docstring as an argument.
+
+ Args:
+ fun: A callable method, that accepts the three return arguments that
+ load() returns.
+ Returns:
+ A decorated test function.
+ """
+
+ @functools.wraps(fun)
+ defwrapper(self):
+ entries,errors,options_map=load_string(fun.__doc__,dedent=True)
+
+ ifexpect_errorsisnotNone:
+ ifexpect_errorsisFalseanderrors:
+ oss=io.StringIO()
+ printer.print_errors(errors,file=oss)
+ self.fail("Unexpected errors found:\n{}".format(oss.getvalue()))
+ elifexpect_errorsisTrueandnoterrors:
+ self.fail("Expected errors, none found:")
+
+ # Note: Even if we expected no errors, we call this function with an
+ # empty 'errors' list. This is so that the interface does not change
+ # based on the arguments to the decorator, which would be somewhat
+ # ugly and which would require explanation.
+ returnfun(self,entries,errors,options_map)
+
+ wrapper.__input__=wrapper.__doc__
+ wrapper.__doc__=None
+ returnwrapper
+
+ returndecorator
+
def load_encrypted_file(filename, log_timings=None, log_errors=None, extra_validations=None,
- dedent=False, encoding=None):
- """Load an encrypted Beancount input file.
-
- Args:
- filename: The name of an encrypted file to be parsed.
- log_timings: See load_string().
- log_errors: See load_string().
- extra_validations: See load_string().
- dedent: See load_string().
- encoding: See load_string().
- Returns:
- A triple of (entries, errors, option_map) where "entries" is a date-sorted
- list of entries from the file, "errors" a list of error objects generated
- while parsing and validating the file, and "options_map", a dict of the
- options parsed from the file.
- """
- contents = encryption.read_encrypted_file(filename)
- return load_string(contents,
- log_timings=log_timings,
- log_errors=log_errors,
- extra_validations=extra_validations,
- encoding=encoding)
-
+
defload_encrypted_file(
+ filename,
+ log_timings=None,
+ log_errors=None,
+ extra_validations=None,
+ dedent=False,
+ encoding=None,
+):
+"""Load an encrypted Beancount input file.
+
+ Args:
+ filename: The name of an encrypted file to be parsed.
+ log_timings: See load_string().
+ log_errors: See load_string().
+ extra_validations: See load_string().
+ dedent: See load_string().
+ encoding: See load_string().
+ Returns:
+ A triple of (entries, errors, option_map) where "entries" is a date-sorted
+ list of entries from the file, "errors" a list of error objects generated
+ while parsing and validating the file, and "options_map", a dict of the
+ options parsed from the file.
+ """
+ contents=encryption.read_encrypted_file(filename)
+ returnload_string(
+ contents,
+ log_timings=log_timings,
+ log_errors=log_errors,
+ extra_validations=extra_validations,
+ encoding=encoding,
+ )
+
log_timings – A file object or function to write timings to,
-or None, if it should remain quiet.
+or None, if it should remain quiet. (Note that this is intended to use
+the logging methods and does not insert a newline.)
log_errors – A file object or function to write errors to,
or None, if it should remain quiet.
extra_validations – A list of extra validation functions to run after loading
@@ -1044,42 +1065,43 @@
Source code in beancount/loader.py
-
def load_file(filename, log_timings=None, log_errors=None, extra_validations=None,
- encoding=None):
- """Open a Beancount input file, parse it, run transformations and validate.
-
- Args:
- filename: The name of the file to be parsed.
- log_timings: A file object or function to write timings to,
- or None, if it should remain quiet.
- log_errors: A file object or function to write errors to,
- or None, if it should remain quiet.
- extra_validations: A list of extra validation functions to run after loading
- this list of entries.
- encoding: A string or None, the encoding to decode the input filename with.
- Returns:
- A triple of (entries, errors, option_map) where "entries" is a date-sorted
- list of entries from the file, "errors" a list of error objects generated
- while parsing and validating the file, and "options_map", a dict of the
- options parsed from the file.
- """
- filename = path.expandvars(path.expanduser(filename))
- if not path.isabs(filename):
- filename = path.normpath(path.join(os.getcwd(), filename))
-
- if encryption.is_encrypted_file(filename):
- # Note: Caching is not supported for encrypted files.
- entries, errors, options_map = load_encrypted_file(
- filename,
- log_timings, log_errors,
- extra_validations, False, encoding)
- else:
- entries, errors, options_map = _load_file(
- filename, log_timings,
- extra_validations, encoding)
- _log_errors(errors, log_errors)
- return entries, errors, options_map
-
+
defload_file(
+ filename,log_timings=None,log_errors=None,extra_validations=None,encoding=None
+):
+"""Open a Beancount input file, parse it, run transformations and validate.
+
+ Args:
+ filename: The name of the file to be parsed.
+ log_timings: A file object or function to write timings to,
+ or None, if it should remain quiet. (Note that this is intended to use
+ the logging methods and does not insert a newline.)
+ log_errors: A file object or function to write errors to,
+ or None, if it should remain quiet.
+ extra_validations: A list of extra validation functions to run after loading
+ this list of entries.
+ encoding: A string or None, the encoding to decode the input filename with.
+ Returns:
+ A triple of (entries, errors, option_map) where "entries" is a date-sorted
+ list of entries from the file, "errors" a list of error objects generated
+ while parsing and validating the file, and "options_map", a dict of the
+ options parsed from the file.
+ """
+ filename=path.expandvars(path.expanduser(filename))
+ ifnotpath.isabs(filename):
+ filename=path.normpath(path.join(os.getcwd(),filename))
+
+ ifencryption.is_encrypted_file(filename):
+ # Note: Caching is not supported for encrypted files.
+ entries,errors,options_map=load_encrypted_file(
+ filename,log_timings,log_errors,extra_validations,False,encoding
+ )
+ else:
+ entries,errors,options_map=_load_file(
+ filename,log_timings,extra_validations,encoding
+ )
+ _log_errors(errors,log_errors)
+ returnentries,errors,options_map
+
def load_string(string, log_timings=None, log_errors=None, extra_validations=None,
- dedent=False, encoding=None):
-
- """Open a Beancount input string, parse it, run transformations and validate.
-
- Args:
- string: A Beancount input string.
- log_timings: A file object or function to write timings to,
- or None, if it should remain quiet.
- log_errors: A file object or function to write errors to,
- or None, if it should remain quiet.
- extra_validations: A list of extra validation functions to run after loading
- this list of entries.
- dedent: A boolean, if set, remove the whitespace in front of the lines.
- encoding: A string or None, the encoding to decode the input string with.
- Returns:
- A triple of (entries, errors, option_map) where "entries" is a date-sorted
- list of entries from the string, "errors" a list of error objects
- generated while parsing and validating the string, and "options_map", a
- dict of the options parsed from the string.
- """
- if dedent:
- string = textwrap.dedent(string)
- entries, errors, options_map = _load([(string, False)], log_timings,
- extra_validations, encoding)
- _log_errors(errors, log_errors)
- return entries, errors, options_map
-
+
defload_string(
+ string,
+ log_timings=None,
+ log_errors=None,
+ extra_validations=None,
+ dedent=False,
+ encoding=None,
+):
+"""Open a Beancount input string, parse it, run transformations and validate.
+
+ Args:
+ string: A Beancount input string.
+ log_timings: A file object or function to write timings to,
+ or None, if it should remain quiet.
+ log_errors: A file object or function to write errors to,
+ or None, if it should remain quiet.
+ extra_validations: A list of extra validation functions to run after loading
+ this list of entries.
+ dedent: A boolean, if set, remove the whitespace in front of the lines.
+ encoding: A string or None, the encoding to decode the input string with.
+ Returns:
+ A triple of (entries, errors, option_map) where "entries" is a date-sorted
+ list of entries from the string, "errors" a list of error objects
+ generated while parsing and validating the string, and "options_map", a
+ dict of the options parsed from the string.
+ """
+ ifdedent:
+ string=textwrap.dedent(string)
+ entries,errors,options_map=_load(
+ [(string,False)],log_timings,extra_validations,encoding
+ )
+ _log_errors(errors,log_errors)
+ returnentries,errors,options_map
+
def needs_refresh(options_map):
- """Predicate that returns true if at least one of the input files may have changed.
-
- Args:
- options_map: An options dict as per the parser.
- mtime: A modified time, to check if it covers the include files in the options_map.
- Returns:
- A boolean, true if the input is obsoleted by changes in the input files.
- """
- if options_map is None:
- return True
- input_hash = compute_input_hash(options_map['include'])
- return 'input_hash' not in options_map or input_hash != options_map['input_hash']
-
+
defneeds_refresh(options_map):
+"""Predicate that returns true if at least one of the input files may have changed.
+
+ Args:
+ options_map: An options dict as per the parser.
+ mtime: A modified time, to check if it covers the include files in the options_map.
+ Returns:
+ A boolean, true if the input is obsoleted by changes in the input files.
+ """
+ ifoptions_mapisNone:
+ returnTrue
+ input_hash=compute_input_hash(options_map["include"])
+ return"input_hash"notinoptions_maporinput_hash!=options_map["input_hash"]
+
def pickle_cache_function(cache_getter, time_threshold, function):
- """Decorate a loader function to make it load its result from a pickle cache.
-
- This considers the first argument as a top-level filename and assumes the
- function to be cached returns an (entries, errors, options_map) triple. We
- use the 'include' option value in order to check whether any of the included
- files has changed. It's essentially a special case for an on-disk memoizer.
- If any of the included files are more recent than the cache, the function is
- recomputed and the cache refreshed.
-
- Args:
- cache_getter: A function of one argument, the top-level filename, which
- will return the name of the corresponding cache file.
- time_threshold: A float, the number of seconds below which we don't bother
- caching.
- function: A function object to decorate for caching.
- Returns:
- A decorated function which will pull its result from a cache file if
- it is available.
- """
- @functools.wraps(function)
- def wrapped(toplevel_filename, *args, **kw):
- cache_filename = cache_getter(toplevel_filename)
-
- # Read the cache if it exists in order to get the list of files whose
- # timestamps to check.
- exists = path.exists(cache_filename)
- if exists:
- with open(cache_filename, 'rb') as file:
- try:
- result = pickle.load(file)
- except Exception as exc:
- # Note: Not a big fan of doing this, but here we handle all
- # possible exceptions because unpickling of an old or
- # corrupted pickle file manifests as a variety of different
- # exception types.
-
- # The cache file is corrupted; ignore it and recompute.
- logging.error("Cache file is corrupted: %s; recomputing.", exc)
- result = None
-
- else:
- # Check that the latest timestamp has not been written after the
- # cache file.
- entries, errors, options_map = result
- if not needs_refresh(options_map):
- # All timestamps are legit; cache hit.
- return result
-
- # We failed; recompute the value.
- if exists:
- try:
- os.remove(cache_filename)
- except OSError as exc:
- # Warn for errors on read-only filesystems.
- logging.warning("Could not remove picklecache file %s: %s",
- cache_filename, exc)
-
- time_before = time.time()
- result = function(toplevel_filename, *args, **kw)
- time_after = time.time()
-
- # Overwrite the cache file if the time it takes to compute it
- # justifies it.
- if time_after - time_before > time_threshold:
- try:
- with open(cache_filename, 'wb') as file:
- pickle.dump(result, file)
- except Exception as exc:
- logging.warning("Could not write to picklecache file %s: %s",
- cache_filename, exc)
-
- return result
- return wrapped
-
+
defpickle_cache_function(cache_getter,time_threshold,function):
+"""Decorate a loader function to make it load its result from a pickle cache.
+
+ This considers the first argument as a top-level filename and assumes the
+ function to be cached returns an (entries, errors, options_map) triple. We
+ use the 'include' option value in order to check whether any of the included
+ files has changed. It's essentially a special case for an on-disk memoizer.
+ If any of the included files are more recent than the cache, the function is
+ recomputed and the cache refreshed.
+
+ Args:
+ cache_getter: A function of one argument, the top-level filename, which
+ will return the name of the corresponding cache file.
+ time_threshold: A float, the number of seconds below which we don't bother
+ caching.
+ function: A function object to decorate for caching.
+ Returns:
+ A decorated function which will pull its result from a cache file if
+ it is available.
+ """
+
+ @functools.wraps(function)
+ defwrapped(toplevel_filename,*args,**kw):
+ cache_filename=cache_getter(toplevel_filename)
+
+ # Read the cache if it exists in order to get the list of files whose
+ # timestamps to check.
+ exists=path.exists(cache_filename)
+ ifexists:
+ withopen(cache_filename,"rb")asfile:
+ try:
+ result=pickle.load(file)
+ exceptExceptionasexc:
+ # Note: Not a big fan of doing this, but here we handle all
+ # possible exceptions because unpickling of an old or
+ # corrupted pickle file manifests as a variety of different
+ # exception types.
+
+ # The cache file is corrupted; ignore it and recompute.
+ logging.error("Cache file is corrupted: %s; recomputing.",exc)
+ result=None
+
+ else:
+ # Check that the latest timestamp has not been written after the
+ # cache file.
+ entries,errors,options_map=result
+ ifnotneeds_refresh(options_map):
+ # All timestamps are legit; cache hit.
+ returnresult
+
+ # We failed; recompute the value.
+ ifexists:
+ try:
+ os.remove(cache_filename)
+ exceptOSErrorasexc:
+ # Warn for errors on read-only filesystems.
+ logging.warning(
+ "Could not remove picklecache file %s: %s",cache_filename,exc
+ )
+
+ time_before=time.time()
+ result=function(toplevel_filename,*args,**kw)
+ time_after=time.time()
+
+ # Overwrite the cache file if the time it takes to compute it
+ # justifies it.
+ iftime_after-time_before>time_threshold:
+ try:
+ withopen(cache_filename,"wb")asfile:
+ pickle.dump(result,file)
+ exceptExceptionasexc:
+ logging.warning(
+ "Could not write to picklecache file %s: %s",cache_filename,exc
+ )
+
+ returnresult
+
+ returnwrapped
+
def run_transformations(entries, parse_errors, options_map, log_timings):
    """Run the various transformations on the entries.

    This is where entries are being synthesized, checked, plugins are run, etc.

    Args:
      entries: A list of directives as read from the parser.
      parse_errors: A list of errors so far.
      options_map: An options dict as read from the parser.
      log_timings: A function to write timing log entries to, or None, if it
        should be quiet.
    Returns:
      A list of modified entries, and a list of errors, also possibly modified.
    Raises:
      ValueError: If options_map['plugin_processing_mode'] is not a recognized
        value.
    """
    # A list of errors to extend (make a copy to avoid modifying the input).
    errors = list(parse_errors)

    # Process the plugins.
    if options_map["plugin_processing_mode"] == "raw":
        plugins_iter = options_map["plugin"]
    elif options_map["plugin_processing_mode"] == "default":
        plugins_iter = itertools.chain(
            PLUGINS_PRE, options_map["plugin"], PLUGINS_AUTO, PLUGINS_POST
        )
    else:
        # Bug fix: this used to be `assert "Invalid value..."`, which is a
        # no-op (a non-empty string is always truthy) and let execution fall
        # through to a NameError on 'plugins_iter'. Fail explicitly instead.
        raise ValueError(
            "Invalid value for plugin_processing_mode: {}".format(
                options_map["plugin_processing_mode"]
            )
        )

    for plugin_name, plugin_config in plugins_iter:
        # Issue a warning on a renamed module.
        renamed_name = RENAMED_MODULES.get(plugin_name, None)
        if renamed_name:
            warnings.warn(
                "Deprecation notice: Module '{}' has been renamed to '{}'; "
                "please adjust your plugin directive.".format(plugin_name, renamed_name)
            )
            plugin_name = renamed_name

        # Try to import the module.
        #
        # Note: We intercept import errors and continue but let other plugin
        # import time exceptions fail a run, by choice.
        try:
            module = importlib.import_module(plugin_name)
            if not hasattr(module, "__plugins__"):
                continue
        except ImportError:
            # Upon failure, just issue an error.
            formatted_traceback = traceback.format_exc().replace("\n", "\n  ")
            errors.append(
                LoadError(
                    data.new_metadata("<load>", 0),
                    'Error importing "{}": {}'.format(plugin_name, formatted_traceback),
                    None,
                )
            )
            continue

        # Apply it.
        with misc_utils.log_time(plugin_name, log_timings, indent=2):
            # Run each transformer function in the plugin.
            for function_name in module.__plugins__:
                if isinstance(function_name, str):
                    # Support plugin functions provided by name.
                    callback = getattr(module, function_name)
                else:
                    # Support function types directly, not just names.
                    callback = function_name

                # Provide arguments if config is provided.
                # TODO(blais): Make this consistent in v3, not conditional.
                args = () if plugin_config is None else (plugin_config,)

                # Catch all exceptions raised in running the plugin, except exits.
                try:
                    entries, plugin_errors = callback(entries, options_map, *args)
                    errors.extend(plugin_errors)
                except Exception as exc:
                    # Allow the user to exit in a plugin.
                    if isinstance(exc, SystemExit):
                        raise

                    # Upon failure, just issue an error.
                    formatted_traceback = traceback.format_exc().replace("\n", "\n  ")
                    errors.append(
                        LoadError(
                            data.new_metadata("<load>", 0),
                            'Error applying plugin "{}": {}'.format(
                                plugin_name, formatted_traceback
                            ),
                            None,
                        )
                    )
                    continue

                # Ensure that the entries are sorted. Don't trust the plugins
                # themselves.
                entries.sort(key=data.entry_sortkey)

    return entries, errors
def check(entries, options_map):
    """Process the balance assertion directives.

    For each Balance directive, check that their expected balance corresponds to
    the actual balance computed at that time and replace failing ones by new
    ones with a flag that indicates failure.

    Args:
      entries: A list of directives.
      options_map: A dict of options, parsed from the input file.
    Returns:
      A pair of a list of directives and a list of balance check errors.
    """
    new_entries = []
    check_errors = []

    # This is similar to realization, but performed in a different order, and
    # where we only accumulate inventories for accounts that have balance
    # assertions in them (this saves on time). Here we process the entries one
    # by one along with the balance checks. We use a temporary realization in
    # order to hold the incremental tree of balances, so that we can easily get
    # the amounts of an account's subaccounts for making checks on parent
    # accounts.
    real_root = realization.RealAccount("")

    # Figure out the set of accounts for which we need to compute a running
    # inventory balance.
    asserted_accounts = {
        entry.account for entry in entries if isinstance(entry, Balance)
    }

    # Add all children accounts of an asserted account to be calculated as well,
    # and pre-create these accounts, and only those (we're just being tight to
    # make sure).
    asserted_match_list = [
        account.parent_matcher(account_) for account_ in asserted_accounts
    ]
    for account_ in getters.get_accounts(entries):
        if account_ in asserted_accounts or any(
            match(account_) for match in asserted_match_list
        ):
            realization.get_or_create(real_root, account_)

    # Get the Open directives for each account.
    open_close_map = getters.get_account_open_close(entries)

    for entry in entries:
        if isinstance(entry, Transaction):
            # For each of the postings' accounts, update the balance inventory.
            for posting in entry.postings:
                real_account = realization.get(real_root, posting.account)

                # The account will have been created only if we're meant to track it.
                if real_account is not None:
                    # Note: Always allow negative lots for the purpose of balancing.
                    # This error should show up somewhere else than here.
                    real_account.balance.add_position(posting)

        elif isinstance(entry, Balance):
            # Check that the currency of the balance check is one of the allowed
            # currencies for that account.
            expected_amount = entry.amount
            try:
                # Renamed from 'open' to avoid shadowing the builtin open().
                open_entry, _ = open_close_map[entry.account]
            except KeyError:
                check_errors.append(
                    BalanceError(
                        entry.meta,
                        "Invalid reference to unknown account '{}'".format(entry.account),
                        entry,
                    )
                )
                continue

            if (
                expected_amount is not None
                and open_entry
                and open_entry.currencies
                and expected_amount.currency not in open_entry.currencies
            ):
                check_errors.append(
                    BalanceError(
                        entry.meta,
                        "Invalid currency '{}' for Balance directive: ".format(
                            expected_amount.currency
                        ),
                        entry,
                    )
                )

            # Sum up the current balances for this account and its
            # sub-accounts. We want to support checks for parent accounts
            # for the total sum of their subaccounts.
            #
            # FIXME: Improve the performance further by computing the balance
            # for the desired currency only. This won't allow us to cache in
            # this way but may be faster, if we're not asserting all the
            # currencies. Furthermore, we could probably avoid recomputing the
            # balance if a subtree of positions hasn't been invalidated by a new
            # position added to the realization. Do this.
            real_account = realization.get(real_root, entry.account)
            assert real_account is not None, "Missing {}".format(entry.account)
            subtree_balance = realization.compute_balance(real_account, leaf_only=False)

            # Get only the amount in the desired currency.
            balance_amount = subtree_balance.get_currency_units(expected_amount.currency)

            # Check if the amount is within bounds of the expected amount.
            diff_amount = amount.sub(balance_amount, expected_amount)

            # Use the specified tolerance or automatically infer it.
            tolerance = get_balance_tolerance(entry, options_map)

            if abs(diff_amount.number) > tolerance:
                check_errors.append(
                    BalanceError(
                        entry.meta,
                        (
                            "Balance failed for '{}': "
                            # Note the space between the last two fields: the
                            # message reads "(<number> too much/too little)".
                            "expected {} != accumulated {} ({} {})"
                        ).format(
                            entry.account,
                            expected_amount,
                            balance_amount,
                            abs(diff_amount.number),
                            "too much" if diff_amount.number > 0 else "too little",
                        ),
                        entry,
                    )
                )

                # Substitute the entry by a failing entry, with the diff_amount
                # field set on it. I'm not entirely sure that this is the best
                # of ideas, maybe leaving the original check intact and insert a
                # new error entry might be more functional or easier to
                # understand.
                entry = entry._replace(meta=entry.meta.copy(), diff_amount=diff_amount)

        new_entries.append(entry)

    return new_entries, check_errors
def get_balance_tolerance(balance_entry, options_map):
    """Get the tolerance amount for a single entry.

    Args:
      balance_entry: An instance of data.Balance
      options_map: An options dict, as per the parser.
    Returns:
      A Decimal, the amount of tolerance implied by the directive.
    """
    # An explicit per-directive tolerance override always wins.
    if balance_entry.tolerance is not None:
        return balance_entry.tolerance

    exponent = balance_entry.amount.number.as_tuple().exponent
    if exponent >= 0:
        # Integral amounts imply no inferred tolerance.
        return ZERO

    # Be generous and always allow twice the multiplier on Balance and
    # Pad because the user creates these and the rounding of those
    # balances may often be further off than those used within a single
    # transaction.
    multiplier = options_map["inferred_tolerance_multiplier"] * 2
    return ONE.scaleb(exponent) * multiplier
def filter_link(link, entries):
    """Yield all the entries which have the given link.

    Args:
      link: A string, the link we are interested in.
    Yields:
      Every entry in 'entries' that links to 'link'.
    """
    for candidate in entries:
        # Only transactions carry links.
        if not isinstance(candidate, data.Transaction):
            continue
        if candidate.links and link in candidate.links:
            yield candidate
def filter_tag(tag, entries):
    """Yield all the entries which have the given tag.

    Args:
      tag: A string, the tag we are interested in.
    Yields:
      Every entry in 'entries' that is tagged with 'tag'.
    """
    for candidate in entries:
        # Only transactions carry tags.
        if not isinstance(candidate, data.Transaction):
            continue
        if candidate.tags and tag in candidate.tags:
            yield candidate
def get_common_accounts(entries):
    """Compute the intersection of the accounts on the given entries.

    Args:
      entries: A list of Transaction entries to process.
    Returns:
      A set of strings, the names of the common accounts from these
      entries.
    """
    assert all(isinstance(entry, data.Transaction) for entry in entries)

    # No entries at all: trivially empty intersection.
    if not entries:
        return set()

    # Seed with the accounts of the first entry; a single entry thus yields
    # all of its own accounts.
    entry_iter = iter(entries)
    common = {posting.account for posting in next(entry_iter).postings}
    for entry in entry_iter:
        common &= {posting.account for posting in entry.postings}
        if not common:
            # Early out: the intersection can only shrink from here.
            break
    return common
def group_entries_by_link(entries):
    """Group the list of entries by link.

    Args:
      entries: A list of directives/transactions to process.
    Returns:
      A dict of link-name to list of entries.
    """
    groups = defaultdict(list)
    for entry in entries:
        # Only linked transactions participate in grouping.
        if isinstance(entry, data.Transaction) and entry.links:
            for link in entry.links:
                groups[link].append(entry)
    return groups
def compress(entries, predicate):
    """Compress multiple transactions into single transactions.

    Replace consecutive sequences of Transaction entries that fulfill the given
    predicate by a single entry at the date of the last matching entry.
    'predicate' is the function that determines if an entry should be
    compressed.

    This can be used to simply a list of transactions that are similar and occur
    frequently. As an example, in a retail FOREX trading account, differential
    interest of very small amounts is paid every day; it is not relevant to look
    at the full detail of this interest unless there are other transactions. You
    can use this to compress it into single entries between other types of
    transactions.

    Args:
      entries: A list of directives.
      predicate: A callable which accepts an entry and return true if the entry
        is intended to be compressed.
    Returns:
      A list of directives, with compressible transactions replaced by a summary
      equivalent.
    """
    output = []
    buffered = []

    def flush():
        # Merge and emit the accumulated run of compressible transactions,
        # dated at the last entry of the run.
        if buffered:
            output.append(merge(buffered, buffered[-1]))
            buffered.clear()

    for entry in entries:
        if isinstance(entry, data.Transaction) and predicate(entry):
            # Save for compressing later.
            buffered.append(entry)
        else:
            flush()
            # Output the differing entry.
            output.append(entry)

    flush()
    return output
def merge(entries, prototype_txn):
    """Merge the postings of a list of Transactions into a single one.

    Merge postings the given entries into a single entry with the Transaction
    attributes of the prototype. Return the new entry. The combined list of
    postings are merged if everything about the postings is the same except the
    number.

    Args:
      entries: A list of directives.
      prototype_txn: A Transaction which is used to create the compressed
        Transaction instance. Its list of postings is ignored.
    Returns:
      A new Transaction instance which contains all the postings from the input
      entries merged together.
    """
    # Accumulate units keyed by the posting stripped of its number: postings
    # that differ only in number aggregate together.
    aggregated = collections.defaultdict(Decimal)
    for txn in data.filter_txns(entries):
        for posting in txn.postings:
            strip_key = data.Posting(
                posting.account,
                Amount(None, posting.units.currency),
                posting.cost,
                posting.price,
                posting.flag,
                None,
            )
            aggregated[strip_key] += posting.units.number

    # Create a new transaction with the aggregated postings, carrying over the
    # prototype's attributes but with empty tags/links and a fresh posting list.
    merged_txn = data.Transaction(
        prototype_txn.meta,
        prototype_txn.date,
        prototype_txn.flag,
        prototype_txn.payee,
        prototype_txn.narration,
        data.EMPTY_SET,
        data.EMPTY_SET,
        [],
    )

    # Issue the merged postings, sorted for at least some stability of output:
    # by account, then currency, then aggregated number.
    for posting, number in sorted(
        aggregated.items(),
        key=lambda item: (item[0].account, item[0].units.currency, item[1]),
    ):
        merged_txn.postings.append(
            data.Posting(
                posting.account,
                Amount(number, posting.units.currency),
                posting.cost,
                posting.price,
                posting.flag,
                posting.meta,
            )
        )

    return merged_txn
def find_documents(directory, input_filename, accounts_only=None, strict=False):
    """Find dated document files under the given directory.

    If a restricting set of accounts is provided in 'accounts_only', only return
    entries that correspond to one of the given accounts.

    Args:
      directory: A string, the name of the root of the directory hierarchy to be searched.
      input_filename: The name of the file to be used for the Document directives. This is
        also used to resolve relative directory names.
      accounts_only: A set of valid accounts strings to search for.
      strict: A boolean, set to true if you want to generate errors on documents
        found in accounts not provided in accounts_only. This is only meaningful
        if accounts_only is specified.
    Returns:
      A list of new Document objects that were created from the files found, and a list
      of new errors generated.
    """
    errors = []

    # Resolve a relative documents directory against the location of the
    # beancount input file itself.
    if not path.isabs(directory):
        directory = path.abspath(
            path.normpath(path.join(path.dirname(input_filename), directory))
        )

    # If the directory does not exist, just generate an error and return.
    if not path.exists(directory):
        error = DocumentError(
            data.new_metadata(input_filename, 0),
            "Document root '{}' does not exist".format(directory),
            None,
        )
        return ([], [error])

    # NOTE(review): the '.' between the date and the rest of the filename is an
    # unescaped regex dot, so any single character matches there — presumably
    # intended to be r'\.'; left as-is to preserve behavior.
    dated_filename_re = re.compile(r"(\d\d\d\d)-(\d\d)-(\d\d).(.*)")

    # Walk the hierarchy of files.
    entries = []
    for root, account_name, dirs, files in account.walk(directory):
        # Look for files that have a dated filename.
        for filename in files:
            match = dated_filename_re.match(filename)
            if not match:
                continue

            # If a restricting set of accounts was specified, skip document
            # directives found in accounts with no corresponding account name.
            if accounts_only is not None and account_name not in accounts_only:
                if strict:
                    if any(
                        account_name.startswith(account) for account in accounts_only
                    ):
                        errors.append(
                            DocumentError(
                                data.new_metadata(input_filename, 0),
                                "Document '{}' found in child account {}".format(
                                    filename, account_name
                                ),
                                None,
                            )
                        )
                    elif any(
                        account.startswith(account_name) for account in accounts_only
                    ):
                        errors.append(
                            DocumentError(
                                data.new_metadata(input_filename, 0),
                                "Document '{}' found in parent account {}".format(
                                    filename, account_name
                                ),
                                None,
                            )
                        )
                continue

            # Create a new directive.
            meta = data.new_metadata(input_filename, 0)
            try:
                date = datetime.date(*map(int, match.group(1, 2, 3)))
            except ValueError as exc:
                # The filename looked dated but the date itself is invalid
                # (e.g. month 13).
                errors.append(
                    DocumentError(
                        data.new_metadata(input_filename, 0),
                        "Invalid date on document file '{}': {}".format(filename, exc),
                        None,
                    )
                )
            else:
                entries.append(
                    data.Document(
                        meta,
                        date,
                        account_name,
                        path.join(root, filename),
                        data.EMPTY_SET,
                        data.EMPTY_SET,
                    )
                )

    return (entries, errors)
def process_documents(entries, options_map):
- """Check files for document directives and create documents directives automatically.
-
- Args:
- entries: A list of all directives parsed from the file.
- options_map: An options dict, as is output by the parser.
- We're using its 'filename' option to figure out relative path to
- search for documents.
- Returns:
- A pair of list of all entries (including new ones), and errors
- generated during the process of creating document directives.
- """
- filename = options_map["filename"]
-
- # Detect filenames that should convert into entries.
- autodoc_entries = []
- autodoc_errors = []
- document_dirs = options_map['documents']
- if document_dirs:
- # Restrict to the list of valid accounts only.
- accounts = getters.get_accounts(entries)
-
- # Accumulate all the entries.
- for directory in map(path.normpath, document_dirs):
- new_entries, new_errors = find_documents(directory, filename, accounts)
- autodoc_entries.extend(new_entries)
- autodoc_errors.extend(new_errors)
-
- # Merge the two lists of entries and errors. Keep the entries sorted.
- entries.extend(autodoc_entries)
- entries.sort(key=data.entry_sortkey)
-
- return (entries, autodoc_errors)
-
+
def process_documents(entries, options_map):
+    """Check files for document directives and create documents directives automatically.
+
+    Args:
+      entries: A list of all directives parsed from the file.
+      options_map: An options dict, as is output by the parser.
+        We're using its 'filename' option to figure out relative path to
+        search for documents.
+    Returns:
+      A pair of list of all entries (including new ones), and errors
+      generated during the process of creating document directives.
+    """
+    filename = options_map["filename"]
+
+    # Detect filenames that should convert into entries.
+    autodoc_entries = []
+    autodoc_errors = []
+    document_dirs = options_map["documents"]
+    if document_dirs:
+        # Restrict to the list of valid accounts only.
+        accounts = getters.get_accounts(entries)
+
+        # Accumulate all the entries.
+        for directory in map(path.normpath, document_dirs):
+            new_entries, new_errors = find_documents(directory, filename, accounts)
+            autodoc_entries.extend(new_entries)
+            autodoc_errors.extend(new_errors)
+
+    # Merge the two lists of entries and errors. Keep the entries sorted.
+    entries.extend(autodoc_entries)
+    entries.sort(key=data.entry_sortkey)
+
+    return (entries, autodoc_errors)
+
def verify_document_files_exist(entries, unused_options_map):
- """Verify that the document entries point to existing files.
-
- Args:
- entries: a list of directives whose documents need to be validated.
- unused_options_map: A parser options dict. We're not using it.
- Returns:
- The same list of entries, and a list of new errors, if any were encountered.
- """
- errors = []
- for entry in entries:
- if not isinstance(entry, data.Document):
- continue
- if not path.exists(entry.filename):
- errors.append(
- DocumentError(entry.meta,
- 'File does not exist: "{}"'.format(entry.filename),
- entry))
- return entries, errors
-
+
def verify_document_files_exist(entries, unused_options_map):
+    """Verify that the document entries point to existing files.
+
+    Args:
+      entries: a list of directives whose documents need to be validated.
+      unused_options_map: A parser options dict. We're not using it.
+    Returns:
+      The same list of entries, and a list of new errors, if any were encountered.
+    """
+    errors = []
+    for entry in entries:
+        if not isinstance(entry, data.Document):
+            continue
+        if not path.exists(entry.filename):
+            errors.append(
+                DocumentError(
+                    entry.meta, 'File does not exist: "{}"'.format(entry.filename), entry
+                )
+            )
+    return entries, errors
+
Note that the cost-currency must always be included in the group-key (sums
-over multiple currency units do not make sense), so it is appended to the
-sort-key automatically.
+
Return currencies relevant for the given date.
+
This computes the account balances as of the date, and returns the union of:
+a) The currencies held at cost, and
+b) Currency pairs from previous conversions, but only for currencies with
+ non-zero balances.
+
This is intended to produce the list of currencies whose prices are relevant
+at a particular date, based on previous history.
@@ -2096,8 +1992,8 @@
Parameters:
-
keyfun – A callable, which returns the key to aggregate by. This key need
-not include the cost-currency.
+
entries – A list of directives.
+
date – A datetime.date instance.
@@ -2113,50 +2009,58 @@
Returns:
-
A list of aggregated holdings.
+
A set of (base, quote) currencies.
- Source code in beancount/ops/holdings.py
-
def aggregate_holdings_by(holdings, keyfun):
- """Aggregate holdings by some key.
-
- Note that the cost-currency must always be included in the group-key (sums
- over multiple currency units do not make sense), so it is appended to the
- sort-key automatically.
-
- Args:
- keyfun: A callable, which returns the key to aggregate by. This key need
- not include the cost-currency.
- Returns:
- A list of aggregated holdings.
- """
- # Aggregate the groups of holdings.
- grouped = collections.defaultdict(list)
- for holding in holdings:
- key = (keyfun(holding), holding.cost_currency)
- grouped[key].append(holding)
- grouped_holdings = (aggregate_holdings_list(key_holdings)
- for key_holdings in grouped.values())
-
- # We could potentially filter out holdings with zero units here. These types
- # of holdings might occur on a group with leaked (i.e., non-zero) cost basis
- # and zero units. However, sometimes are valid merging of multiple
- # currencies may occur, and the number value will be legitimately set to
- # ZERO (for various reasons downstream), so we prefer not to ignore the
- # holding. Callers must be prepared to deal with a holding of ZERO units and
- # a non-zero cost basis. {0ed05c502e63, b/16}
- ## nonzero_holdings = (holding
- ## for holding in grouped_holdings
- ## if holding.number != ZERO)
-
- # Return the holdings in order.
- return sorted(grouped_holdings,
- key=lambda holding: (holding.account, holding.currency))
-
+ Source code in beancount/ops/find_prices.py
+
def find_balance_currencies(entries, date=None):
+    """Return currencies relevant for the given date.
+
+    This computes the account balances as of the date, and returns the union of:
+    a) The currencies held at cost, and
+    b) Currency pairs from previous conversions, but only for currencies with
+       non-zero balances.
+
+    This is intended to produce the list of currencies whose prices are relevant
+    at a particular date, based on previous history.
+
+    Args:
+      entries: A list of directives.
+      date: A datetime.date instance.
+    Returns:
+      A set of (base, quote) currencies.
+    """
+    # Compute the balances.
+    currencies = set()
+    currencies_on_books = set()
+    balances, _ = summarize.balance_by_account(entries, date)
+    for _, balance in balances.items():
+        for pos in balance:
+            if pos.cost is not None:
+                # Add currencies held at cost.
+                currencies.add((pos.units.currency, pos.cost.currency))
+            else:
+                # Add regular currencies.
+                currencies_on_books.add(pos.units.currency)
+
+    # Create currency pairs from the currencies which are on account balances.
+    # In order to figure out the quote currencies, we use the list of price
+    # conversions until this date.
+    converted = find_currencies_converted(entries, date) | find_currencies_priced(
+        entries, date
+    )
+    for cbase in currencies_on_books:
+        for base_quote in converted:
+            base, quote = base_quote
+            if base == cbase:
+                currencies.add(base_quote)
+
+    return currencies
+
If there are varying 'account', 'currency' or 'cost_currency' attributes,
-their values are replaced by '*'. Otherwise they are preserved. Note that
-all the cost-currency values must be equal in order for aggregations to
-succeed (without this constraint a sum of units in different currencies has
-no meaning).
+
Return all currencies that were held at cost at some point.
+
This returns all of them, even if not on the books at a particular point in
+time. This code does not look at account balances.
@@ -2193,7 +2094,8 @@
Parameters:
-
holdings – A list of Holding instances.
+
entries – A list of directives.
+
date – A datetime.date instance.
@@ -2209,107 +2111,37 @@
Returns:
-
A single Holding instance, or None, if there are no holdings in the input
-list.
+
A list of (base, quote) currencies.
-
-
-
-
-
-
-
-
-
Exceptions:
-
-
-
ValueError – If multiple cost currencies encountered.
-
-
-
-
- Source code in beancount/ops/holdings.py
-
def aggregate_holdings_list(holdings):
- """Aggregate a list of holdings.
-
- If there are varying 'account', 'currency' or 'cost_currency' attributes,
- their values are replaced by '*'. Otherwise they are preserved. Note that
- all the cost-currency values must be equal in order for aggregations to
- succeed (without this constraint a sum of units in different currencies has
- no meaning).
-
- Args:
- holdings: A list of Holding instances.
- Returns:
- A single Holding instance, or None, if there are no holdings in the input
- list.
- Raises:
- ValueError: If multiple cost currencies encountered.
- """
- if not holdings:
- return None
-
- # Note: Holding is a bit overspecified with book and market values. We
- # recompute them from cost and price numbers here anyhow.
- units, total_book_value, total_market_value = ZERO, ZERO, ZERO
- accounts = set()
- currencies = set()
- cost_currencies = set()
- price_dates = set()
- book_value_seen = False
- market_value_seen = False
- for holding in holdings:
- units += holding.number
- accounts.add(holding.account)
- price_dates.add(holding.price_date)
- currencies.add(holding.currency)
- cost_currencies.add(holding.cost_currency)
-
- if holding.book_value is not None:
- total_book_value += holding.book_value
- book_value_seen = True
- elif holding.cost_number is not None:
- total_book_value += holding.number * holding.cost_number
- book_value_seen = True
-
- if holding.market_value is not None:
- total_market_value += holding.market_value
- market_value_seen = True
- elif holding.price_number is not None:
- total_market_value += holding.number * holding.price_number
- market_value_seen = True
-
- if book_value_seen:
- average_cost = total_book_value / units if units else None
- else:
- total_book_value = None
- average_cost = None
-
- if market_value_seen:
- average_price = total_market_value / units if units else None
- else:
- total_market_value = None
- average_price = None
-
- if len(cost_currencies) != 1:
- raise ValueError("Cost currencies are not homogeneous for aggregation: {}".format(
- ','.join(map(str, cost_currencies))))
-
- units = units if len(currencies) == 1 else ZERO
- currency = currencies.pop() if len(currencies) == 1 else '*'
- cost_currency = cost_currencies.pop()
- account_ = (accounts.pop()
- if len(accounts) == 1
- else account.commonprefix(accounts))
- price_date = price_dates.pop() if len(price_dates) == 1 else None
- return Holding(account_, units, currency, average_cost, cost_currency,
- total_book_value, total_market_value, average_price, price_date)
-
+ Source code in beancount/ops/find_prices.py
+
def find_currencies_at_cost(entries, date=None):
+    """Return all currencies that were held at cost at some point.
+
+    This returns all of them, even if not on the books at a particular point in
+    time. This code does not look at account balances.
+
+    Args:
+      entries: A list of directives.
+      date: A datetime.date instance.
+    Returns:
+      A list of (base, quote) currencies.
+    """
+    currencies = set()
+    for entry in entries:
+        if not isinstance(entry, data.Transaction):
+            continue
+        if date and entry.date >= date:
+            break
+        for posting in entry.postings:
+            if posting.cost is not None and posting.cost.number is not None:
+                currencies.add((posting.units.currency, posting.cost.currency))
+    return currencies
+
Convert the given list of holdings's fields to a common currency.
-
If the rate is not available to convert, leave the fields empty.
+
Return currencies from price conversions.
+
This function looks at all price conversions that occurred until some date
+and produces a list of them. Note: This does not include Price directives,
+only postings with price conversions.
@@ -2342,9 +2176,8 @@
Parameters:
-
price_map – A price-map, as built by prices.build_price_map().
-
target_currency – The target common currency to convert amounts to.
-
holdings_list – A list of holdings.Holding instances.
+
entries – A list of directives.
+
date – A datetime.date instance.
@@ -2360,76 +2193,40 @@
Returns:
-
A modified list of holdings, with the 'extra' field set to the value in
-'currency', or None, if it was not possible to convert.
+
A list of (base, quote) currencies.
- Source code in beancount/ops/holdings.py
-
def convert_to_currency(price_map, target_currency, holdings_list):
- """Convert the given list of holdings's fields to a common currency.
-
- If the rate is not available to convert, leave the fields empty.
-
- Args:
- price_map: A price-map, as built by prices.build_price_map().
- target_currency: The target common currency to convert amounts to.
- holdings_list: A list of holdings.Holding instances.
- Returns:
- A modified list of holdings, with the 'extra' field set to the value in
- 'currency', or None, if it was not possible to convert.
- """
- # A list of the fields we should convert.
- convert_fields = ('cost_number', 'book_value', 'market_value', 'price_number')
-
- new_holdings = []
- for holding in holdings_list:
- if holding.cost_currency == target_currency:
- # The holding is already priced in the target currency; do nothing.
- new_holding = holding
- else:
- if holding.cost_currency is None:
- # There is no cost currency; make the holding priced in its own
- # units. The price-map should yield a rate of 1.0 and everything
- # else works out.
- if holding.currency is None:
- raise ValueError("Invalid currency '{}'".format(holding.currency))
- holding = holding._replace(cost_currency=holding.currency)
-
- # Fill in with book and market value as well.
- if holding.book_value is None:
- holding = holding._replace(book_value=holding.number)
- if holding.market_value is None:
- holding = holding._replace(market_value=holding.number)
-
- assert holding.cost_currency, "Missing cost currency: {}".format(holding)
- base_quote = (holding.cost_currency, target_currency)
-
- # Get the conversion rate and replace the required numerical
- # fields..
- _, rate = prices.get_latest_price(price_map, base_quote)
- if rate is not None:
- new_holding = misc_utils.map_namedtuple_attributes(
- convert_fields,
- lambda number, r=rate: number if number is None else number * r,
- holding)
- # Ensure we set the new cost currency after conversion.
- new_holding = new_holding._replace(cost_currency=target_currency)
- else:
- # Could not get the rate... clear every field and set the cost
- # currency to None. This enough marks the holding conversion as
- # a failure.
- new_holding = misc_utils.map_namedtuple_attributes(
- convert_fields, lambda number: None, holding)
- new_holding = new_holding._replace(cost_currency=None)
-
- new_holdings.append(new_holding)
-
- return new_holdings
-
+ Source code in beancount/ops/find_prices.py
+
def find_currencies_converted(entries, date=None):
+    """Return currencies from price conversions.
+
+    This function looks at all price conversions that occurred until some date
+    and produces a list of them. Note: This does not include Price directives,
+    only postings with price conversions.
+
+    Args:
+      entries: A list of directives.
+      date: A datetime.date instance.
+    Returns:
+      A list of (base, quote) currencies.
+    """
+    currencies = set()
+    for entry in entries:
+        if not isinstance(entry, data.Transaction):
+            continue
+        if date and entry.date >= date:
+            break
+        for posting in entry.postings:
+            price = posting.price
+            if posting.cost is not None or price is None:
+                continue
+            currencies.add((posting.units.currency, price.currency))
+    return currencies
+
Return a list of commodities present at a particular date.
-
This routine fetches the holdings present at a particular date and returns a
-list of the commodities held in those holdings. This should define the list
-of price date points required to assess the market value of this portfolio.
-
Notes:
-
-
-
The ticker symbol will be fetched from the corresponding Commodity
- directive. If there is no ticker symbol defined for a directive or no
- corresponding Commodity directive, the currency is still included, but
- 'None' is specified for the symbol. The code that uses this routine should
- be free to use the currency name to make an attempt to fetch the currency
- using its name, or to ignore it.
-
-
-
The 'cost-currency' is that which is found on the holdings instance and
- can be ignored. The 'quote-currency' is that which is declared on the
- Commodity directive from its 'quote' metadata field.
-
-
-
This is used in a routine that fetches prices from a data source on the
-internet (either from LedgerHub, but you can reuse this in your own script
-if you build one).
+
Return currencies seen in Price directives.
@@ -2484,8 +2259,7 @@
entries – A list of directives.
-
date – A datetime.date instance, the date at which to get the list of
-relevant holdings.
+
date – A datetime.date instance.
@@ -2501,85 +2275,32 @@
Returns:
-
A list of (currency, cost-currency, quote-currency, ticker) tuples, where
- currency – The Beancount base currency to fetch a price for.
- cost-currency: The cost-currency of the holdings found at the given date.
- quote-currency: The currency formally declared as quote currency in the
- metadata of Commodity directives.
- ticker: The ticker symbol to use for fetching the price (extracted from
- the metadata of Commodity directives).
+
A list of (base, quote) currencies.
- Source code in beancount/ops/holdings.py
-
def get_commodities_at_date(entries, options_map, date=None):
- """Return a list of commodities present at a particular date.
-
- This routine fetches the holdings present at a particular date and returns a
- list of the commodities held in those holdings. This should define the list
- of price date points required to assess the market value of this portfolio.
-
- Notes:
-
- * The ticker symbol will be fetched from the corresponding Commodity
- directive. If there is no ticker symbol defined for a directive or no
- corresponding Commodity directive, the currency is still included, but
- 'None' is specified for the symbol. The code that uses this routine should
- be free to use the currency name to make an attempt to fetch the currency
- using its name, or to ignore it.
-
- * The 'cost-currency' is that which is found on the holdings instance and
- can be ignored. The 'quote-currency' is that which is declared on the
- Commodity directive from its 'quote' metadata field.
-
- This is used in a routine that fetches prices from a data source on the
- internet (either from LedgerHub, but you can reuse this in your own script
- if you build one).
-
- Args:
- entries: A list of directives.
- date: A datetime.date instance, the date at which to get the list of
- relevant holdings.
- Returns:
- A list of (currency, cost-currency, quote-currency, ticker) tuples, where
- currency: The Beancount base currency to fetch a price for.
- cost-currency: The cost-currency of the holdings found at the given date.
- quote-currency: The currency formally declared as quote currency in the
- metadata of Commodity directives.
- ticker: The ticker symbol to use for fetching the price (extracted from
- the metadata of Commodity directives).
- """
- # Remove all the entries after the given date, if requested.
- if date is not None:
- entries = summarize.truncate(entries, date)
-
- # Get the list of holdings at the particular date.
- holdings_list = get_final_holdings(entries)
-
- # Obtain the unique list of currencies we need to fetch.
- commodities_list = {(holding.currency, holding.cost_currency)
- for holding in holdings_list}
-
- # Add in the associated ticker symbols.
- commodities_map = getters.get_commodity_map(entries)
- commodities_symbols_list = []
- for currency, cost_currency in sorted(commodities_list):
- try:
- commodity_entry = commodities_map[currency]
- ticker = commodity_entry.meta.get('ticker', None)
- quote_currency = commodity_entry.meta.get('quote', None)
- except KeyError:
- ticker = None
- quote_currency = None
-
- commodities_symbols_list.append(
- (currency, cost_currency, quote_currency, ticker))
-
- return commodities_symbols_list
-
+ Source code in beancount/ops/find_prices.py
+
def find_currencies_priced(entries, date=None):
+    """Return currencies seen in Price directives.
+
+    Args:
+      entries: A list of directives.
+      date: A datetime.date instance.
+    Returns:
+      A list of (base, quote) currencies.
+    """
+    currencies = set()
+    for entry in entries:
+        if not isinstance(entry, data.Price):
+            continue
+        if date and entry.date >= date:
+            break
+        currencies.add((entry.currency, entry.amount.currency))
+    return currencies
+
Get a dictionary of the latest holdings by account.
-
This basically just flattens the balance sheet's final positions, including
-that of equity accounts. If a 'price_map' is provided, insert price
-information in the flattened holdings at the latest date, or at the given
-date, if one is provided.
-
Only the accounts in 'included_account_types' will be included, and this is
-always called for Assets and Liabilities only. If left unspecified, holdings
-from all account types will be included, including Equity, Income and
-Expenses.
+
-
-
-
-
-
-
-
-
Parameters:
-
-
-
entries – A list of directives.
-
included_account_types – A sequence of strings, the account types to
-include in the output. A reasonable example would be
-('Assets', 'Liabilities'). If not specified, include all account types.
-
price_map – A dict of prices, as built by prices.build_price_map().
-
date – A datetime.date instance, the date at which to price the
-holdings. If left unspecified, we use the latest price information.
-
-
-
-
-
-
-
-
-
-
-
-
-
Returns:
-
-
-
A list of dicts, with the following fields –
-
-
-
-
-
-
- Source code in beancount/ops/holdings.py
-
def get_final_holdings(entries, included_account_types=None, price_map=None, date=None):
- """Get a dictionary of the latest holdings by account.
-
- This basically just flattens the balance sheet's final positions, including
- that of equity accounts. If a 'price_map' is provided, insert price
- information in the flattened holdings at the latest date, or at the given
- date, if one is provided.
-
- Only the accounts in 'included_account_types' will be included, and this is
- always called for Assets and Liabilities only. If left unspecified, holdings
- from all account types will be included, including Equity, Income and
- Expenses.
-
- Args:
- entries: A list of directives.
- included_account_types: A sequence of strings, the account types to
- include in the output. A reasonable example would be
- ('Assets', 'Liabilities'). If not specified, include all account types.
- price_map: A dict of prices, as built by prices.build_price_map().
- date: A datetime.date instance, the date at which to price the
- holdings. If left unspecified, we use the latest price information.
- Returns:
- A list of dicts, with the following fields:
- """
- # Remove the entries inserted by unrealized gains/losses. Those entries do
- # affect asset accounts, and we don't want them to appear in holdings.
- #
- # Note: Perhaps it would make sense to generalize this concept of "inserted
- # unrealized gains."
- simple_entries = [entry
- for entry in entries
- if (not isinstance(entry, data.Transaction) or
- entry.flag != flags.FLAG_UNREALIZED)]
-
- # Realize the accounts into a tree (because we want the positions by-account).
- root_account = realization.realize(simple_entries)
-
- # For each account, look at the list of positions and build a list.
- holdings = []
- for real_account in sorted(list(realization.iter_children(root_account)),
- key=lambda ra: ra.account):
-
- if included_account_types:
- # Skip accounts of invalid types, we only want to reflect the requested
- # account types, typically assets and liabilities.
- account_type = account_types.get_account_type(real_account.account)
- if account_type not in included_account_types:
- continue
-
- for pos in real_account.balance.get_positions():
- if pos.cost is not None:
- # Get price information if we have a price_map.
- market_value = None
- if price_map is not None:
- base_quote = (pos.units.currency, pos.cost.currency)
- price_date, price_number = prices.get_price(price_map,
- base_quote, date)
- if price_number is not None:
- market_value = pos.units.number * price_number
- else:
- price_date, price_number = None, None
-
- holding = Holding(real_account.account,
- pos.units.number,
- pos.units.currency,
- pos.cost.number,
- pos.cost.currency,
- pos.units.number * pos.cost.number,
- market_value,
- price_number,
- price_date)
- else:
- holding = Holding(real_account.account,
- pos.units.number,
- pos.units.currency,
- None,
- pos.units.currency,
- pos.units.number,
- pos.units.number,
- None,
- None)
- holdings.append(holding)
-
- return holdings
-
def holding_to_position(holding):
- """Convert the holding to a position.
-
- Args:
- holding: An instance of Holding.
- Returns:
- An instance of Position.
- """
- return position.Position(
- amount.Amount(holding.number, holding.currency),
- (position.Cost(holding.cost_number, holding.cost_currency, None, None)
- if holding.cost_number
- else None))
-
-
-
+
-
+
Given a Beancount ledger, compute time intervals where we hold each commodity.
+
This script computes, for each commodity, which time intervals it is required at.
+This can then be used to identify a list of dates at which we need to fetch prices
+in order to properly fill the price database.
Convert the market and book values of the given list of holdings to relative data.
+
Compress a list of date pairs to ignore short stretches of unused days.
@@ -2903,7 +2376,9 @@
Parameters:
-
holdings – A list of Holding instances.
+
intervals – A list of pairs of datetime.date instances.
+
num_days – An integer, the number of unused days to require for intervals
+to be distinct, to allow a gap.
@@ -2919,62 +2394,38 @@
Returns:
-
A list of holdings instances with the absolute value fields replaced by
-fractions of total portfolio. The new list of holdings is sorted by
-currency, and the relative fractions are also relative to that currency.
+
A new dict of lifetimes map where some intervals may have been joined.
- Source code in beancount/ops/holdings.py
-
def reduce_relative(holdings):
- """Convert the market and book values of the given list of holdings to relative data.
-
- Args:
- holdings: A list of Holding instances.
- Returns:
- A list of holdings instances with the absolute value fields replaced by
- fractions of total portfolio. The new list of holdings is sorted by
- currency, and the relative fractions are also relative to that currency.
- """
- # Group holdings by value currency.
- by_currency = collections.defaultdict(list)
- ordering = {}
- for index, holding in enumerate(holdings):
- ordering.setdefault(holding.cost_currency, index)
- by_currency[holding.cost_currency].append(holding)
-
- fractional_holdings = []
- for currency in sorted(by_currency, key=ordering.get):
- currency_holdings = by_currency[currency]
-
- # Compute total market value for that currency.
- total_book_value = ZERO
- total_market_value = ZERO
- for holding in currency_holdings:
- if holding.book_value:
- total_book_value += holding.book_value
- if holding.market_value:
- total_market_value += holding.market_value
-
- # Sort the currency's holdings with decreasing values of market value.
- currency_holdings.sort(
- key=lambda holding: holding.market_value or ZERO,
- reverse=True)
-
- # Output new holdings with the relevant values replaced.
- for holding in currency_holdings:
- fractional_holdings.append(
- holding._replace(book_value=(holding.book_value / total_book_value
- if holding.book_value is not None
- else None),
- market_value=(holding.market_value / total_market_value
- if holding.market_value is not None
- else None)))
- return fractional_holdings
-
+ Source code in beancount/ops/lifetimes.py
+
def compress_intervals_days(intervals, num_days):
+    """Compress a list of date pairs to ignore short stretches of unused days.
+
+    Args:
+      intervals: A list of pairs of datetime.date instances.
+      num_days: An integer, the number of unused days to require for intervals
+        to be distinct, to allow a gap.
+    Returns:
+      A new dict of lifetimes map where some intervals may have been joined.
+    """
+    ignore_interval = datetime.timedelta(days=num_days)
+    new_intervals = []
+    iter_intervals = iter(intervals)
+    last_begin, last_end = next(iter_intervals)
+    for date_begin, date_end in iter_intervals:
+        if date_begin - last_end < ignore_interval:
+            # Compress.
+            last_end = date_end
+            continue
+        new_intervals.append((last_begin, last_end))
+        last_begin, last_end = date_begin, date_end
+    new_intervals.append((last_begin, last_end))
+    return new_intervals
+
Compress a lifetimes map to ignore short stretches of unused days.
@@ -3006,8 +2457,8 @@
Parameters:
-
holding – An instance of Holding.
-
scale_factor – A float or Decimal number.
+
lifetimes_map – A dict of currency intervals as returned by get_commodity_lifetimes.
+
num_days – An integer, the number of unused days to ignore.
@@ -3023,34 +2474,28 @@
Returns:
-
A scaled copy of the holding.
+
A new dict of lifetimes map where some intervals may have been joined.
- Source code in beancount/ops/holdings.py
-
def scale_holding(holding, scale_factor):
- """Scale the values of a holding.
-
- Args:
- holding: An instance of Holding.
- scale_factor: A float or Decimal number.
- Returns:
- A scaled copy of the holding.
- """
- return Holding(
- holding.account,
- holding.number * scale_factor if holding.number else None,
- holding.currency,
- holding.cost_number,
- holding.cost_currency,
- holding.book_value * scale_factor if holding.book_value else None,
- holding.market_value * scale_factor if holding.market_value else None,
- holding.price_number,
- holding.price_date)
-
+ Source code in beancount/ops/lifetimes.py
+
def compress_lifetimes_days(lifetimes_map, num_days):
+    """Compress a lifetimes map to ignore short stretches of unused days.
+
+    Args:
+      lifetimes_map: A dict of currency intervals as returned by get_commodity_lifetimes.
+      num_days: An integer, the number of unused days to ignore.
+    Returns:
+      A new dict of lifetimes map where some intervals may have been joined.
+    """
+    return {
+        currency_pair: compress_intervals_days(intervals, num_days)
+        for currency_pair, intervals in lifetimes_map.items()
+    }
+
Given a Beancount ledger, compute time intervals where we hold each commodity.
-
This script computes, for each commodity, which time intervals it is required at.
-This can then be used to identify a list of dates at which we need to fetch prices
-in order to properly fill the price database.
Compress a list of date pairs to ignore short stretches of unused days.
+
Given a list of directives, figure out the life of each commodity.
@@ -3126,9 +2527,7 @@
Parameters:
-
intervals – A list of pairs of datetime.date instances.
-
num_days – An integer, the number of unused days to require for intervals
-to be distinct, to allow a gap.
+
entries – A list of directives.
@@ -3144,7 +2543,9 @@
Returns:
-
A new dict of lifetimes map where some intervals may have been joined.
+
A dict of (currency, cost-currency) commodity strings to lists of (start,
+end) datetime.date pairs. The dates are inclusive of the day the commodity
+was seen; the end/last dates are one day after the last date seen.
@@ -3152,30 +2553,64 @@
Source code in beancount/ops/lifetimes.py
-
def compress_intervals_days(intervals, num_days):
- """Compress a list of date pairs to ignore short stretches of unused days.
-
- Args:
- intervals: A list of pairs of datetime.date instances.
- num_days: An integer, the number of unused days to require for intervals
- to be distinct, to allow a gap.
- Returns:
- A new dict of lifetimes map where some intervals may have been joined.
- """
- ignore_interval = datetime.timedelta(days=num_days)
- new_intervals = []
- iter_intervals = iter(intervals)
- last_begin, last_end = next(iter_intervals)
- for date_begin, date_end in iter_intervals:
- if date_begin - last_end < ignore_interval:
- # Compress.
- last_end = date_end
- continue
- new_intervals.append((last_begin, last_end))
- last_begin, last_end = date_begin, date_end
- new_intervals.append((last_begin, last_end))
- return new_intervals
-
+
def get_commodity_lifetimes(entries):
    """Given a list of directives, figure out the life of each commodity.

    Args:
      entries: A list of directives.
    Returns:
      A dict of (currency, cost-currency) commodity strings to lists of (start,
      end) datetime.date pairs. The dates are inclusive of the day the commodity
      was seen; the end/last dates are one day _after_ the last date seen.
    """
    lifetimes = collections.defaultdict(list)

    # The current set of active commodities.
    commodities = set()

    # The current balances across all accounts.
    balances = collections.defaultdict(inventory.Inventory)

    for entry in entries:
        # Process only transaction entries.
        if not isinstance(entry, data.Transaction):
            continue

        # Update the balance of affected accounts and check locally whether that
        # triggered a change in the set of commodities.
        commodities_changed = False
        for posting in entry.postings:
            balance = balances[posting.account]
            commodities_before = balance.currency_pairs()
            balance.add_position(posting)
            commodities_after = balance.currency_pairs()
            if commodities_after != commodities_before:
                commodities_changed = True

        # If there was a change in one of the affected account's list of
        # commodities, recompute the total set globally. This should not
        # occur very frequently.
        if commodities_changed:
            new_commodities = set(
                itertools.chain(*(inv.currency_pairs() for inv in balances.values()))
            )
            if new_commodities != commodities:
                # The new global set of commodities has changed; update the
                # dictionary of intervals.
                for currency in new_commodities - commodities:
                    lifetimes[currency].append((entry.date, None))

                for currency in commodities - new_commodities:
                    lifetime = lifetimes[currency]
                    begin_date, end_date = lifetime.pop(-1)
                    assert end_date is None
                    lifetime.append((begin_date, entry.date + ONEDAY))

                # Update our current set.
                commodities = new_commodities

    return lifetimes
+
Compress a lifetimes map to ignore short stretches of unused days.
+
Enumerate all the commodities and days where the price is required.
+
Given a map of lifetimes for a set of commodities, enumerate all the days
+for each commodity where it is active. This can be used to connect to a
+historical price fetcher routine to fill in missing price entries from an
+existing ledger.
@@ -3207,8 +2646,10 @@
Parameters:
-
lifetimes_map – A dict of currency intervals as returned by get_commodity_lifetimes.
-
num_days – An integer, the number of unused days to ignore.
+
lifetimes_map – A dict of currency to active intervals as returned by
+get_commodity_lifetimes().
+
date_last – A datetime.date instance, the last date which we're interested in.
+
weekdays_only – Option to limit fetching to weekdays only.
@@ -3224,7 +2665,7 @@
Returns:
-
A new dict of lifetimes map where some intervals may have been joined.
+
Tuples of (date, currency, cost-currency).
@@ -3232,18 +2673,46 @@
Source code in beancount/ops/lifetimes.py
-
def compress_lifetimes_days(lifetimes_map, num_days):
- """Compress a lifetimes map to ignore short stretches of unused days.
-
- Args:
- lifetimes_map: A dict of currency intervals as returned by get_commodity_lifetimes.
- num_days: An integer, the number of unused days to ignore.
- Returns:
- A new dict of lifetimes map where some intervals may have been joined.
- """
- return {currency_pair: compress_intervals_days(intervals, num_days)
- for currency_pair, intervals in lifetimes_map.items()}
-
+
def required_daily_prices(lifetimes_map, date_last, weekdays_only=False):
    """Enumerate all the commodities and days where the price is required.

    Given a map of lifetimes for a set of commodities, enumerate all the days
    for each commodity where it is active. This can be used to connect to a
    historical price fetcher routine to fill in missing price entries from an
    existing ledger.

    Args:
      lifetimes_map: A dict of currency to active intervals as returned by
        get_commodity_lifetimes().
      date_last: A datetime.date instance, the last date which we're interested in.
      weekdays_only: Option to limit fetching to weekdays only.
    Returns:
      Tuples of (date, currency, cost-currency).
    """
    results = []
    for currency_pair, intervals in lifetimes_map.items():
        # Pairs without a cost currency cannot be priced; skip them.
        if currency_pair[1] is None:
            continue
        for date_begin, date_end in intervals:
            # Find first weekday starting on or before minimum date.
            date = date_begin
            if weekdays_only:
                diff_days = 4 - date_begin.weekday()
                if diff_days < 0:
                    # date_begin falls on a weekend; back up to Friday.
                    date += datetime.timedelta(days=diff_days)

            # Iterate over all (week)days in the interval; an open-ended
            # interval (end of None) runs up to date_last.
            if date_end is None:
                date_end = date_last
            while date < date_end:
                results.append((date, currency_pair[0], currency_pair[1]))
                if weekdays_only and date.weekday() == 4:
                    # Friday: skip over the weekend to Monday.
                    date += 3 * ONEDAY
                else:
                    date += ONEDAY

    return sorted(results)
+
Given a list of directives, figure out the life of each commodity.
+
Enumerate all the commodities and Fridays where the price is required.
+
Given a map of lifetimes for a set of commodities, enumerate all the Fridays
+for each commodity where it is active. This can be used to connect to a
+historical price fetcher routine to fill in missing price entries from an
+existing ledger.
@@ -3275,7 +2748,9 @@
Parameters:
-
entries – A list of directives.
+
lifetimes_map – A dict of currency to active intervals as returned by
+get_commodity_lifetimes().
+
date_last – A datetime.date instance, the last date which we're interested in.
@@ -3291,9 +2766,7 @@
Returns:
-
A dict of (currency, cost-currency) commodity strings to lists of (start,
-end) datetime.date pairs. The dates are inclusive of the day the commodity
-was seen; the end/last dates are one day after the last date seen.
+
Tuples of (date, currency, cost-currency).
@@ -3301,63 +2774,40 @@
Source code in beancount/ops/lifetimes.py
-
def get_commodity_lifetimes(entries):
- """Given a list of directives, figure out the life of each commodity.
-
- Args:
- entries: A list of directives.
- Returns:
- A dict of (currency, cost-currency) commodity strings to lists of (start,
- end) datetime.date pairs. The dates are inclusive of the day the commodity
- was seen; the end/last dates are one day _after_ the last date seen.
- """
- lifetimes = collections.defaultdict(list)
-
- # The current set of active commodities.
- commodities = set()
-
- # The current balances across all accounts.
- balances = collections.defaultdict(inventory.Inventory)
-
- for entry in entries:
- # Process only transaction entries.
- if not isinstance(entry, data.Transaction):
- continue
-
- # Update the balance of affected accounts and check locally whether that
- # triggered a change in the set of commodities.
- commodities_changed = False
- for posting in entry.postings:
- balance = balances[posting.account]
- commodities_before = balance.currency_pairs()
- balance.add_position(posting)
- commodities_after = balance.currency_pairs()
- if commodities_after != commodities_before:
- commodities_changed = True
-
- # If there was a change in one of the affected account's list of
- # commodities, recompute the total set globally. This should not
- # occur very frequently.
- if commodities_changed:
- new_commodities = set(
- itertools.chain(*(inv.currency_pairs() for inv in balances.values())))
- if new_commodities != commodities:
- # The new global set of commodities has changed; update our
- # the dictionary of intervals.
- for currency in new_commodities - commodities:
- lifetimes[currency].append((entry.date, None))
-
- for currency in commodities - new_commodities:
- lifetime = lifetimes[currency]
- begin_date, end_date = lifetime.pop(-1)
- assert end_date is None
- lifetime.append((begin_date, entry.date + ONEDAY))
-
- # Update our current set.
- commodities = new_commodities
-
- return lifetimes
-
+
def required_weekly_prices(lifetimes_map, date_last):
    """Enumerate all the commodities and Fridays where the price is required.

    Given a map of lifetimes for a set of commodities, enumerate all the Fridays
    for each commodity where it is active. This can be used to connect to a
    historical price fetcher routine to fill in missing price entries from an
    existing ledger.

    Args:
      lifetimes_map: A dict of currency to active intervals as returned by
        get_commodity_lifetimes().
      date_last: A datetime.date instance, the last date which we're interested in.
    Returns:
      Tuples of (date, currency, cost-currency).
    """
    results = []
    for currency_pair, intervals in lifetimes_map.items():
        # Pairs without a cost currency cannot be priced; skip them.
        if currency_pair[1] is None:
            continue
        for date_begin, date_end in intervals:
            # Find the first Friday on or before the minimum date. Note the
            # '>= 1' condition: any day strictly before Friday must reach
            # back to the *previous* week's Friday.
            diff_days = 4 - date_begin.weekday()
            if diff_days >= 1:
                diff_days -= 7
            date = date_begin + datetime.timedelta(days=diff_days)

            # Iterate over all Fridays; an open-ended interval (end of None)
            # runs up to date_last.
            if date_end is None:
                date_end = date_last
            while date < date_end:
                results.append((date, currency_pair[0], currency_pair[1]))
                date += ONE_WEEK
    return sorted(results)
+
Enumerate all the commodities and Fridays where the price is required.
-
Given a map of lifetimes for a set of commodities, enumerate all the Fridays
-for each commodity where it is active. This can be used to connect to a
-historical price fetcher routine to fill in missing price entries from an
-existing ledger.
+
Trim a list of date pairs to be within a start and end date.
+Useful in update-style price fetching.
@@ -3393,9 +2840,9 @@
Parameters:
-
lifetimes_map – A dict of currency to active intervals as returned by
-get_commodity_lifetimes().
-
date_last – A datetime.date instance, the last date which we're interested in.
+
intervals – A list of pairs of datetime.date instances
+
trim_start – An inclusive starting date.
+
trim_end – An exclusive starting date.
@@ -3411,7 +2858,7 @@
Returns:
-
Tuples of (date, currency, cost-currency).
+
A list of new intervals (pairs of (date, date)).
@@ -3419,40 +2866,33 @@
Source code in beancount/ops/lifetimes.py
-
def required_weekly_prices(lifetimes_map, date_last):
- """Enumerate all the commodities and Fridays where the price is required.
-
- Given a map of lifetimes for a set of commodities, enumerate all the Fridays
- for each commodity where it is active. This can be used to connect to a
- historical price fetcher routine to fill in missing price entries from an
- existing ledger.
-
- Args:
- lifetimes_map: A dict of currency to active intervals as returned by
- get_commodity_lifetimes().
- date_last: A datetime.date instance, the last date which we're interested in.
- Returns:
- Tuples of (date, currency, cost-currency).
- """
- results = []
- for currency_pair, intervals in lifetimes_map.items():
- if currency_pair[1] is None:
- continue
- for date_begin, date_end in intervals:
- # Find first Friday before the minimum date.
- diff_days = 4 - date_begin.weekday()
- if diff_days > 1:
- diff_days -= 7
- date = date_begin + datetime.timedelta(days=diff_days)
-
- # Iterate over all Fridays.
- if date_end is None:
- date_end = date_last
- while date < date_end:
- results.append((date, currency_pair[0], currency_pair[1]))
- date += ONE_WEEK
- return sorted(results)
-
+
def trim_intervals(intervals, trim_start=None, trim_end=None):
    """Trim a list of date pairs to be within a start and end date.

    Useful in update-style price fetching.

    Args:
      intervals: A list of pairs of datetime.date instances.
      trim_start: An inclusive starting date.
      trim_end: An exclusive ending date.
    Returns:
      A list of new intervals (pairs of (date, date)).
    Raises:
      ValueError: If trim_end is before trim_start.
    """
    if trim_start is not None and trim_end is not None and trim_end < trim_start:
        raise ValueError("Trim end date is before start date")

    new_intervals = []
    for date_begin, date_end in intervals:
        # Clamp the interval's start forward to trim_start.
        if trim_start is not None and trim_start > date_begin:
            date_begin = trim_start
        # Clamp the interval's end backward to trim_end; an open-ended
        # interval (end of None) is closed at trim_end.
        if trim_end is not None:
            if date_end is None or trim_end < date_end:
                date_end = trim_end

        # Drop intervals that were trimmed away entirely.
        if date_end is None or date_begin <= date_end:
            new_intervals.append((date_begin, date_end))
    return new_intervals
+
def pad(entries, options_map):
- """Insert transaction entries for to fulfill a subsequent balance check.
-
- Synthesize and insert Transaction entries right after Pad entries in order
- to fulfill checks in the padded accounts. Returns a new list of entries.
- Note that this doesn't pad across parent-child relationships, it is a very
- simple kind of pad. (I have found this to be sufficient in practice, and
- simpler to implement and understand.)
-
- Furthermore, this pads for a single currency only, that is, balance checks
- are specified only for one currency at a time, and pads will only be
- inserted for those currencies.
-
- Args:
- entries: A list of directives.
- options_map: A parser options dict.
- Returns:
- A new list of directives, with Pad entries inserted, and a list of new
- errors produced.
- """
- pad_errors = []
-
- # Find all the pad entries and group them by account.
- pads = list(misc_utils.filter_type(entries, data.Pad))
- pad_dict = misc_utils.groupby(lambda x: x.account, pads)
-
- # Partially realize the postings, so we can iterate them by account.
- by_account = realization.postings_by_account(entries)
-
- # A dict of pad -> list of entries to be inserted.
- new_entries = {id(pad): [] for pad in pads}
-
- # Process each account that has a padding group.
- for account_, pad_list in sorted(pad_dict.items()):
-
- # Last encountered / currency active pad entry.
- active_pad = None
-
- # Gather all the postings for the account and its children.
- postings = []
- is_child = account.parent_matcher(account_)
- for item_account, item_postings in by_account.items():
- if is_child(item_account):
- postings.extend(item_postings)
- postings.sort(key=data.posting_sortkey)
-
- # A set of currencies already padded so far in this account.
- padded_lots = set()
-
- pad_balance = inventory.Inventory()
- for entry in postings:
-
- assert not isinstance(entry, data.Posting)
- if isinstance(entry, data.TxnPosting):
- # This is a transaction; update the running balance for this
- # account.
- pad_balance.add_position(entry.posting)
-
- elif isinstance(entry, data.Pad):
- if entry.account == account_:
- # Mark this newly encountered pad as active and allow all lots
- # to be padded heretofore.
- active_pad = entry
- padded_lots = set()
-
- elif isinstance(entry, data.Balance):
- check_amount = entry.amount
-
- # Compare the current balance amount to the expected one from
- # the check entry. IMPORTANT: You need to understand that this
- # does not check a single position, but rather checks that the
- # total amount for a particular currency (which itself is
- # distinct from the cost).
- balance_amount = pad_balance.get_currency_units(check_amount.currency)
- diff_amount = amount.sub(balance_amount, check_amount)
-
- # Use the specified tolerance or automatically infer it.
- tolerance = balance.get_balance_tolerance(entry, options_map)
-
- if abs(diff_amount.number) > tolerance:
- # The check fails; we need to pad.
-
- # Pad only if pad entry is active and we haven't already
- # padded that lot since it was last encountered.
- if active_pad and (check_amount.currency not in padded_lots):
-
- # Note: we decide that it's an error to try to pad
- # positions at cost; we check here that all the existing
- # positions with that currency have no cost.
- positions = [pos
- for pos in pad_balance.get_positions()
- if pos.units.currency == check_amount.currency]
- for position_ in positions:
- if position_.cost is not None:
- pad_errors.append(
- PadError(entry.meta,
- ("Attempt to pad an entry with cost for "
- "balance: {}".format(pad_balance)),
- active_pad))
-
- # Thus our padding lot is without cost by default.
- diff_position = position.Position.from_amounts(
- amount.Amount(check_amount.number - balance_amount.number,
- check_amount.currency))
-
- # Synthesize a new transaction entry for the difference.
- narration = ('(Padding inserted for Balance of {} for '
- 'difference {})').format(check_amount, diff_position)
- new_entry = data.Transaction(
- active_pad.meta.copy(), active_pad.date, flags.FLAG_PADDING,
- None, narration, data.EMPTY_SET, data.EMPTY_SET, [])
-
- new_entry.postings.append(
- data.Posting(active_pad.account,
- diff_position.units, diff_position.cost,
- None, None, None))
- neg_diff_position = -diff_position
- new_entry.postings.append(
- data.Posting(active_pad.source_account,
- neg_diff_position.units, neg_diff_position.cost,
- None, None, None))
-
- # Save it for later insertion after the active pad.
- new_entries[id(active_pad)].append(new_entry)
-
- # Fixup the running balance.
- pos, _ = pad_balance.add_position(diff_position)
- if pos is not None and pos.is_negative_at_cost():
- raise ValueError(
- "Position held at cost goes negative: {}".format(pos))
-
- # Mark this lot as padded. Further checks should not pad this lot.
- padded_lots.add(check_amount.currency)
-
- # Insert the newly created entries right after the pad entries that created them.
- padded_entries = []
- for entry in entries:
- padded_entries.append(entry)
- if isinstance(entry, data.Pad):
- entry_list = new_entries[id(entry)]
- if entry_list:
- padded_entries.extend(entry_list)
- else:
- # Generate errors on unused pad entries.
- pad_errors.append(
- PadError(entry.meta, "Unused Pad entry", entry))
-
- return padded_entries, pad_errors
-
+
def pad(entries, options_map):
    """Insert transaction entries for to fulfill a subsequent balance check.

    Synthesize and insert Transaction entries right after Pad entries in order
    to fulfill checks in the padded accounts. Returns a new list of entries.
    Note that this doesn't pad across parent-child relationships, it is a very
    simple kind of pad. (I have found this to be sufficient in practice, and
    simpler to implement and understand.)

    Furthermore, this pads for a single currency only, that is, balance checks
    are specified only for one currency at a time, and pads will only be
    inserted for those currencies.

    Args:
      entries: A list of directives.
      options_map: A parser options dict.
    Returns:
      A new list of directives, with Pad entries inserted, and a list of new
      errors produced.
    """
    pad_errors = []

    # Find all the pad entries and group them by account.
    pads = list(misc_utils.filter_type(entries, data.Pad))
    pad_dict = misc_utils.groupby(lambda x: x.account, pads)

    # Partially realize the postings, so we can iterate them by account.
    by_account = realization.postings_by_account(entries)

    # A dict of pad -> list of entries to be inserted.
    new_entries = {id(pad): [] for pad in pads}

    # Process each account that has a padding group.
    for account_, pad_list in sorted(pad_dict.items()):
        # Last encountered / currency active pad entry.
        active_pad = None

        # Gather all the postings for the account and its children.
        postings = []
        is_child = account.parent_matcher(account_)
        for item_account, item_postings in by_account.items():
            if is_child(item_account):
                postings.extend(item_postings)
        postings.sort(key=data.posting_sortkey)

        # A set of currencies already padded so far in this account.
        padded_lots = set()

        pad_balance = inventory.Inventory()
        for entry in postings:
            assert not isinstance(entry, data.Posting)
            if isinstance(entry, data.TxnPosting):
                # This is a transaction; update the running balance for this
                # account.
                pad_balance.add_position(entry.posting)

            elif isinstance(entry, data.Pad):
                if entry.account == account_:
                    # Mark this newly encountered pad as active and allow all
                    # lots to be padded heretofore.
                    active_pad = entry
                    padded_lots = set()

            elif isinstance(entry, data.Balance):
                check_amount = entry.amount

                # Compare the current balance amount to the expected one from
                # the check entry. IMPORTANT: You need to understand that this
                # does not check a single position, but rather checks that the
                # total amount for a particular currency (which itself is
                # distinct from the cost).
                balance_amount = pad_balance.get_currency_units(check_amount.currency)
                diff_amount = amount.sub(balance_amount, check_amount)

                # Use the specified tolerance or automatically infer it.
                tolerance = balance.get_balance_tolerance(entry, options_map)

                if abs(diff_amount.number) > tolerance:
                    # The check fails; we need to pad.

                    # Pad only if pad entry is active and we haven't already
                    # padded that lot since it was last encountered.
                    if active_pad and (check_amount.currency not in padded_lots):
                        # Note: we decide that it's an error to try to pad
                        # positions at cost; we check here that all the existing
                        # positions with that currency have no cost.
                        positions = [
                            pos
                            for pos in pad_balance.get_positions()
                            if pos.units.currency == check_amount.currency
                        ]
                        for position_ in positions:
                            if position_.cost is not None:
                                pad_errors.append(
                                    PadError(
                                        entry.meta,
                                        (
                                            "Attempt to pad an entry with cost for "
                                            "balance: {}".format(pad_balance)
                                        ),
                                        active_pad,
                                    )
                                )

                        # Thus our padding lot is without cost by default.
                        diff_position = position.Position.from_amounts(
                            amount.Amount(
                                check_amount.number - balance_amount.number,
                                check_amount.currency,
                            )
                        )

                        # Synthesize a new transaction entry for the difference.
                        narration = (
                            "(Padding inserted for Balance of {} for "
                            "difference {})"
                        ).format(check_amount, diff_position)
                        new_entry = data.Transaction(
                            active_pad.meta.copy(),
                            active_pad.date,
                            flags.FLAG_PADDING,
                            None,
                            narration,
                            data.EMPTY_SET,
                            data.EMPTY_SET,
                            [],
                        )

                        new_entry.postings.append(
                            data.Posting(
                                active_pad.account,
                                diff_position.units,
                                diff_position.cost,
                                None,
                                None,
                                {},
                            )
                        )
                        neg_diff_position = -diff_position
                        new_entry.postings.append(
                            data.Posting(
                                active_pad.source_account,
                                neg_diff_position.units,
                                neg_diff_position.cost,
                                None,
                                None,
                                {},
                            )
                        )

                        # Save it for later insertion after the active pad.
                        new_entries[id(active_pad)].append(new_entry)

                        # Fixup the running balance.
                        pos, _ = pad_balance.add_position(diff_position)
                        if pos is not None and pos.is_negative_at_cost():
                            raise ValueError(
                                "Position held at cost goes negative: {}".format(pos)
                            )

                # Mark this lot as padded. Further checks should not pad this lot.
                padded_lots.add(check_amount.currency)

    # Insert the newly created entries right after the pad entries that created them.
    padded_entries = []
    for entry in entries:
        padded_entries.append(entry)
        if isinstance(entry, data.Pad):
            entry_list = new_entries[id(entry)]
            if entry_list:
                padded_entries.extend(entry_list)
            else:
                # Generate errors on unused pad entries.
                pad_errors.append(PadError(entry.meta, "Unused Pad entry", entry))

    return padded_entries, pad_errors
+
date – An optional datetime.date instance. If provided, stop accumulating
on and after this date. This is useful for summarization before a
specific date.
+
compress_unbooked – For accounts that have a booking method of NONE,
+compress their positions into a single average position. This can be
+used when you export the full list of positions, because those accounts
+will have a myriad of small positions from fees at negative cost and
+what-not.
@@ -3936,42 +3408,67 @@
Source code in beancount/ops/summarize.py
-
def balance_by_account(entries, date=None):
- """Sum up the balance per account for all entries strictly before 'date'.
-
- Args:
- entries: A list of directives.
- date: An optional datetime.date instance. If provided, stop accumulating
- on and after this date. This is useful for summarization before a
- specific date.
- Returns:
- A pair of a dict of account string to instance Inventory (the balance of
- this account before the given date), and the index in the list of entries
- where the date was encountered. If all entries are located before the
- cutoff date, an index one beyond the last entry is returned.
- """
- balances = collections.defaultdict(inventory.Inventory)
- for index, entry in enumerate(entries):
- if date and entry.date >= date:
- break
-
- if isinstance(entry, Transaction):
- for posting in entry.postings:
- account_balance = balances[posting.account]
-
- # Note: We must allow negative lots at cost, because this may be
- # used to reduce a filtered list of entries which may not
- # include the entries necessary to keep units at cost always
- # above zero. The only summation that is guaranteed to be above
- # zero is if all the entries are being summed together, no
- # entries are filtered, at least for a particular account's
- # postings.
- account_balance.add_position(posting)
- else:
- index = len(entries)
-
- return balances, index
-
+
def balance_by_account(entries, date=None, compress_unbooked=False):
    """Sum up the balance per account for all entries strictly before 'date'.

    Args:
      entries: A list of directives.
      date: An optional datetime.date instance. If provided, stop accumulating
        on and after this date. This is useful for summarization before a
        specific date.
      compress_unbooked: For accounts that have a booking method of NONE,
        compress their positions into a single average position. This can be
        used when you export the full list of positions, because those accounts
        will have a myriad of small positions from fees at negative cost and
        what-not.
    Returns:
      A pair of a dict of account string to instance Inventory (the balance of
      this account before the given date), and the index in the list of entries
      where the date was encountered. If all entries are located before the
      cutoff date, an index one beyond the last entry is returned.
    """
    balances = collections.defaultdict(inventory.Inventory)
    for index, entry in enumerate(entries):
        if date and entry.date >= date:
            break

        if isinstance(entry, Transaction):
            for posting in entry.postings:
                account_balance = balances[posting.account]

                # Note: We must allow negative lots at cost, because this may be
                # used to reduce a filtered list of entries which may not
                # include the entries necessary to keep units at cost always
                # above zero. The only summation that is guaranteed to be above
                # zero is if all the entries are being summed together, no
                # entries are filtered, at least for a particular account's
                # postings.
                account_balance.add_position(posting)
    else:
        # The loop ran to completion: all entries precede the cutoff.
        index = len(entries)

    # If the account has "NONE" booking method, merge all its postings
    # together in order to obtain an accurate cost basis and balance of
    # units.
    #
    # (This is a complex issue.) If you accrued positions without having them
    # booked properly against existing cost bases, you have not properly accounted
    # for the profit/loss to other postings. This means that the resulting
    # profit/loss is merged in the cost basis of the positive and negative
    # postings.
    if compress_unbooked:
        oc_map = getters.get_account_open_close(entries)
        accounts_map = {account: dopen for account, (dopen, _) in oc_map.items()}

        for account, balance in balances.items():
            dopen = accounts_map.get(account, None)
            if dopen is not None and dopen.booking is data.Booking.NONE:
                average_balance = balance.average()
                balances[account] = inventory.Inventory(pos for pos in average_balance)

    return balances, index
+
def cap(entries,
- account_types,
- conversion_currency,
- account_earnings,
- account_conversions):
- """Transfer net income to equity and insert a final conversion entry.
-
- This is used to move and nullify balances from the income and expense
- accounts to an equity account in order to draw up a balance sheet with a
- balance of precisely zero.
-
- Args:
- entries: A list of directives.
- account_types: An instance of AccountTypes.
- conversion_currency: A string, the transfer currency to use for zero prices
- on the conversion entry.
- account_earnings: A string, the name of the equity account to transfer
- final balances of the income and expense accounts to.
- account_conversions: A string, the name of the equity account to use as
- the source for currency conversions.
- Returns:
- A modified list of entries, with the income and expense accounts
- transferred.
- """
-
- # Transfer the balances of income and expense accounts as earnings / net
- # income.
- income_statement_account_pred = (
- lambda account: is_income_statement_account(account, account_types))
- entries = transfer_balances(entries, None,
- income_statement_account_pred,
- account_earnings)
-
- # Insert final conversion entries.
- entries = conversions(entries, account_conversions, conversion_currency, None)
-
- return entries
-
+
def cap(entries, account_types, conversion_currency, account_earnings, account_conversions):
    """Transfer net income to equity and insert a final conversion entry.

    This is used to move and nullify balances from the income and expense
    accounts to an equity account in order to draw up a balance sheet with a
    balance of precisely zero.

    Args:
      entries: A list of directives.
      account_types: An instance of AccountTypes.
      conversion_currency: A string, the transfer currency to use for zero prices
        on the conversion entry.
      account_earnings: A string, the name of the equity account to transfer
        final balances of the income and expense accounts to.
      account_conversions: A string, the name of the equity account to use as
        the source for currency conversions.
    Returns:
      A modified list of entries, with the income and expense accounts
      transferred.
    """

    # Predicate matching income-statement accounts (PEP 8: prefer a def over
    # assigning a lambda to a name).
    def income_statement_account_pred(account):
        return is_income_statement_account(account, account_types)

    # Transfer the balances of income and expense accounts as earnings / net
    # income.
    entries = transfer_balances(
        entries, None, income_statement_account_pred, account_earnings
    )

    # Insert final conversion entries.
    entries = conversions(entries, account_conversions, conversion_currency, None)

    return entries
+
def cap_opt(entries, options_map):
- """Close by getting all the parameters from an options map.
-
- See cap() for details.
-
- Args:
- entries: See cap().
- options_map: A parser's option_map.
- Returns:
- Same as close().
- """
- account_types = options.get_account_types(options_map)
- current_accounts = options.get_current_accounts(options_map)
- conversion_currency = options_map['conversion_currency']
- return cap(entries,
- account_types,
- conversion_currency,
- *current_accounts)
-
+
def cap_opt(entries, options_map):
    """Close by getting all the parameters from an options map.

    See cap() for details.

    Args:
      entries: See cap().
      options_map: A parser's option_map.
    Returns:
      Same as close().
    """
    account_types = options.get_account_types(options_map)
    current_accounts = options.get_current_accounts(options_map)
    conversion_currency = options_map["conversion_currency"]
    return cap(entries, account_types, conversion_currency, *current_accounts)
+
def clamp(entries,
- begin_date, end_date,
- account_types,
- conversion_currency,
- account_earnings,
- account_opening,
- account_conversions):
- """Filter entries to include only those during a specified time period.
-
- Firstly, this method will transfer all balances for the income and expense
- accounts occurring before the given period begin date to the
- 'account_earnings' account (earnings before the period, or "retained
- earnings") and summarize all of the transactions before that date against
- the 'account_opening' account (usually "opening balances"). The resulting
- income and expense accounts should have no transactions (since their
- balances have been transferred out and summarization of zero balances should
- not add any transactions).
-
- Secondly, all the entries after the period end date will be truncated and a
- conversion entry will be added for the resulting transactions that reflect
- changes occurring between the beginning and end of the exercise period. The
- resulting balance of all account should be empty.
-
- Args:
- entries: A list of directive tuples.
- begin_date: A datetime.date instance, the beginning of the period.
- end_date: A datetime.date instance, one day beyond the end of the period.
- account_types: An instance of AccountTypes.
- conversion_currency: A string, the transfer currency to use for zero prices
- on the conversion entry.
- account_earnings: A string, the name of the account to transfer
- previous earnings from the income statement accounts to the balance
- sheet.
- account_opening: A string, the name of the account in equity
- to transfer previous balances from, in order to initialize account
- balances at the beginning of the period. This is typically called an
- opening balances account.
- account_conversions: A string, the name of the equity account to
- book currency conversions against.
- Returns:
- A new list of entries is returned, and the index that points to the first
- original transaction after the beginning date of the period. This index
- can be used to generate the opening balances report, which is a balance
- sheet fed with only the summarized entries.
- """
- # Transfer income and expenses before the period to equity.
- income_statement_account_pred = (
- lambda account: is_income_statement_account(account, account_types))
- entries = transfer_balances(entries, begin_date,
- income_statement_account_pred, account_earnings)
-
- # Summarize all the previous balances, after transferring the income and
- # expense balances, so all entries for those accounts before the begin date
- # should now disappear.
- entries, index = summarize(entries, begin_date, account_opening)
-
- # Truncate the entries after this.
- entries = truncate(entries, end_date)
-
- # Insert conversion entries.
- entries = conversions(entries, account_conversions, conversion_currency, end_date)
-
- return entries, index
-
+
def clamp(
+    entries,
+    begin_date,
+    end_date,
+    account_types,
+    conversion_currency,
+    account_earnings,
+    account_opening,
+    account_conversions,
+):
+"""Filter entries to include only those during a specified time period.
+
+ Firstly, this method will transfer all balances for the income and expense
+ accounts occurring before the given period begin date to the
+ 'account_earnings' account (earnings before the period, or "retained
+ earnings") and summarize all of the transactions before that date against
+ the 'account_opening' account (usually "opening balances"). The resulting
+ income and expense accounts should have no transactions (since their
+ balances have been transferred out and summarization of zero balances should
+ not add any transactions).
+
+ Secondly, all the entries after the period end date will be truncated and a
+ conversion entry will be added for the resulting transactions that reflect
+ changes occurring between the beginning and end of the exercise period. The
+ resulting balance of all account should be empty.
+
+ Args:
+ entries: A list of directive tuples.
+ begin_date: A datetime.date instance, the beginning of the period.
+ end_date: A datetime.date instance, one day beyond the end of the period.
+ account_types: An instance of AccountTypes.
+ conversion_currency: A string, the transfer currency to use for zero prices
+ on the conversion entry.
+ account_earnings: A string, the name of the account to transfer
+ previous earnings from the income statement accounts to the balance
+ sheet.
+ account_opening: A string, the name of the account in equity
+ to transfer previous balances from, in order to initialize account
+ balances at the beginning of the period. This is typically called an
+ opening balances account.
+ account_conversions: A string, the name of the equity account to
+ book currency conversions against.
+ Returns:
+ A new list of entries is returned, and the index that points to the first
+ original transaction after the beginning date of the period. This index
+ can be used to generate the opening balances report, which is a balance
+ sheet fed with only the summarized entries.
+ """
+    # Transfer income and expenses before the period to equity.
+    income_statement_account_pred = lambda account: is_income_statement_account(
+        account, account_types
+    )
+    entries = transfer_balances(
+        entries, begin_date, income_statement_account_pred, account_earnings
+    )
+
+    # Summarize all the previous balances, after transferring the income and
+    # expense balances, so all entries for those accounts before the begin date
+    # should now disappear.
+    entries, index = summarize(entries, begin_date, account_opening)
+
+    # Truncate the entries after this.
+    entries = truncate(entries, end_date)
+
+    # Insert conversion entries.
+    entries = conversions(entries, account_conversions, conversion_currency, end_date)
+
+    return entries, index
+
def clear(entries,
- date,
- account_types,
- account_earnings):
- """Transfer income and expenses balances at the given date to the equity accounts.
-
- This method insert entries to zero out balances on income and expenses
- accounts by transferring them to an equity account.
-
- Args:
- entries: A list of directive tuples.
- date: A datetime.date instance, one day beyond the end of the period. This
- date can be optionally left to None in order to close at the end of the
- list of entries.
- account_types: An instance of AccountTypes.
- account_earnings: A string, the name of the account to transfer
- previous earnings from the income statement accounts to the balance
- sheet.
- Returns:
- A new list of entries is returned, and the index that points to one before
- the last original transaction before the transfers.
- """
- index = len(entries)
-
- # Transfer income and expenses before the period to equity.
- income_statement_account_pred = (
- lambda account: is_income_statement_account(account, account_types))
- new_entries = transfer_balances(entries, date,
- income_statement_account_pred, account_earnings)
-
- return new_entries, index
-
+
def clear(entries, date, account_types, account_earnings):
+"""Transfer income and expenses balances at the given date to the equity accounts.
+
+ This method insert entries to zero out balances on income and expenses
+ accounts by transferring them to an equity account.
+
+ Args:
+ entries: A list of directive tuples.
+ date: A datetime.date instance, one day beyond the end of the period. This
+ date can be optionally left to None in order to close at the end of the
+ list of entries.
+ account_types: An instance of AccountTypes.
+ account_earnings: A string, the name of the account to transfer
+ previous earnings from the income statement accounts to the balance
+ sheet.
+ Returns:
+ A new list of entries is returned, and the index that points to one before
+ the last original transaction before the transfers.
+ """
+    index = len(entries)
+
+    # Transfer income and expenses before the period to equity.
+    income_statement_account_pred = lambda account: is_income_statement_account(
+        account, account_types
+    )
+    new_entries = transfer_balances(
+        entries, date, income_statement_account_pred, account_earnings
+    )
+
+    return new_entries, index
+
def clear_opt(entries, date, options_map):
- """Convenience function to clear() using an options map.
- """
- account_types = options.get_account_types(options_map)
- current_accounts = options.get_current_accounts(options_map)
- return clear(entries, date, account_types, current_accounts[0])
-
+
def clear_opt(entries, date, options_map):
+    """Convenience function to clear() using an options map."""
+    account_types = options.get_account_types(options_map)
+    current_accounts = options.get_current_accounts(options_map)
+    return clear(entries, date, account_types, current_accounts[0])
+
def close(entries,
- date,
- conversion_currency,
- account_conversions):
- """Truncate entries that occur after a particular date and ensure balance.
-
- This method essentially removes entries after a date. It truncates the
- future. To do so, it will
-
- 1. Remove all entries which occur after 'date', if given.
-
- 2. Insert conversion transactions at the end of the list of entries to
- ensure that the total balance of all postings sums up to empty.
-
- The result is a list of entries with a total balance of zero, with possibly
- non-zero balances for the income/expense accounts. To produce a final
- balance sheet, use transfer() to move the net income to the equity accounts.
-
- Args:
- entries: A list of directive tuples.
- date: A datetime.date instance, one day beyond the end of the period. This
- date can be optionally left to None in order to close at the end of the
- list of entries.
- conversion_currency: A string, the transfer currency to use for zero prices
- on the conversion entry.
- account_conversions: A string, the name of the equity account to
- book currency conversions against.
- Returns:
- A new list of entries is returned, and the index that points to one beyond
- the last original transaction that was provided. Further entries may have
- been inserted to normalize conversions and ensure the total balance sums
- to zero.
- """
-
- # Truncate the entries after the date, if a date has been provided.
- if date is not None:
- entries = truncate(entries, date)
-
- # Keep an index to the truncated list of entries (before conversions).
- index = len(entries)
-
- # Insert a conversions entry to ensure the total balance of all accounts is
- # flush zero.
- entries = conversions(entries, account_conversions, conversion_currency, date)
-
- return entries, index
-
+
def close(entries, date, conversion_currency, account_conversions):
+"""Truncate entries that occur after a particular date and ensure balance.
+
+ This method essentially removes entries after a date. It truncates the
+ future. To do so, it will
+
+ 1. Remove all entries which occur after 'date', if given.
+
+ 2. Insert conversion transactions at the end of the list of entries to
+ ensure that the total balance of all postings sums up to empty.
+
+ The result is a list of entries with a total balance of zero, with possibly
+ non-zero balances for the income/expense accounts. To produce a final
+ balance sheet, use transfer() to move the net income to the equity accounts.
+
+ Args:
+ entries: A list of directive tuples.
+ date: A datetime.date instance, one day beyond the end of the period. This
+ date can be optionally left to None in order to close at the end of the
+ list of entries.
+ conversion_currency: A string, the transfer currency to use for zero prices
+ on the conversion entry.
+ account_conversions: A string, the name of the equity account to
+ book currency conversions against.
+ Returns:
+ A new list of entries is returned, and the index that points to one beyond
+ the last original transaction that was provided. Further entries may have
+ been inserted to normalize conversions and ensure the total balance sums
+ to zero.
+ """
+
+ # Truncate the entries after the date, if a date has been provided.
+    if date is not None:
+        entries = truncate(entries, date)
+
+    # Keep an index to the truncated list of entries (before conversions).
+    index = len(entries)
+
+    # Insert a conversions entry to ensure the total balance of all accounts is
+    # flush zero.
+    entries = conversions(entries, account_conversions, conversion_currency, date)
+
+    return entries, index
+
def close_opt(entries, date, options_map):
- """Convenience function to close() using an options map.
- """
- conversion_currency = options_map['conversion_currency']
- current_accounts = options.get_current_accounts(options_map)
- return close(entries, date, conversion_currency, current_accounts[1])
-
+
def close_opt(entries, date, options_map):
+    """Convenience function to close() using an options map."""
+    conversion_currency = options_map["conversion_currency"]
+    current_accounts = options.get_current_accounts(options_map)
+    return close(entries, date, conversion_currency, current_accounts[1])
+
def conversions(entries, conversion_account, conversion_currency, date=None):
- """Insert a conversion entry at date 'date' at the given account.
-
- Args:
- entries: A list of entries.
- conversion_account: A string, the account to book against.
- conversion_currency: A string, the transfer currency to use for zero prices
- on the conversion entry.
- date: The date before which to insert the conversion entry. The new
- entry will be inserted as the last entry of the date just previous
- to this date.
- Returns:
- A modified list of entries.
- """
- # Compute the balance at the given date.
- conversion_balance = interpolate.compute_entries_balance(entries, date=date)
-
- # Early exit if there is nothing to do.
- conversion_cost_balance = conversion_balance.reduce(convert.get_cost)
- if conversion_cost_balance.is_empty():
- return entries
-
- # Calculate the index and the date for the new entry. We want to store it as
- # the last transaction of the day before.
- if date is not None:
- index = bisect_key.bisect_left_with_key(entries, date, key=lambda entry: entry.date)
- last_date = date - datetime.timedelta(days=1)
- else:
- index = len(entries)
- last_date = entries[-1].date
-
- meta = data.new_metadata('<conversions>', -1)
- narration = 'Conversion for {}'.format(conversion_balance)
- conversion_entry = Transaction(meta, last_date, flags.FLAG_CONVERSIONS,
- None, narration, data.EMPTY_SET, data.EMPTY_SET, [])
- for position in conversion_cost_balance.get_positions():
- # Important note: Set the cost to zero here to maintain the balance
- # invariant. (This is the only single place we cheat on the balance rule
- # in the entire system and this is necessary; see documentation on
- # Conversions.)
- price = amount.Amount(ZERO, conversion_currency)
- neg_pos = -position
- conversion_entry.postings.append(
- data.Posting(conversion_account, neg_pos.units, neg_pos.cost,
- price, None, None))
-
- # Make a copy of the list of entries and insert the new transaction into it.
- new_entries = list(entries)
- new_entries.insert(index, conversion_entry)
-
- return new_entries
-
+
def conversions(entries, conversion_account, conversion_currency, date=None):
+"""Insert a conversion entry at date 'date' at the given account.
+
+ Args:
+ entries: A list of entries.
+ conversion_account: A string, the account to book against.
+ conversion_currency: A string, the transfer currency to use for zero prices
+ on the conversion entry.
+ date: The date before which to insert the conversion entry. The new
+ entry will be inserted as the last entry of the date just previous
+ to this date.
+ Returns:
+ A modified list of entries.
+ """
+    # Compute the balance at the given date.
+    conversion_balance = interpolate.compute_entries_balance(entries, date=date)
+
+    # Early exit if there is nothing to do.
+    conversion_cost_balance = conversion_balance.reduce(convert.get_cost)
+    if conversion_cost_balance.is_empty():
+        return entries
+
+    # Calculate the index and the date for the new entry. We want to store it as
+    # the last transaction of the day before.
+    if date is not None:
+        index = bisect_key.bisect_left_with_key(entries, date, key=lambda entry: entry.date)
+        last_date = date - datetime.timedelta(days=1)
+    else:
+        index = len(entries)
+        last_date = entries[-1].date
+
+    meta = data.new_metadata("<conversions>", -1)
+    narration = "Conversion for {}".format(conversion_balance)
+    conversion_entry = Transaction(
+        meta,
+        last_date,
+        flags.FLAG_CONVERSIONS,
+        None,
+        narration,
+        data.EMPTY_SET,
+        data.EMPTY_SET,
+        [],
+    )
+    for position in conversion_cost_balance.get_positions():
+        # Important note: Set the cost to zero here to maintain the balance
+        # invariant. (This is the only single place we cheat on the balance rule
+        # in the entire system and this is necessary; see documentation on
+        # Conversions.)
+        price = amount.Amount(ZERO, conversion_currency)
+        neg_pos = -position
+        conversion_entry.postings.append(
+            data.Posting(conversion_account, neg_pos.units, neg_pos.cost, price, None, None)
+        )
+
+    # Make a copy of the list of entries and insert the new transaction into it.
+    new_entries = list(entries)
+    new_entries.insert(index, conversion_entry)
+
+    return new_entries
+
def create_entries_from_balances(balances, date, source_account, direction,
- meta, flag, narration_template):
- """"Create a list of entries from a dict of balances.
-
- This method creates a list of new entries to transfer the amounts in the
- 'balances' dict to/from another account specified in 'source_account'.
-
- The balancing posting is created with the equivalent at cost. In other
- words, if you attempt to balance 10 HOOL {500 USD}, this will synthesize a
- posting with this position on one leg, and with 5000 USD on the
- 'source_account' leg.
-
- Args:
- balances: A dict of account name strings to Inventory instances.
- date: A datetime.date object, the date at which to create the transaction.
- source_account: A string, the name of the account to pull the balances
- from. This is the magician's hat to pull the rabbit from.
- direction: If 'direction' is True, the new entries transfer TO the
- balances account from the source account; otherwise the new entries
- transfer FROM the balances into the source account.
- meta: A dict to use as metadata for the transactions.
- flag: A string, the flag to use for the transactions.
- narration_template: A format string for creating the narration. It is
- formatted with 'account' and 'date' replacement variables.
- Returns:
- A list of newly synthesizes Transaction entries.
- """
- new_entries = []
- for account, account_balance in sorted(balances.items()):
-
- # Don't create new entries where there is no balance.
- if account_balance.is_empty():
- continue
-
- narration = narration_template.format(account=account, date=date)
-
- if not direction:
- account_balance = -account_balance
-
- postings = []
- new_entry = Transaction(
- meta, date, flag, None, narration, data.EMPTY_SET, data.EMPTY_SET, postings)
-
- for position in account_balance.get_positions():
- postings.append(data.Posting(account, position.units, position.cost,
- None, None, None))
- cost = -convert.get_cost(position)
- postings.append(data.Posting(source_account, cost, None,
- None, None, None))
-
- new_entries.append(new_entry)
-
- return new_entries
-
+
def create_entries_from_balances(
+    balances, date, source_account, direction, meta, flag, narration_template
+):
+""" "Create a list of entries from a dict of balances.
+
+ This method creates a list of new entries to transfer the amounts in the
+ 'balances' dict to/from another account specified in 'source_account'.
+
+ The balancing posting is created with the equivalent at cost. In other
+ words, if you attempt to balance 10 HOOL {500 USD}, this will synthesize a
+ posting with this position on one leg, and with 5000 USD on the
+ 'source_account' leg.
+
+ Args:
+ balances: A dict of account name strings to Inventory instances.
+ date: A datetime.date object, the date at which to create the transaction.
+ source_account: A string, the name of the account to pull the balances
+ from. This is the magician's hat to pull the rabbit from.
+ direction: If 'direction' is True, the new entries transfer TO the
+ balances account from the source account; otherwise the new entries
+ transfer FROM the balances into the source account.
+ meta: A dict to use as metadata for the transactions.
+ flag: A string, the flag to use for the transactions.
+ narration_template: A format string for creating the narration. It is
+ formatted with 'account' and 'date' replacement variables.
+ Returns:
+ A list of newly synthesizes Transaction entries.
+ """
+    new_entries = []
+    for account, account_balance in sorted(balances.items()):
+        # Don't create new entries where there is no balance.
+        if account_balance.is_empty():
+            continue
+
+        narration = narration_template.format(account=account, date=date)
+
+        if not direction:
+            account_balance = -account_balance
+
+        postings = []
+        new_entry = Transaction(
+            meta, date, flag, None, narration, data.EMPTY_SET, data.EMPTY_SET, postings
+        )
+
+        for position in account_balance.get_positions():
+            postings.append(
+                data.Posting(account, position.units, position.cost, None, None, None)
+            )
+            cost = -convert.get_cost(position)
+            postings.append(data.Posting(source_account, cost, None, None, None, None))
+
+        new_entries.append(new_entry)
+
+    return new_entries
+
def get_open_entries(entries, date):
- """Gather the list of active Open entries at date.
-
- This returns the list of Open entries that have not been closed at the given
- date, in the same order they were observed in the document.
-
- Args:
- entries: A list of directives.
- date: The date at which to look for an open entry. If not specified, will
- return the entries still open at the latest date.
- Returns:
- A list of Open directives.
- """
- open_entries = {}
- for index, entry in enumerate(entries):
- if date is not None and entry.date >= date:
- break
-
- if isinstance(entry, Open):
- try:
- ex_index, ex_entry = open_entries[entry.account]
- if entry.date < ex_entry.date:
- open_entries[entry.account] = (index, entry)
- except KeyError:
- open_entries[entry.account] = (index, entry)
-
- elif isinstance(entry, Close):
- # If there is no corresponding open, don't raise an error.
- open_entries.pop(entry.account, None)
-
- return [entry for (index, entry) in sorted(open_entries.values())]
-
+
def get_open_entries(entries, date):
+"""Gather the list of active Open entries at date.
+
+ This returns the list of Open entries that have not been closed at the given
+ date, in the same order they were observed in the document.
+
+ Args:
+ entries: A list of directives.
+ date: The date at which to look for an open entry. If not specified, will
+ return the entries still open at the latest date.
+ Returns:
+ A list of Open directives.
+ """
+    open_entries = {}
+    for index, entry in enumerate(entries):
+        if date is not None and entry.date >= date:
+            break
+
+        if isinstance(entry, Open):
+            try:
+                ex_index, ex_entry = open_entries[entry.account]
+                if entry.date < ex_entry.date:
+                    open_entries[entry.account] = (index, entry)
+            except KeyError:
+                open_entries[entry.account] = (index, entry)
+
+        elif isinstance(entry, Close):
+            # If there is no corresponding open, don't raise an error.
+            open_entries.pop(entry.account, None)
+
+    return [entry for (index, entry) in sorted(open_entries.values())]
+
def open(entries,
- date,
- account_types,
- conversion_currency,
- account_earnings,
- account_opening,
- account_conversions):
- """Summarize entries before a date and transfer income/expenses to equity.
-
- This method essentially prepares a list of directives to contain only
- transactions that occur after a particular date. It truncates the past. To
- do so, it will
-
- 1. Insert conversion transactions at the given open date, then
-
- 2. Insert transactions at that date to move accumulated balances from before
- that date from the income and expenses accounts to an equity account, and
- finally
-
- 3. It removes all the transactions previous to the date and replaces them by
- opening balances entries to bring the balances to the same amount.
-
- The result is a list of entries for which the income and expense accounts
- are beginning with a balance of zero, and all other accounts begin with a
- transaction that brings their balance to the expected amount. All the past
- has been summarized at that point.
-
- An index is returned to the first transaction past the balance opening
- transactions, so you can keep just those in order to render a balance sheet
- for only the opening balances.
-
- Args:
- entries: A list of directive tuples.
- date: A datetime.date instance, the date at which to do this.
- account_types: An instance of AccountTypes.
- conversion_currency: A string, the transfer currency to use for zero prices
- on the conversion entry.
- account_earnings: A string, the name of the account to transfer
- previous earnings from the income statement accounts to the balance
- sheet.
- account_opening: A string, the name of the account in equity
- to transfer previous balances from, in order to initialize account
- balances at the beginning of the period. This is typically called an
- opening balances account.
- account_conversions: A string, the name of the equity account to
- book currency conversions against.
- Returns:
- A new list of entries is returned, and the index that points to the first
- original transaction after the beginning date of the period. This index
- can be used to generate the opening balances report, which is a balance
- sheet fed with only the summarized entries.
-
- """
- # Insert conversion entries.
- entries = conversions(entries, account_conversions, conversion_currency, date)
-
- # Transfer income and expenses before the period to equity.
- entries, _ = clear(entries, date, account_types, account_earnings)
-
- # Summarize all the previous balances, after transferring the income and
- # expense balances, so all entries for those accounts before the begin date
- # should now disappear.
- entries, index = summarize(entries, date, account_opening)
-
- return entries, index
-
+
def open(
+    entries,
+    date,
+    account_types,
+    conversion_currency,
+    account_earnings,
+    account_opening,
+    account_conversions,
+):
+"""Summarize entries before a date and transfer income/expenses to equity.
+
+ This method essentially prepares a list of directives to contain only
+ transactions that occur after a particular date. It truncates the past. To
+ do so, it will
+
+ 1. Insert conversion transactions at the given open date, then
+
+ 2. Insert transactions at that date to move accumulated balances from before
+ that date from the income and expenses accounts to an equity account, and
+ finally
+
+ 3. It removes all the transactions previous to the date and replaces them by
+ opening balances entries to bring the balances to the same amount.
+
+ The result is a list of entries for which the income and expense accounts
+ are beginning with a balance of zero, and all other accounts begin with a
+ transaction that brings their balance to the expected amount. All the past
+ has been summarized at that point.
+
+ An index is returned to the first transaction past the balance opening
+ transactions, so you can keep just those in order to render a balance sheet
+ for only the opening balances.
+
+ Args:
+ entries: A list of directive tuples.
+ date: A datetime.date instance, the date at which to do this.
+ account_types: An instance of AccountTypes.
+ conversion_currency: A string, the transfer currency to use for zero prices
+ on the conversion entry.
+ account_earnings: A string, the name of the account to transfer
+ previous earnings from the income statement accounts to the balance
+ sheet.
+ account_opening: A string, the name of the account in equity
+ to transfer previous balances from, in order to initialize account
+ balances at the beginning of the period. This is typically called an
+ opening balances account.
+ account_conversions: A string, the name of the equity account to
+ book currency conversions against.
+ Returns:
+ A new list of entries is returned, and the index that points to the first
+ original transaction after the beginning date of the period. This index
+ can be used to generate the opening balances report, which is a balance
+ sheet fed with only the summarized entries.
+
+ """
+    # Insert conversion entries.
+    entries = conversions(entries, account_conversions, conversion_currency, date)
+
+    # Transfer income and expenses before the period to equity.
+    entries, _ = clear(entries, date, account_types, account_earnings)
+
+    # Summarize all the previous balances, after transferring the income and
+    # expense balances, so all entries for those accounts before the begin date
+    # should now disappear.
+    entries, index = summarize(entries, date, account_opening)
+
+    return entries, index
+
def open_opt(entries, date, options_map):
- """Convenience function to open() using an options map.
- """
- account_types = options.get_account_types(options_map)
- previous_accounts = options.get_previous_accounts(options_map)
- conversion_currency = options_map['conversion_currency']
- return open(entries, date, account_types, conversion_currency, *previous_accounts)
-
+
def open_opt(entries, date, options_map):
+    """Convenience function to open() using an options map."""
+    account_types = options.get_account_types(options_map)
+    previous_accounts = options.get_previous_accounts(options_map)
+    conversion_currency = options_map["conversion_currency"]
+    return open(entries, date, account_types, conversion_currency, *previous_accounts)
+
def summarize(entries, date, account_opening):
- """Summarize all entries before a date by replacing then with summarization entries.
-
- This function replaces the transactions up to (and not including) the given
- date with a opening balance transactions, one for each account. It returns
- new entries, all of the transactions before the given date having been
- replaced by a few summarization entries, one for each account.
-
- Notes:
- - Open entries are preserved for active accounts.
- - The last relevant price entry for each (base, quote) pair is preserved.
- - All other entries before the cutoff date are culled.
-
- Args:
- entries: A list of directives.
- date: A datetime.date instance, the cutoff date before which to summarize.
- account_opening: A string, the name of the source account to book summarization
- entries against.
- Returns:
- The function returns a list of new entries and the integer index at which
- the entries on or after the cutoff date begin.
- """
- # Compute balances at date.
- balances, index = balance_by_account(entries, date)
-
- # We need to insert the entries with a date previous to subsequent checks,
- # to maintain ensure the open directives show up before any transaction.
- summarize_date = date - datetime.timedelta(days=1)
-
- # Create summarization / opening balance entries.
- summarizing_entries = create_entries_from_balances(
- balances, summarize_date, account_opening, True,
- data.new_metadata('<summarize>', 0), flags.FLAG_SUMMARIZE,
- "Opening balance for '{account}' (Summarization)")
-
- # Insert the last price entry for each commodity from before the date.
- price_entries = prices.get_last_price_entries(entries, date)
-
- # Gather the list of active open entries at date.
- open_entries = get_open_entries(entries, date)
-
- # Compute entries before the date and preserve the entries after the date.
- before_entries = sorted(open_entries + price_entries + summarizing_entries,
- key=data.entry_sortkey)
- after_entries = entries[index:]
-
- # Return a new list of entries and the index that points after the entries
- # were inserted.
- return (before_entries + after_entries), len(before_entries)
-
+
def summarize(entries, date, account_opening):
+"""Summarize all entries before a date by replacing then with summarization entries.
+
+ This function replaces the transactions up to (and not including) the given
+ date with a opening balance transactions, one for each account. It returns
+ new entries, all of the transactions before the given date having been
+ replaced by a few summarization entries, one for each account.
+
+ Notes:
+ - Open entries are preserved for active accounts.
+ - The last relevant price entry for each (base, quote) pair is preserved.
+ - All other entries before the cutoff date are culled.
+
+ Args:
+ entries: A list of directives.
+ date: A datetime.date instance, the cutoff date before which to summarize.
+ account_opening: A string, the name of the source account to book summarization
+ entries against.
+ Returns:
+ The function returns a list of new entries and the integer index at which
+ the entries on or after the cutoff date begin.
+ """
+    # Compute balances at date.
+    balances, index = balance_by_account(entries, date)
+
+    # We need to insert the entries with a date previous to subsequent checks,
+    # to maintain ensure the open directives show up before any transaction.
+    summarize_date = date - datetime.timedelta(days=1)
+
+    # Create summarization / opening balance entries.
+    summarizing_entries = create_entries_from_balances(
+        balances,
+        summarize_date,
+        account_opening,
+        True,
+        data.new_metadata("<summarize>", 0),
+        flags.FLAG_SUMMARIZE,
+        "Opening balance for '{account}' (Summarization)",
+    )
+
+    # Insert the last price entry for each commodity from before the date.
+    price_entries = prices.get_last_price_entries(entries, date)
+
+    # Gather the list of active open entries at date.
+    open_entries = get_open_entries(entries, date)
+
+    # Compute entries before the date and preserve the entries after the date.
+    before_entries = sorted(
+        open_entries + price_entries + summarizing_entries, key=data.entry_sortkey
+    )
+    after_entries = entries[index:]
+
+    # Return a new list of entries and the index that points after the entries
+    # were inserted.
+    return (before_entries + after_entries), len(before_entries)
+
def transfer_balances(entries, date, account_pred, transfer_account):
- """Synthesize transactions to transfer balances from some accounts at a given date.
-
- For all accounts that match the 'account_pred' predicate, create new entries
- to transfer the balance at the given date from the account to the transfer
- account. This is used to transfer balances from income and expenses from a
- previous period to a "retained earnings" account. This is accomplished by
- creating new entries.
-
- Note that inserting transfers breaks any following balance checks that are
- in the transferred accounts. For this reason, all balance assertion entries
- following the cutoff date for those accounts are removed from the list in
- output.
-
- Args:
- entries: A list of directives.
- date: A datetime.date instance, the date at which to make the transfer.
- account_pred: A predicate function that, given an account string, returns
- true if the account is meant to be transferred.
- transfer_account: A string, the name of the source account to be used on
- the transfer entries to receive balances at the given date.
- Returns:
- A new list of entries, with the new transfer entries added in.
- """
- # Don't bother doing anything if there are no entries.
- if not entries:
- return entries
-
- # Compute balances at date.
- balances, index = balance_by_account(entries, date)
-
- # Filter out to keep only the accounts we want.
- transfer_balances = {account: balance
- for account, balance in balances.items()
- if account_pred(account)}
-
- # We need to insert the entries at the end of the previous day.
- if date:
- transfer_date = date - datetime.timedelta(days=1)
- else:
- transfer_date = entries[-1].date
-
- # Create transfer entries.
- transfer_entries = create_entries_from_balances(
- transfer_balances, transfer_date, transfer_account, False,
- data.new_metadata('<transfer_balances>', 0), flags.FLAG_TRANSFER,
- "Transfer balance for '{account}' (Transfer balance)")
-
- # Remove balance assertions that occur after a transfer on an account that
- # has been transferred away; they would break.
- after_entries = [entry
- for entry in entries[index:]
- if not (isinstance(entry, balance.Balance) and
- entry.account in transfer_balances)]
-
- # Split the new entries in a new list.
- return (entries[:index] + transfer_entries + after_entries)
-
+
deftransfer_balances(entries,date,account_pred,transfer_account):
+"""Synthesize transactions to transfer balances from some accounts at a given date.
+
+ For all accounts that match the 'account_pred' predicate, create new entries
+ to transfer the balance at the given date from the account to the transfer
+ account. This is used to transfer balances from income and expenses from a
+ previous period to a "retained earnings" account. This is accomplished by
+ creating new entries.
+
+ Note that inserting transfers breaks any following balance checks that are
+ in the transferred accounts. For this reason, all balance assertion entries
+ following the cutoff date for those accounts are removed from the list in
+ output.
+
+ Args:
+ entries: A list of directives.
+ date: A datetime.date instance, the date at which to make the transfer.
+ account_pred: A predicate function that, given an account string, returns
+ true if the account is meant to be transferred.
+ transfer_account: A string, the name of the source account to be used on
+ the transfer entries to receive balances at the given date.
+ Returns:
+ A new list of entries, with the new transfer entries added in.
+ """
+ # Don't bother doing anything if there are no entries.
+ ifnotentries:
+ returnentries
+
+ # Compute balances at date.
+ balances,index=balance_by_account(entries,date)
+
+ # Filter out to keep only the accounts we want.
+ transfer_balances={
+ account:balanceforaccount,balanceinbalances.items()ifaccount_pred(account)
+ }
+
+ # We need to insert the entries at the end of the previous day.
+ ifdate:
+ transfer_date=date-datetime.timedelta(days=1)
+ else:
+ transfer_date=entries[-1].date
+
+ # Create transfer entries.
+ transfer_entries=create_entries_from_balances(
+ transfer_balances,
+ transfer_date,
+ transfer_account,
+ False,
+ data.new_metadata("<transfer_balances>",0),
+ flags.FLAG_TRANSFER,
+ "Transfer balance for '{account}' (Transfer balance)",
+ )
+
+ # Remove balance assertions that occur after a transfer on an account that
+ # has been transferred away; they would break.
+ after_entries=[
+ entry
+ forentryinentries[index:]
+ ifnot(isinstance(entry,data.Balance)andentry.accountintransfer_balances)
+ ]
+
+ # Split the new entries in a new list.
+ returnentries[:index]+transfer_entries+after_entries
+
def truncate(entries, date):
- """Filter out all the entries at and after date. Returns a new list of entries.
-
- Args:
- entries: A sorted list of directives.
- date: A datetime.date instance.
- Returns:
- A truncated list of directives.
- """
- index = bisect_key.bisect_left_with_key(entries, date,
- key=lambda entry: entry.date)
- return entries[:index]
-
+
 def truncate(entries, date):
+    """Filter out all the entries at and after date. Returns a new list of entries.
+
+    Args:
+      entries: A sorted list of directives.
+      date: A datetime.date instance.
+    Returns:
+      A truncated list of directives.
+    """
+    index = bisect_key.bisect_left_with_key(entries, date, key=lambda entry: entry.date)
+    return entries[:index]
+
def validate(entries, options_map, log_timings=None, extra_validations=None):
- """Perform all the standard checks on parsed contents.
-
- Args:
- entries: A list of directives.
- unused_options_map: An options map.
- log_timings: An optional function to use for logging the time of individual
- operations.
- extra_validations: A list of extra validation functions to run after loading
- this list of entries.
- Returns:
- A list of new errors, if any were found.
- """
- validation_tests = VALIDATIONS
- if extra_validations:
- validation_tests += extra_validations
-
- # Run various validation routines define above.
- errors = []
- for validation_function in validation_tests:
- with misc_utils.log_time('function: {}'.format(validation_function.__name__),
- log_timings, indent=2):
- new_errors = validation_function(entries, options_map)
- errors.extend(new_errors)
-
- return errors
-
+
defvalidate(entries,options_map,log_timings=None,extra_validations=None):
+"""Perform all the standard checks on parsed contents.
+
+ Args:
+ entries: A list of directives.
+ unused_options_map: An options map.
+ log_timings: An optional function to use for logging the time of individual
+ operations.
+ extra_validations: A list of extra validation functions to run after loading
+ this list of entries.
+ Returns:
+ A list of new errors, if any were found.
+ """
+ validation_tests=VALIDATIONS
+ ifextra_validations:
+ validation_tests+=extra_validations
+
+ # Run various validation routines defined above.
+ errors=[]
+ forvalidation_functioninvalidation_tests:
+ withmisc_utils.log_time(
+ "function: {}".format(validation_function.__name__),log_timings,indent=2
+ ):
+ new_errors=validation_function(entries,options_map)
+ errors.extend(new_errors)
+
+ returnerrors
+
def validate_active_accounts(entries, unused_options_map):
- """Check that all references to accounts occurs on active accounts.
-
- We basically check that references to accounts from all directives other
- than Open and Close occur at dates the open-close interval of that account.
- This should be good for all of the directive types where we can extract an
- account name.
-
- Note that this is more strict a check than comparing the dates: we actually
- check that no references to account are made on the same day before the open
- directive appears for that account. This is a nice property to have, and is
- supported by our custom sorting routine that will sort open entries before
- transaction entries, given the same date.
-
- Args:
- entries: A list of directives.
- unused_options_map: An options map.
- Returns:
- A list of new errors, if any were found.
- """
- error_pairs = []
- active_set = set()
- opened_accounts = set()
- for entry in entries:
- if isinstance(entry, data.Open):
- active_set.add(entry.account)
- opened_accounts.add(entry.account)
-
- elif isinstance(entry, data.Close):
- active_set.discard(entry.account)
-
- else:
- for account in getters.get_entry_accounts(entry):
- if account not in active_set:
- # Allow document and note directives that occur after an
- # account is closed.
- if (isinstance(entry, ALLOW_AFTER_CLOSE) and
- account in opened_accounts):
- continue
-
- # Register an error to be logged later, with an appropriate
- # message.
- error_pairs.append((account, entry))
-
- # Refine the error message to disambiguate between the case of an account
- # that has never been seen and one that was simply not active at the time.
- errors = []
- for account, entry in error_pairs:
- if account in opened_accounts:
- message = "Invalid reference to inactive account '{}'".format(account)
- else:
- message = "Invalid reference to unknown account '{}'".format(account)
- errors.append(ValidationError(entry.meta, message, entry))
-
- return errors
-
+
defvalidate_active_accounts(entries,unused_options_map):
+"""Check that all references to accounts occurs on active accounts.
+
+ We basically check that references to accounts from all directives other
+ than Open and Close occur at dates the open-close interval of that account.
+ This should be good for all of the directive types where we can extract an
+ account name.
+
+ Note that this is more strict a check than comparing the dates: we actually
+ check that no references to account are made on the same day before the open
+ directive appears for that account. This is a nice property to have, and is
+ supported by our custom sorting routine that will sort open entries before
+ transaction entries, given the same date.
+
+ Args:
+ entries: A list of directives.
+ unused_options_map: An options map.
+ Returns:
+ A list of new errors, if any were found.
+ """
+ error_pairs=[]
+ active_set=set()
+ opened_accounts=set()
+ forentryinentries:
+ ifisinstance(entry,data.Open):
+ active_set.add(entry.account)
+ opened_accounts.add(entry.account)
+
+ elifisinstance(entry,data.Close):
+ active_set.discard(entry.account)
+
+ else:
+ foraccountingetters.get_entry_accounts(entry):
+ ifaccountnotinactive_set:
+ # Allow document and note directives that occur after an
+ # account is closed.
+ ifisinstance(entry,ALLOW_AFTER_CLOSE)andaccountinopened_accounts:
+ continue
+
+ # Register an error to be logged later, with an appropriate
+ # message.
+ error_pairs.append((account,entry))
+
+ # Refine the error message to disambiguate between the case of an account
+ # that has never been seen and one that was simply not active at the time.
+ errors=[]
+ foraccount,entryinerror_pairs:
+ ifaccountinopened_accounts:
+ message="Invalid reference to inactive account '{}'".format(account)
+ else:
+ message="Invalid reference to unknown account '{}'".format(account)
+ errors.append(ValidationError(entry.meta,message,entry))
+
+ returnerrors
+
def validate_check_transaction_balances(entries, options_map):
- """Check again that all transaction postings balance, as users may have
- transformed transactions.
-
- Args:
- entries: A list of directives.
- unused_options_map: An options map.
- Returns:
- A list of new errors, if any were found.
- """
- # Note: this is a bit slow; we could limit our checks to the original
- # transactions by using the hash function in the loader.
- errors = []
- for entry in entries:
- if isinstance(entry, Transaction):
- # IMPORTANT: This validation is _crucial_ and cannot be skipped.
- # This is where we actually detect and warn on unbalancing
- # transactions. This _must_ come after the user routines, because
- # unbalancing input is legal, as those types of transactions may be
- # "fixed up" by a user-plugin. In other words, we want to allow
- # users to input unbalancing transactions as long as the final
- # transactions objects that appear on the stream (after processing
- # the plugins) are balanced. See {9e6c14b51a59}.
- #
- # Detect complete sets of postings that have residual balance;
- residual = interpolate.compute_residual(entry.postings)
- tolerances = interpolate.infer_tolerances(entry.postings, options_map)
- if not residual.is_small(tolerances):
- errors.append(
- ValidationError(entry.meta,
- "Transaction does not balance: {}".format(residual),
- entry))
-
- return errors
-
+
defvalidate_check_transaction_balances(entries,options_map):
+"""Check again that all transaction postings balance, as users may have
+ transformed transactions.
+
+ Args:
+ entries: A list of directives.
+ unused_options_map: An options map.
+ Returns:
+ A list of new errors, if any were found.
+ """
+ # Note: this is a bit slow; we could limit our checks to the original
+ # transactions by using the hash function in the loader.
+ errors=[]
+ forentryinentries:
+ ifisinstance(entry,Transaction):
+ # IMPORTANT: This validation is _crucial_ and cannot be skipped.
+ # This is where we actually detect and warn on unbalancing
+ # transactions. This _must_ come after the user routines, because
+ # unbalancing input is legal, as those types of transactions may be
+ # "fixed up" by a user-plugin. In other words, we want to allow
+ # users to input unbalancing transactions as long as the final
+ # transactions objects that appear on the stream (after processing
+ # the plugins) are balanced. See {9e6c14b51a59}.
+ #
+ # Detect complete sets of postings that have residual balance;
+ residual=interpolate.compute_residual(entry.postings)
+ tolerances=interpolate.infer_tolerances(entry.postings,options_map)
+ ifnotresidual.is_small(tolerances):
+ errors.append(
+ ValidationError(
+ entry.meta,
+ "Transaction does not balance: {}".format(residual),
+ entry,
+ )
+ )
+
+ returnerrors
+
def validate_currency_constraints(entries, options_map):
- """Check the currency constraints from account open declarations.
-
- Open directives admit an optional list of currencies that specify the only
- types of commodities that the running inventory for this account may
- contain. This function checks that all postings are only made in those
- commodities.
-
- Args:
- entries: A list of directives.
- unused_options_map: An options map.
- Returns:
- A list of new errors, if any were found.
- """
-
- # Get all the open entries with currency constraints.
- open_map = {entry.account: entry
- for entry in entries
- if isinstance(entry, Open) and entry.currencies}
-
- errors = []
- for entry in entries:
- if not isinstance(entry, Transaction):
- continue
-
- for posting in entry.postings:
- # Look up the corresponding account's valid currencies; skip the
- # check if there are none specified.
- try:
- open_entry = open_map[posting.account]
- valid_currencies = open_entry.currencies
- if not valid_currencies:
- continue
- except KeyError:
- continue
-
- # Perform the check.
- if posting.units.currency not in valid_currencies:
- errors.append(
- ValidationError(
- entry.meta,
- "Invalid currency {} for account '{}'".format(
- posting.units.currency, posting.account),
- entry))
-
- return errors
-
+
defvalidate_currency_constraints(entries,options_map):
+"""Check the currency constraints from account open declarations.
+
+ Open directives admit an optional list of currencies that specify the only
+ types of commodities that the running inventory for this account may
+ contain. This function checks that all postings are only made in those
+ commodities.
+
+ Args:
+ entries: A list of directives.
+ unused_options_map: An options map.
+ Returns:
+ A list of new errors, if any were found.
+ """
+
+ # Get all the open entries with currency constraints.
+ open_map={
+ entry.account:entry
+ forentryinentries
+ ifisinstance(entry,Open)andentry.currencies
+ }
+
+ errors=[]
+ forentryinentries:
+ ifnotisinstance(entry,Transaction):
+ continue
+
+ forpostinginentry.postings:
+ # Look up the corresponding account's valid currencies; skip the
+ # check if there are none specified.
+ try:
+ open_entry=open_map[posting.account]
+ valid_currencies=open_entry.currencies
+ ifnotvalid_currencies:
+ continue
+ exceptKeyError:
+ continue
+
+ # Perform the check.
+ ifposting.units.currencynotinvalid_currencies:
+ errors.append(
+ ValidationError(
+ entry.meta,
+ "Invalid currency {} for account '{}'".format(
+ posting.units.currency,posting.account
+ ),
+ entry,
+ )
+ )
+
+ returnerrors
+
def validate_data_types(entries, options_map):
- """Check that all the data types of the attributes of entries are as expected.
-
- Users are provided with a means to filter the list of entries. They're able to
- write code that manipulates those tuple objects without any type constraints.
- With discipline, this mostly works, but I know better: check, just to make sure.
- This routine checks all the data types and assumptions on entries.
-
- Args:
- entries: A list of directives.
- unused_options_map: An options map.
- Returns:
- A list of new errors, if any were found.
- """
- errors = []
- for entry in entries:
- try:
- data.sanity_check_types(
- entry, options_map["allow_deprecated_none_for_tags_and_links"])
- except AssertionError as exc:
- errors.append(
- ValidationError(entry.meta,
- "Invalid data types: {}".format(exc),
- entry))
- return errors
-
+
defvalidate_data_types(entries,options_map):
+"""Check that all the data types of the attributes of entries are as expected.
+
+ Users are provided with a means to filter the list of entries. They're able to
+ write code that manipulates those tuple objects without any type constraints.
+ With discipline, this mostly works, but I know better: check, just to make sure.
+ This routine checks all the data types and assumptions on entries.
+
+ Args:
+ entries: A list of directives.
+ unused_options_map: An options map.
+ Returns:
+ A list of new errors, if any were found.
+ """
+ errors=[]
+ forentryinentries:
+ try:
+ data.sanity_check_types(
+ entry,options_map["allow_deprecated_none_for_tags_and_links"]
+ )
+ exceptAssertionErrorasexc:
+ errors.append(
+ ValidationError(entry.meta,"Invalid data types: {}".format(exc),entry)
+ )
+ returnerrors
+
def validate_documents_paths(entries, options_map):
- """Check that all filenames in resolved Document entries are absolute filenames.
-
- The processing of document entries is assumed to result in absolute paths.
- Relative paths are resolved at the parsing stage and at point we want to
- make sure we don't have to do any further processing on them.
-
- Args:
- entries: A list of directives.
- unused_options_map: An options map.
- Returns:
- A list of new errors, if any were found.
- """
- return [ValidationError(entry.meta, "Invalid relative path for entry", entry)
- for entry in entries
- if (isinstance(entry, Document) and
- not path.isabs(entry.filename))]
-
+
defvalidate_documents_paths(entries,options_map):
+"""Check that all filenames in resolved Document entries are absolute filenames.
+
+ The processing of document entries is assumed to result in absolute paths.
+ Relative paths are resolved at the parsing stage and at this point we want
+ to make sure we don't have to do any further processing on them.
+
+ Args:
+ entries: A list of directives.
+ unused_options_map: An options map.
+ Returns:
+ A list of new errors, if any were found.
+ """
+ return[
+ ValidationError(entry.meta,"Invalid relative path for entry",entry)
+ forentryinentries
+ if(isinstance(entry,Document)andnotpath.isabs(entry.filename))
+ ]
+
def validate_duplicate_balances(entries, unused_options_map):
- """Check that balance entries occur only once per day.
-
- Because we do not support time, and the declaration order of entries is
- meant to be kept irrelevant, two balance entries with different amounts
- should not occur in the file. We do allow two identical balance assertions,
- however, because this may occur during import.
-
- Args:
- entries: A list of directives.
- unused_options_map: An options map.
- Returns:
- A list of new errors, if any were found.
- """
- errors = []
-
- # Mapping of (account, currency, date) to Balance entry.
- balance_entries = {}
- for entry in entries:
- if not isinstance(entry, data.Balance):
- continue
-
- key = (entry.account, entry.amount.currency, entry.date)
- try:
- previous_entry = balance_entries[key]
- if entry.amount != previous_entry.amount:
- errors.append(
- ValidationError(
- entry.meta,
- "Duplicate balance assertion with different amounts",
- entry))
- except KeyError:
- balance_entries[key] = entry
-
- return errors
-
+
defvalidate_duplicate_balances(entries,unused_options_map):
+"""Check that balance entries occur only once per day.
+
+ Because we do not support time, and the declaration order of entries is
+ meant to be kept irrelevant, two balance entries with different amounts
+ should not occur in the file. We do allow two identical balance assertions,
+ however, because this may occur during import.
+
+ Args:
+ entries: A list of directives.
+ unused_options_map: An options map.
+ Returns:
+ A list of new errors, if any were found.
+ """
+ errors=[]
+
+ # Mapping of (account, currency, date) to Balance entry.
+ balance_entries={}
+ forentryinentries:
+ ifnotisinstance(entry,data.Balance):
+ continue
+
+ key=(entry.account,entry.amount.currency,entry.date)
+ try:
+ previous_entry=balance_entries[key]
+ ifentry.amount!=previous_entry.amount:
+ errors.append(
+ ValidationError(
+ entry.meta,
+ "Duplicate balance assertion with different amounts",
+ entry,
+ )
+ )
+ exceptKeyError:
+ balance_entries[key]=entry
+
+ returnerrors
+
def validate_duplicate_commodities(entries, unused_options_map):
- """Check that commodity entries are unique for each commodity.
-
- Args:
- entries: A list of directives.
- unused_options_map: An options map.
- Returns:
- A list of new errors, if any were found.
- """
- errors = []
-
- # Mapping of (account, currency, date) to Balance entry.
- commodity_entries = {}
- for entry in entries:
- if not isinstance(entry, data.Commodity):
- continue
-
- key = entry.currency
- try:
- previous_entry = commodity_entries[key]
- if previous_entry:
- errors.append(
- ValidationError(
- entry.meta,
- "Duplicate commodity directives for '{}'".format(key),
- entry))
- except KeyError:
- commodity_entries[key] = entry
-
- return errors
-
+
defvalidate_duplicate_commodities(entries,unused_options_map):
+"""Check that commodity entries are unique for each commodity.
+
+ Args:
+ entries: A list of directives.
+ unused_options_map: An options map.
+ Returns:
+ A list of new errors, if any were found.
+ """
+ errors=[]
+
+ # Mapping of (account, currency, date) to Balance entry.
+ commodity_entries={}
+ forentryinentries:
+ ifnotisinstance(entry,data.Commodity):
+ continue
+
+ key=entry.currency
+ try:
+ previous_entry=commodity_entries[key]
+ ifprevious_entry:
+ errors.append(
+ ValidationError(
+ entry.meta,
+ "Duplicate commodity directives for '{}'".format(key),
+ entry,
+ )
+ )
+ exceptKeyError:
+ commodity_entries[key]=entry
+
+ returnerrors
+
Close directives may only appears if an open directive has been seen
- previous (chronologically).
+
Close directives may only appear if an open directive has been seen
+ previously (chronologically).
The date of close directives must be strictly greater than their
- corresponding open directive.
+ corresponding open directive.
@@ -6494,69 +6026,76 @@
Source code in beancount/ops/validation.py
-
def validate_open_close(entries, unused_options_map):
- """Check constraints on open and close directives themselves.
-
- This method checks two kinds of constraints:
-
- 1. An open or a close directive may only show up once for each account. If a
- duplicate is detected, an error is generated.
-
- 2. Close directives may only appears if an open directive has been seen
- previous (chronologically).
-
- 3. The date of close directives must be strictly greater than their
- corresponding open directive.
-
- Args:
- entries: A list of directives.
- unused_options_map: An options map.
- Returns:
- A list of new errors, if any were found.
- """
- errors = []
- open_map = {}
- close_map = {}
- for entry in entries:
-
- if isinstance(entry, Open):
- if entry.account in open_map:
- errors.append(
- ValidationError(
- entry.meta,
- "Duplicate open directive for {}".format(entry.account),
- entry))
- else:
- open_map[entry.account] = entry
-
- elif isinstance(entry, Close):
- if entry.account in close_map:
- errors.append(
- ValidationError(
- entry.meta,
- "Duplicate close directive for {}".format(entry.account),
- entry))
- else:
- try:
- open_entry = open_map[entry.account]
- if entry.date <= open_entry.date:
- errors.append(
- ValidationError(
- entry.meta,
- "Internal error: closing date for {} "
- "appears before opening date".format(entry.account),
- entry))
- except KeyError:
- errors.append(
- ValidationError(
- entry.meta,
- "Unopened account {} is being closed".format(entry.account),
- entry))
-
- close_map[entry.account] = entry
-
- return errors
-
+
defvalidate_open_close(entries,unused_options_map):
+"""Check constraints on open and close directives themselves.
+
+ This method checks two kinds of constraints:
+
+ 1. An open or a close directive may only show up once for each account. If a
+ duplicate is detected, an error is generated.
+
+ 2. Close directives may only appear if an open directive has been seen
+ previously (chronologically).
+
+ 3. The date of close directives must be strictly greater than their
+ corresponding open directive.
+
+ Args:
+ entries: A list of directives.
+ unused_options_map: An options map.
+ Returns:
+ A list of new errors, if any were found.
+ """
+ errors=[]
+ open_map={}
+ close_map={}
+ forentryinentries:
+ ifisinstance(entry,Open):
+ ifentry.accountinopen_map:
+ errors.append(
+ ValidationError(
+ entry.meta,
+ "Duplicate open directive for {}".format(entry.account),
+ entry,
+ )
+ )
+ else:
+ open_map[entry.account]=entry
+
+ elifisinstance(entry,Close):
+ ifentry.accountinclose_map:
+ errors.append(
+ ValidationError(
+ entry.meta,
+ "Duplicate close directive for {}".format(entry.account),
+ entry,
+ )
+ )
+ else:
+ try:
+ open_entry=open_map[entry.account]
+ ifentry.date<open_entry.date:
+ errors.append(
+ ValidationError(
+ entry.meta,
+ "Internal error: closing date for {} "
+ "appears before opening date".format(entry.account),
+ entry,
+ )
+ )
+ exceptKeyError:
+ errors.append(
+ ValidationError(
+ entry.meta,
+ "Unopened account {} is being closed".format(entry.account),
+ entry,
+ )
+ )
+
+ close_map[entry.account]=entry
+
+ returnerrors
+
incomplete_entries – A list of directives, with some postings possibly left
with incomplete amounts as produced by the parser.
options_map – An options dict as produced by the parser.
+
initial_balances – A dict of (account, inventory) pairs to start booking from.
+This is useful when attempting to book on top of an existing state.
@@ -892,33 +894,257 @@
Source code in beancount/parser/booking.py
-
def book(incomplete_entries, options_map):
- """Book inventory lots and complete all positions with incomplete numbers.
-
- Args:
- incomplete_entries: A list of directives, with some postings possibly left
- with incomplete amounts as produced by the parser.
- options_map: An options dict as produced by the parser.
- Returns:
- A pair of
- entries: A list of completed entries with all their postings completed.
- errors: New errors produced during interpolation.
- """
- # Get the list of booking methods for each account.
- booking_methods = collections.defaultdict(lambda: options_map["booking_method"])
- for entry in incomplete_entries:
- if isinstance(entry, data.Open) and entry.booking:
- booking_methods[entry.account] = entry.booking
-
- # Do the booking here!
- entries, booking_errors = booking_full.book(incomplete_entries, options_map,
- booking_methods)
-
- # Check for MISSING elements remaining.
- missing_errors = validate_missing_eliminated(entries, options_map)
-
- return entries, (booking_errors + missing_errors)
-
+
 def book(incomplete_entries, options_map, initial_balances=None):
+    """Book inventory lots and complete all positions with incomplete numbers.
+
+    Args:
+      incomplete_entries: A list of directives, with some postings possibly left
+        with incomplete amounts as produced by the parser.
+      options_map: An options dict as produced by the parser.
+      initial_balances: A dict of (account, inventory) pairs to start booking from.
+        This is useful when attempting to book on top of an existing state.
+    Returns:
+      A pair of
+        entries: A list of completed entries with all their postings completed.
+        errors: New errors produced during interpolation.
+    """
+    # Get the list of booking methods for each account.
+    booking_methods = collections.defaultdict(lambda: options_map["booking_method"])
+    for entry in incomplete_entries:
+        if isinstance(entry, data.Open) and entry.booking:
+            booking_methods[entry.account] = entry.booking
+
+    # Do the booking here!
+    entries, booking_errors = booking_full.book(
+        incomplete_entries, options_map, booking_methods, initial_balances
+    )
+
+    # Check for MISSING elements remaining.
+    missing_errors = validate_missing_eliminated(entries, options_map)
+
+    return entries, (booking_errors + missing_errors)
+
For all the entries, convert the posting's position's CostSpec to Cost
+instances. In the simple method, the data provided in the CostSpec must
+unambiguously provide a way to compute the cost amount.
+
This essentially replicates the way the old parser used to work, but
+allowing positions to have the fuzzy lot specifications instead of the
+resolved ones. We used to simply compute the costs locally, and this gets
+rid of the CostSpec to produce the Cost without fuzzy matching. This is only
+there for the sake of transition to the new matching logic.
+
+
+
+
+
+
+
+
+
Parameters:
+
+
+
entries – A list of incomplete directives as per the parser.
+
+
+
+
+
+
+
+
+
+
+
+
+
Returns:
+
+
+
A list of entries whose postings's position costs have been converted to
+Cost instances but that may still be incomplete.
+
+
+
+
+
+
+
+
+
+
+
+
+
Exceptions:
+
+
+
ValueError – If there's a unacceptable number.
+
+
+
+
+
+
+ Source code in beancount/parser/booking.py
+
defconvert_lot_specs_to_lots(entries):
+"""For all the entries, convert the posting's position's CostSpec to Cost
+ instances. In the simple method, the data provided in the CostSpec must
+ unambiguously provide a way to compute the cost amount.
+
+ This essentially replicates the way the old parser used to work, but
+ allowing positions to have the fuzzy lot specifications instead of the
+ resolved ones. We used to simply compute the costs locally, and this gets
+ rid of the CostSpec to produce the Cost without fuzzy matching. This is only
+ there for the sake of transition to the new matching logic.
+
+ Args:
+ entries: A list of incomplete directives as per the parser.
+ Returns:
+ A list of entries whose postings's position costs have been converted to
+ Cost instances but that may still be incomplete.
+ Raises:
+ ValueError: If there's an unacceptable number.
+ """
+ new_entries=[]
+ errors=[]
+ forentryinentries:
+ ifnotisinstance(entry,data.Transaction):
+ new_entries.append(entry)
+ continue
+
+ new_postings=[]
+ forpostinginentry.postings:
+ try:
+ units=posting.units
+ cost_spec=posting.cost
+ cost=convert_spec_to_cost(units,cost_spec)
+ ifcost_specisnotNoneandcostisNone:
+ errors.append(
+ BookingError(
+ entry.meta,"Cost syntax not supported; cost spec ignored",None
+ )
+ )
+
+ ifcostandisinstance(units,amount.Amount):
+ # If there is a cost, we don't allow either a cost value of
+ # zero, nor a zero number of units. Note that we allow a price
+ # of zero as the only special case (for conversion entries), but
+ # never for costs.
+ ifunits.number==ZERO:
+ raiseValueError('Amount is zero: "{}"'.format(units))
+ ifcost.numberisnotNoneandcost.number<ZERO:
+ raiseValueError('Cost is negative: "{}"'.format(cost))
+ exceptValueErrorasexc:
+ errors.append(BookingError(entry.meta,str(exc),None))
+ cost=None
+ new_postings.append(posting._replace(cost=cost))
+ new_entries.append(entry._replace(postings=new_postings))
+ returnnew_entries,errors
+
def validate_inventory_booking(entries, unused_options_map, booking_methods):
    """Validate that no position at cost is allowed to go negative.

    This routine checks that when a posting reduces a position, existing or not,
    that the subsequent inventory does not result in a position with a negative
    number of units. A negative number of units would only be required for short
    trades of trading spreads on futures, and right now this is not supported.
    It would not be difficult to support this, however, but we want to be strict
    about it, because being pedantic about this is otherwise a great way to
    detect user data entry mistakes.

    Args:
      entries: A list of directives.
      unused_options_map: An options map.
      booking_methods: A mapping of account name to booking method, accumulated
        in the main loop.
    Returns:
      A list of errors.
    """
    errors = []
    balances = collections.defaultdict(inventory.Inventory)
    for entry in entries:
        if isinstance(entry, data.Transaction):
            for posting in entry.postings:
                # Update the balance of each posting on its respective account
                # without allowing booking to a negative position, and if an error
                # is encountered, catch it and return it.
                running_balance = balances[posting.account]
                position_, _ = running_balance.add_position(posting)

                # Skip this check if the booking method is set to ignore it.
                if booking_methods.get(posting.account, None) == data.Booking.NONE:
                    continue

                # Check if the resulting inventory is mixed, which is not
                # allowed under the STRICT method.
                if running_balance.is_mixed():
                    errors.append(
                        BookingError(
                            entry.meta,
                            (
                                "Reducing position results in inventory with positive "
                                "and negative lots: {}"
                            ).format(position_),
                            entry,
                        )
                    )

    return errors
+
def validate_missing_eliminated(entries, unused_options_map):
    """Validate that all the missing bits of postings have been eliminated.

    Args:
      entries: A list of directives.
      unused_options_map: An options map.
    Returns:
      A list of errors.
    """
    errors = []
    for entry in entries:
        if isinstance(entry, data.Transaction):
            for posting in entry.postings:
                units = posting.units
                cost = posting.cost
                # A posting is incomplete if any of its units fields, or any
                # field of its cost (when a cost is present), is still MISSING.
                if MISSING in (units.number, units.currency) or (
                    cost is not None
                    and MISSING in (cost.number, cost.currency, cost.date, cost.label)
                ):
                    errors.append(
                        BookingError(
                            entry.meta, "Transaction has incomplete elements", entry
                        )
                    )
                    # One error per transaction is enough; skip its other postings.
                    break
    return errors
+
def book(entries, options_map, methods, initial_balances=None):
    """Interpolate missing data from the entries using the full historical algorithm.

    See the internal implementation _book() for details.
    This method only strips some of the return values.

    Args:
      entries: A list of directives.
      options_map: An options map.
      methods: A mapping of account name to booking method.
      initial_balances: Optional pre-existing balances forwarded to _book()
        (presumably ante-inventories keyed by account — confirm against _book).
    Returns:
      A pair of (booked entries, errors), dropping _book()'s third return value.
    """
    entries, errors, _ = _book(entries, options_map, methods, initial_balances)
    return entries, errors
+
def book_reductions(entry, group_postings, balances, methods):
    """Book inventory reductions against the ante-balances.

    This function accepts a dict of (account, Inventory balance) and for each
    posting that is a reduction against its inventory, attempts to find a
    corresponding lot or list of lots to reduce the balance with.

    * For reducing lots, the CostSpec instance of the posting is replaced by a
      Cost instance.

    * For augmenting lots, the CostSpec instance of the posting is left alone,
      except for its date, which is inherited from the parent Transaction.

    Args:
      entry: An instance of Transaction. This is only used to refer to when
        logging errors.
      group_postings: A list of Posting instances for the group.
      balances: A dict of account name to inventory contents.
      methods: A mapping of account name to their corresponding booking
        method enum.
    Returns:
      A pair of
        booked_postings: A list of booked postings, with reducing lots resolved
          against specific position in the corresponding accounts'
          ante-inventory balances. Note single reducing posting in the input may
          result in multiple postings in the output. Also note that augmenting
          postings held-at-cost will still refer to 'cost' instances of
          CostSpec, left to be interpolated later.
        errors: A list of errors, if there were any.
    """
    errors = []

    # A local copy of the balances dictionary which is updated just for the
    # duration of this function's updates, in order to take into account the
    # cumulative effect of all the postings inferred here
    local_balances = {}

    empty = inventory.Inventory()
    booked_postings = []
    for posting in group_postings:
        # Process a single posting.
        units = posting.units
        costspec = posting.cost
        account = posting.account

        # Note: We ensure there is no mutation on 'balances' to keep this
        # function without side-effects. Note that we may be able to optimize
        # performance later on by giving up this property.
        #
        # Also note that if there is no existing balance, then won't be any lot
        # reduction because none of the postings will be able to match against
        # any currencies of the balance.
        if account not in local_balances:
            previous_balance = balances.get(account, empty)
            local_balances[account] = copy.copy(previous_balance)
        balance = local_balances[account]

        # Check if this is a lot held at cost.
        if costspec is None or units.number is MISSING:
            # This posting is not held at cost; we do nothing.
            booked_postings.append(posting)
        else:
            # This posting is held at cost; figure out if it's a reduction or an
            # augmentation.
            method = methods[account]
            if (
                method is not Booking.NONE
                and balance is not None
                and balance.is_reduced_by(units)
            ):
                # This posting is a reduction.

                # Match the positions.
                cost_number = compute_cost_number(costspec, units)
                matches = []
                for position in balance:
                    # Skip inventory contents of a different currency.
                    if units.currency and position.units.currency != units.currency:
                        continue
                    # Skip balance positions not held at cost.
                    if position.cost is None:
                        continue
                    if cost_number is not None and position.cost.number != cost_number:
                        continue
                    if (
                        isinstance(costspec.currency, str)
                        and position.cost.currency != costspec.currency
                    ):
                        continue
                    if costspec.date and position.cost.date != costspec.date:
                        continue
                    if costspec.label and position.cost.label != costspec.label:
                        continue
                    matches.append(position)

                # Check for ambiguous matches.
                if len(matches) == 0:
                    errors.append(
                        ReductionError(
                            entry.meta,
                            'No position matches "{}" against balance {}'.format(
                                posting, balance
                            ),
                            entry,
                        )
                    )
                    return [], errors  # This is irreconcilable, remove these postings.

                # TODO(blais): We'll have to change this, as we want to allow
                # positions crossing from negative to positive and vice-versa in
                # a simple application. See {d3cbd78f1029}.
                reduction_postings, matched_postings, ambi_errors = (
                    booking_method.handle_ambiguous_matches(entry, posting, matches, method)
                )
                if ambi_errors:
                    errors.extend(ambi_errors)
                    return [], errors

                # Add the reductions to the resulting list of booked postings.
                booked_postings.extend(reduction_postings)

                # Update the local balance in order to avoid matching against
                # the same postings twice when processing multiple postings in
                # the same transaction. Note that we only do this for postings
                # held at cost because the other postings may need interpolation
                # in order to be resolved properly.
                for posting in reduction_postings:
                    balance.add_position(posting)
            else:
                # This posting is an augmentation.
                #
                # Note that we do not convert the CostSpec instances to Cost
                # instances, because we want to let the subsequent interpolation
                # process able to interpolate either the cost per-unit or the
                # total cost, separately.

                # Put in the date of the parent Transaction if there is no
                # explicit date specified on the spec.
                if costspec.date is None:
                    dated_costspec = costspec._replace(date=entry.date)
                    posting = posting._replace(cost=dated_costspec)

                # FIXME: Insert unique ids for trade tracking; right now this
                # creates ambiguous matches errors (and it shouldn't).
                # # Insert a unique label if there isn't one.
                # if posting.cost is not None and posting.cost.label is None:
                #     posting = posting._replace(
                #         cost=posting.cost._replace(label=unique_label()))

                booked_postings.append(posting)

    return booked_postings, errors
+
def categorize_by_currency(entry, balances):
    """Group the postings by the currency they declare.

    This is used to prepare the postings for the next stages: Interpolation and
    booking will then be carried out separately on each currency group. At the
    outset of this routine, we should have distinct groups of currencies without
    any ambiguities regarding which currency they need to balance against.

    Here's how this works.

    - First we apply the constraint that cost-currency and price-currency must
      match, if there is both a cost and a price. This reduces the space of
      possibilities somewhat.

    - If the currency is explicitly specified, we put the posting in that
      currency's bucket.

    - If not, we have a few methods left to disambiguate the currency:

      1. We look at the remaining postings... if they are all of a single
         currency, the posting must be in that currency too.

      2. If we cannot do that, we inspect the contents of the inventory of the
         account for the posting. If all the contents are of a single currency,
         we use that one.

    Args:
      entry: The transaction whose postings are to be categorized.
      balances: A dict of currency to inventory contents before the transaction is
        applied.
    Returns:
      A list of (currency string, list of tuples) items describing each postings
      and its interpolated currencies, and a list of generated errors for
      currency interpolation. The entry's original postings are left unmodified.
      Each tuple in the value-list contains:
        index: The posting index in the original entry.
        units_currency: The interpolated currency for units.
        cost_currency: The interpolated currency for cost.
        price_currency: The interpolated currency for price.
    """
    errors = []

    groups = collections.defaultdict(list)
    # Remember the first posting index seen per currency, to keep the output
    # groups in posting order.
    sortdict = {}
    auto_postings = []
    unknown = []
    for index, posting in enumerate(entry.postings):
        units = posting.units
        cost = posting.cost
        price = posting.price

        # Extract and override the currencies locally.
        units_currency = (
            units.currency if units is not MISSING and units is not None else None
        )
        cost_currency = cost.currency if cost is not MISSING and cost is not None else None
        price_currency = (
            price.currency if price is not MISSING and price is not None else None
        )

        # First we apply the constraint that cost-currency and price-currency
        # must match, if there is both a cost and a price. This reduces the
        # space of possibilities somewhat.
        if cost_currency is MISSING and isinstance(price_currency, str):
            cost_currency = price_currency
        if price_currency is MISSING and isinstance(cost_currency, str):
            price_currency = cost_currency

        refer = Refer(index, units_currency, cost_currency, price_currency)

        if units is MISSING and price_currency is None:
            # Bucket auto-postings separately from unknown.
            auto_postings.append(refer)
        else:
            # Bucket with what we know so far.
            currency = get_bucket_currency(refer)
            if currency is not None:
                sortdict.setdefault(currency, index)
                groups[currency].append(refer)
            else:
                # If we need to infer the currency, store in unknown.
                unknown.append(refer)

    # We look at the remaining postings... if they are all of a single currency,
    # the posting must be in that currency too.
    if unknown and len(unknown) == 1 and len(groups) == 1:
        (index, units_currency, cost_currency, price_currency) = unknown.pop()

        other_currency = next(iter(groups.keys()))
        if price_currency is None and cost_currency is None:
            # Infer to the units currency.
            units_currency = other_currency
        else:
            # Infer to the cost and price currencies.
            if price_currency is MISSING:
                price_currency = other_currency
            if cost_currency is MISSING:
                cost_currency = other_currency

        refer = Refer(index, units_currency, cost_currency, price_currency)
        currency = get_bucket_currency(refer)
        assert currency is not None
        sortdict.setdefault(currency, index)
        groups[currency].append(refer)

    # Finally, try to resolve all the unknown legs using the inventory contents
    # of each account.
    for refer in unknown:
        (index, units_currency, cost_currency, price_currency) = refer
        posting = entry.postings[index]
        balance = balances.get(posting.account, None)
        if balance is None:
            balance = inventory.Inventory()

        if units_currency is MISSING:
            balance_currencies = balance.currencies()
            if len(balance_currencies) == 1:
                units_currency = balance_currencies.pop()

        if cost_currency is MISSING or price_currency is MISSING:
            balance_cost_currencies = balance.cost_currencies()
            if len(balance_cost_currencies) == 1:
                balance_cost_currency = balance_cost_currencies.pop()
                if price_currency is MISSING:
                    price_currency = balance_cost_currency
                if cost_currency is MISSING:
                    cost_currency = balance_cost_currency

        refer = Refer(index, units_currency, cost_currency, price_currency)
        currency = get_bucket_currency(refer)
        if currency is not None:
            sortdict.setdefault(currency, index)
            groups[currency].append(refer)
        else:
            errors.append(
                CategorizationError(
                    posting.meta, "Failed to categorize posting {}".format(index + 1), entry
                )
            )

    # Fill in missing units currencies if some remain as missing. This may occur
    # if we used the cost or price to bucket the currency but the units currency
    # was missing.
    for currency, refers in groups.items():
        for rindex, refer in enumerate(refers):
            if refer.units_currency is MISSING:
                posting = entry.postings[refer.index]
                balance = balances.get(posting.account, None)
                if balance is None:
                    continue
                balance_currencies = balance.currencies()
                if len(balance_currencies) == 1:
                    refers[rindex] = refer._replace(units_currency=balance_currencies.pop())

    # Deal with auto-postings.
    if len(auto_postings) > 1:
        refer = auto_postings[-1]
        posting = entry.postings[refer.index]
        errors.append(
            CategorizationError(
                posting.meta,
                "You may not have more than one auto-posting per currency",
                entry,
            )
        )
        auto_postings = auto_postings[0:1]
    for refer in auto_postings:
        # Bind each group's list directly so appending does not mutate the
        # set of keys being iterated.
        for currency, glist in groups.items():
            sortdict.setdefault(currency, refer.index)
            glist.append(Refer(refer.index, currency, None, None))

    # Issue error for all currencies which we could not resolve.
    for currency, refers in groups.items():
        for refer in refers:
            posting = entry.postings[refer.index]
            for currency, name in [
                (refer.units_currency, "units"),
                (refer.cost_currency, "cost"),
                (refer.price_currency, "price"),
            ]:
                if currency is MISSING:
                    errors.append(
                        CategorizationError(
                            posting.meta,
                            "Could not resolve {} currency".format(name),
                            entry,
                        )
                    )

    sorted_groups = sorted(groups.items(), key=lambda item: sortdict[item[0]])
    return sorted_groups, errors
+
def compute_cost_number(costspec, units):
    """Given a CostSpec, return the cost number, if possible to compute.

    Args:
      costspec: A parsed instance of CostSpec.
      units: An instance of Amount for the units of the position.
    Returns:
      If it is not possible to calculate the cost, return None.
      Otherwise, returns a Decimal instance, the per-unit cost.
    """
    number_per = costspec.number_per
    number_total = costspec.number_total
    # Either component still MISSING means the cost cannot be computed yet.
    if MISSING in (number_per, number_total):
        return None
    if number_total is not None:
        # Compute the per-unit cost if there is some total cost
        # component involved. Work with the absolute number of units so the
        # sign of the position does not affect the per-unit cost.
        cost_total = number_total
        units_number = abs(units.number)
        if number_per is not None:
            cost_total += number_per * units_number
        unit_cost = cost_total / units_number
    elif number_per is None:
        return None
    else:
        unit_cost = number_per
    return unit_cost
+
def convert_costspec_to_cost(posting):
    """Convert an instance of CostSpec to Cost, if present on the posting.

    If the posting has no cost, it itself is just returned.

    Args:
      posting: An instance of Posting.
    Returns:
      An instance of Posting with a possibly replaced 'cost' attribute.
    """
    costspec = posting.cost
    # NOTE(review): the inner None check is redundant after isinstance(), but
    # it is preserved here from the original control flow.
    if isinstance(costspec, position.CostSpec):
        if costspec is not None:
            number_per = costspec.number_per
            number_total = costspec.number_total
            if number_total is None:
                unit_cost = number_per
            else:
                # A total cost component is involved; derive the per-unit cost
                # from it (plus any per-unit component).
                units_number = abs(posting.units.number)
                cost_total = number_total
                if number_per is not MISSING:
                    cost_total += number_per * units_number
                unit_cost = cost_total / units_number
            resolved = Cost(unit_cost, costspec.currency, costspec.date, costspec.label)
            posting = posting._replace(units=posting.units, cost=resolved)
    return posting
def has_self_reduction(postings, methods):
    """Return true if the postings potentially reduce each other at cost.

    Args:
      postings: A list of postings with uninterpolated CostSpec cost instances.
      methods: A mapping of account name to their corresponding booking
        method.
    Returns:
      A boolean, true if there's a potential for self-reduction.
    """
    # Records the first sign seen for each (account, currency) pair; a later
    # posting with the opposite sign signals a potential self-reduction.
    sign_by_key = {}
    for posting in postings:
        if posting.cost is None:
            continue
        # Accounts booked with the NONE method never match lots, so they
        # cannot self-reduce.
        if methods[posting.account] is Booking.NONE:
            continue
        key = (posting.account, posting.units.currency)
        sign = 1 if posting.units.number > ZERO else -1
        if sign_by_key.setdefault(key, sign) != sign:
            return True
    return False
A tuple of
- postings – A lit of new posting instances.
+ postings – A list of new posting instances.
errors: A list of errors generated during interpolation.
interpolated: A boolean, true if we did have to interpolate.
In the case of an error, this returns the original list of postings, which
@@ -2799,227 +3049,259 @@
Source code in beancount/parser/booking_full.py
-
def interpolate_group(postings, balances, currency, tolerances):
    """Interpolate missing numbers in the set of postings.

    Args:
      postings: A list of Posting instances.
      balances: A dict of account to its ante-inventory.
      currency: The weight currency of this group, used for reporting errors.
      tolerances: A dict of currency to tolerance values.
    Returns:
      A tuple of
        postings: A list of new posting instances.
        errors: A list of errors generated during interpolation.
        interpolated: A boolean, true if we did have to interpolate.

      In the case of an error, this returns the original list of postings,
      which is still incomplete. If an error is returned, you should probably
      skip the transaction altogether, or just not include the postings in it.
      (An alternative behaviour would be to return only the list of valid
      postings, but that would likely result in an unbalanced transaction. We
      do it this way by choice.)
    """
    errors = []

    # Figure out which type of amount is missing, by creating a list of
    # incomplete postings and which type of units is missing.
    incomplete = []
    for index, posting in enumerate(postings):
        units = posting.units
        cost = posting.cost
        price = posting.price

        # Identify incomplete parts of the Posting components.
        if units.number is MISSING:
            incomplete.append((MissingType.UNITS, index))

        if isinstance(cost, CostSpec):
            if cost and cost.number_per is MISSING:
                incomplete.append((MissingType.COST_PER, index))
            if cost and cost.number_total is MISSING:
                incomplete.append((MissingType.COST_TOTAL, index))
        else:
            # Check that a resolved instance of Cost never needs interpolation.
            #
            # Note that in theory we could support the interpolation of regular
            # per-unit costs in these if we wanted to; but because they're all
            # reducing postings that have been booked earlier, those never need
            # to be interpolated.
            if cost is not None:
                assert isinstance(
                    cost.number, Decimal
                ), "Internal error: cost has no number: {}; on postings: {}".format(
                    cost, postings
                )

        if price and price.number is MISSING:
            incomplete.append((MissingType.PRICE, index))

    # The replacement posting for the incomplete posting of this group.
    new_posting = None

    if len(incomplete) == 0:
        # If there are no missing numbers, just convert the CostSpec to Cost
        # and return that.
        out_postings = [convert_costspec_to_cost(posting) for posting in postings]

    elif len(incomplete) > 1:
        # If there is more than a single value to be interpolated, generate an
        # error and return no postings.
        _, posting_index = incomplete[0]
        errors.append(
            InterpolationError(
                postings[posting_index].meta,
                "Too many missing numbers for currency group '{}'".format(currency),
                None,
            )
        )
        out_postings = []

    else:
        # If there is a single missing number, calculate it and fill it in
        # here.
        missing, index = incomplete[0]
        incomplete_posting = postings[index]

        # Convert augmenting postings' costs from CostSpec to corresponding
        # Cost instances, except for the incomplete posting.
        new_postings = [
            (
                posting
                if posting is incomplete_posting
                else convert_costspec_to_cost(posting)
            )
            for posting in postings
        ]

        # Compute the balance of the other postings.
        residual = interpolate.compute_residual(
            posting for posting in new_postings if posting is not incomplete_posting
        )
        assert len(residual) < 2, "Internal error in grouping postings by currencies."
        if not residual.is_empty():
            respos = next(iter(residual))
            assert (
                respos.cost is None
            ), "Internal error; cost appears in weight calculation."
            assert (
                respos.units.currency == currency
            ), "Internal error; residual different than currency group."
            weight = -respos.units.number
            weight_currency = respos.units.currency
        else:
            weight = ZERO
            weight_currency = currency

        if missing == MissingType.UNITS:
            units = incomplete_posting.units
            cost = incomplete_posting.cost
            if cost:
                # Handle the special case where we only have total cost.
                if cost.number_per == ZERO:
                    errors.append(
                        InterpolationError(
                            incomplete_posting.meta,
                            "Cannot infer per-unit cost only from total",
                            None,
                        )
                    )
                    return postings, errors, True

                assert (
                    cost.currency == weight_currency
                ), "Internal error; residual currency different than missing currency."
                cost_total = cost.number_total or ZERO
                units_number = (weight - cost_total) / cost.number_per

            elif incomplete_posting.price:
                assert (
                    incomplete_posting.price.currency == weight_currency
                ), "Internal error; residual currency different than missing currency."
                units_number = weight / incomplete_posting.price.number

            else:
                assert (
                    units.currency == weight_currency
                ), "Internal error; residual currency different than missing currency."
                units_number = weight

            # Quantize the interpolated units if necessary.
            units_number = interpolate.quantize_with_tolerance(
                tolerances, units.currency, units_number
            )

            if weight != ZERO:
                new_pos = Position(Amount(units_number, units.currency), cost)
                new_posting = incomplete_posting._replace(
                    units=new_pos.units, cost=new_pos.cost
                )
            else:
                new_posting = None

        elif missing == MissingType.COST_PER:
            units = incomplete_posting.units
            cost = incomplete_posting.cost
            assert (
                cost.currency == weight_currency
            ), "Internal error; residual currency different than missing currency."
            if units.number != ZERO:
                number_per = (weight - (cost.number_total or ZERO)) / units.number
                new_cost = cost._replace(number_per=number_per)
                new_pos = Position(units, new_cost)
                new_posting = incomplete_posting._replace(
                    units=new_pos.units, cost=new_pos.cost
                )
            else:
                new_posting = None

        elif missing == MissingType.COST_TOTAL:
            units = incomplete_posting.units
            cost = incomplete_posting.cost
            assert (
                cost.currency == weight_currency
            ), "Internal error; residual currency different than missing currency."
            number_total = weight - cost.number_per * units.number
            new_cost = cost._replace(number_total=number_total)
            new_pos = Position(units, new_cost)
            new_posting = incomplete_posting._replace(
                units=new_pos.units, cost=new_pos.cost
            )

        elif missing == MissingType.PRICE:
            units = incomplete_posting.units
            cost = incomplete_posting.cost
            if cost is not None:
                errors.append(
                    InterpolationError(
                        incomplete_posting.meta,
                        "Cannot infer price for postings with units held at cost",
                        None,
                    )
                )
                return postings, errors, True
            else:
                price = incomplete_posting.price
                assert (
                    price.currency == weight_currency
                ), "Internal error; residual currency different than missing currency."
                new_price_number = abs(weight / units.number)
                new_posting = incomplete_posting._replace(
                    price=Amount(new_price_number, price.currency)
                )

        else:
            assert False, "Internal error; Invalid missing type."

        # Replace the number in the posting.
        if new_posting is not None:
            # Set meta-data on the new posting to indicate it was interpolated.
            if new_posting.meta is None:
                new_posting = new_posting._replace(meta={})
            new_posting.meta[interpolate.AUTOMATIC_META] = True

            # Convert augmenting posting costs from CostSpec to a
            # corresponding Cost instance.
            new_postings[index] = convert_costspec_to_cost(new_posting)
        else:
            del new_postings[index]
        out_postings = new_postings

    assert all(not isinstance(posting.cost, CostSpec) for posting in out_postings)

    # Check that units are non-zero and that no cost remains negative; issue
    # an error if this is the case.
    for posting in out_postings:
        if posting.cost is None:
            continue
        # If there is a cost, we don't allow either a cost value of zero, nor
        # a zero number of units. Note that we allow a price of zero as the
        # only special case allowed (for conversion entries), but never for
        # costs.
        if posting.units.number == ZERO:
            errors.append(
                InterpolationError(
                    posting.meta, 'Amount is zero: "{}"'.format(posting.units), None
                )
            )
        if posting.cost.number is not None and posting.cost.number < ZERO:
            errors.append(
                InterpolationError(
                    posting.meta, 'Cost is negative: "{}"'.format(posting.cost), None
                )
            )

    return out_postings, errors, (new_posting is not None)
def replace_currencies(postings, refer_groups):
    """Replace resolved currencies in the entry's Postings.

    This essentially applies the findings of categorize_by_currency() to
    produce new postings with all currencies resolved.

    Args:
      postings: A list of Posting instances to replace.
      refer_groups: A list of (currency, list of posting references) items as
        returned by categorize_by_currency().
    Returns:
      A new list of items of (currency, list of Postings), postings for which
      the currencies have been replaced by their interpolated currency values.
    """
    resolved_groups = []
    for currency, refers in refer_groups:
        group_postings = []
        # Process references in original posting order.
        for refer in sorted(refers, key=lambda r: r.index):
            posting = postings[refer.index]
            units = posting.units
            if units is MISSING or units is None:
                # No units at all: synthesize an amount carrying only the
                # resolved currency; the number stays to be interpolated.
                posting = posting._replace(units=Amount(MISSING, refer.units_currency))
            else:
                changed = False
                cost = posting.cost
                price = posting.price
                if units.currency is MISSING:
                    units = Amount(units.number, refer.units_currency)
                    changed = True
                if cost and cost.currency is MISSING:
                    cost = cost._replace(currency=refer.cost_currency)
                    changed = True
                if price and price.currency is MISSING:
                    price = Amount(price.number, refer.price_currency)
                    changed = True
                if changed:
                    posting = posting._replace(units=units, cost=cost, price=price)
            group_postings.append(posting)
        resolved_groups.append((currency, group_postings))
    return resolved_groups