\input{share/preamble} %\VignetteIndexEntry{Additional continuous and discrete distributions} %\VignettePackage{actuar} %\SweaveUTF8 \title{Inventory of continuous and discrete distributions in \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Nicholas Langevin \\ Université Laval \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} %% Compact, sans label itemize environment for the appendices. \setlist[itemize]{label={},leftmargin=0pt,align=left,nosep,midpenalty=10000} \begin{document} \maketitle \section{Introduction} \label{sec:introduction} R includes functions to compute the probability density function (pdf) or the probability mass function (pmf), the cumulative distribution function (cdf) and the quantile function, as well as functions to generate variates from a fair number of continuous and discrete distributions. For some root \code{foo}, the support functions are named \code{dfoo}, \code{pfoo}, \code{qfoo} and \code{rfoo}, respectively. Package \pkg{actuar} provides \code{d}, \code{p}, \code{q} and \code{r} functions for a large number of continuous size distributions useful for loss severity modeling; for phase-type distributions used in computation of ruin probabilities; for zero-truncated and zero-modified extensions of the discrete distributions commonly used in loss frequency modeling; for the heavy tailed Poisson-inverse Gaussian discrete distribution. The package also introduces support functions to compute raw moments, limited moments and the moment generating function (when it exists) of continuous distributions. 
\section{Additional continuous size distributions} \label{sec:continuous} The package provides support functions for all the probability distributions found in Appendix~A of \citet{LossModels4e} and not already present in base R, excluding the log-$t$, but including the loggamma distribution \citep{HoggKlugman}, as well as for the Feller--Pareto distribution and related Pareto distributions with a location parameter \citep{Arnold:pareto:2ed}. These distributions mostly fall under the umbrella of extreme value or heavy tailed distributions. \autoref{tab:continuous} lists the distributions supported by \pkg{actuar} along with the root names of the R functions. \autoref{sec:app:continuous} details the formulas implemented and the name of the argument corresponding to each parameter. By default, all functions (except those for the Pareto distribution) use a rate parameter equal to the inverse of the scale parameter. This differs from \citet{LossModels4e} but is better in line with the functions for the gamma, exponential and Weibull distributions in base R. 
\begin{table} \centering \begin{tabular}{lll} \toprule Family & Distribution & Root \\ \midrule Feller--Pareto & Feller--Pareto & \code{fpareto} \\ & Pareto IV & \code{pareto4} \\ & Pareto III & \code{pareto3} \\ & Pareto II & \code{pareto2} \\ & Transformed beta & \code{trbeta} \\ & Burr & \code{burr} \\ & Loglogistic & \code{llogis} \\ & Paralogistic & \code{paralogis} \\ & Generalized Pareto & \code{genpareto} \\ & Pareto & \code{pareto} \\ & Single-parameter Pareto & \code{pareto1} \\ & Inverse Burr & \code{invburr} \\ & Inverse Pareto & \code{invpareto} \\ & Inverse paralogistic & \code{invparalogis} \\ \midrule Transformed gamma & Transformed gamma & \code{trgamma} \\ & Inverse transformed gamma & \code{invtrgamma} \\ & Inverse gamma & \code{invgamma} \\ & Inverse Weibull & \code{invweibull} \\ & Inverse exponential & \code{invexp} \\ \midrule Other & Loggamma & \code{lgamma} \\ & Gumbel & \code{gumbel} \\ & Inverse Gaussian & \code{invgauss} \\ & Generalized beta & \code{genbeta} \\ \bottomrule \end{tabular} \caption{Probability distributions supported by \pkg{actuar} classified by family and root names of the R functions.} \label{tab:continuous} \end{table} We mostly use the nomenclature of \citet{LossModels4e} to classify the continuous distributions supported by \pkg{actuar}. However, following \citet{Arnold:pareto:2ed}, we regroup distributions of the transformed beta family and variants of the Pareto distribution inside the larger Feller--Pareto family of distributions. \autoref{fig:diagram:fp-family} shows the relationships between the distributions of the Feller--Pareto and transformed beta families. \autoref{fig:diagram:trgamma-family} does the same for the distributions of the transformed gamma and inverse transformed gamma families. 
\begin{figure} \centering \setlength{\unitlength}{0.7cm} \begin{picture}(16.9,10.75)(-0.7,-0.4) \small % Flèches \put(8,6){\vector(2,-1){3.7}} % trbeta -> invburr \put(13,4.2){\vector(0,1){0.95}} % invburr -> invparalogis \put(11.7,3.1){\line(-1,-1){1}} \put(10.7,2.1){\line(-1,0){7.7}} \put(3,2.1){\vector(-1,-1){1.1}} % invburr -> llogis \put(13,3){\vector(0,-1){2}} % invburr -> invpareto \put(2.05,3.1){\vector(2,-1){4.2}} % burr -> pareto \put(1,3){\vector(0,-1){2}} % burr -> llogis \put(6,6){\vector(-2,-1){3.85}} % trbeta -> burr \put(1,4.2){\vector(0,1){0.95}} % burr -> paralogis \put(7,6){\vector(0,-1){1.8}} % trbeta -> genpareto \put(7,9){\vector(0,-1){1.8}} % fpareto -> trbeta \put(7,3){\vector(0,-1){2}} % genpareto -> pareto \put(8,3){\vector(2,-1){4}} % genpareto -> invpareto % \put(6,9){\vector(-2,-1){3.3}} % fpareto -> pareto3 % \put(8,9){\vector(2,-1){3.3}} % fpareto -> pareto1 \put(1,9){\vector(0,-1){1.1}} % pareto4 -> pareto3 \put(13,9){\vector(0,-1){1.1}} % pareto2 -> pareto1 \put(4.5,9.6){\vector(-1,0){1.75}} % fpareto -> pareto4 \put(9.5,9.6){\vector(1,0){1.75}} % fpareto -> pareto2 \put(14.7,9.6){\line(1,0){1.5}} % pareto2 -> pareto \put(16.2,9.6){\line(0,-1){10}} \put(16.2,-0.4){\line(-1,0){7.5}} \put(8.7,-0.4){\vector(-2,1){0.72}} \put(14.8,9.62){\makebox(0,0.5)[l]{$\mu = 0$}} \put(7,9.65){\makebox(0,0.5)[c]{Feller-Pareto}} \put(7,9.1){\makebox(0,0.5)[c]{$\mu, \alpha, \gamma, \tau, \theta$}} \put(7,9.6){\oval(5,1.2)} \put(3.2,9.65){\makebox(0,0.5)[l]{$\tau = 1$}} \put(1,9.65){\makebox(0,0.5)[c]{Pareto IV}} \put(1,9.1){\makebox(0,0.5)[c]{$\mu, \alpha, \gamma, \theta$}} \put(1,9.6){\oval(3.4,1.2)} \put(9.8,9.05){\makebox(0,0.5)[l]{$\gamma = 1$}} \put(9.8,9.65){\makebox(0,0.5)[l]{$\tau = 1$}} \put(13,9.65){\makebox(0,0.5)[c]{Pareto II}} \put(13,9.1){\makebox(0,0.5)[c]{$\mu,\alpha, \theta$}} \put(13,9.6){\oval(3.4,1.2)} \put(0.8,8.3){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,7.35){\makebox(0,0.5)[c]{Pareto III}} \put(1,6.8){\makebox(0,0.5)[c]{$\mu, 
\gamma, \theta$}} \put(1,7.3){\oval(3.4,1.2)} \put(13.2,8.3){\makebox(0,0.5)[l]{$\mu = \theta$}} \put(13,7.35){\makebox(0,0.5)[c]{Pareto I}} \put(13,6.8){\makebox(0,0.5)[c]{$\alpha, \theta$}} \put(13,7.3){\oval(3.4,1.2)} \put(7.2,7.9){\makebox(0,0.5)[l]{$\mu = 0$}} \put(7,6.65){\makebox(0,0.5)[c]{Transformed beta}} \put(7,6.1){\makebox(0,0.5)[c]{$\alpha, \gamma, \tau, \theta$}} \put(7,6.6){\oval(5,1.2)} \put(9.2,5.4){\rotatebox{-26.6}{\makebox(0,0.5)[l]{$\alpha = 1$}}} \put(13.20,3.65){\makebox(0,0.5)[c]{Inverse Burr}} \put(13.20,3.1){\makebox(0,0.5)[c]{$\gamma, \tau, \theta$}} \put(13.20,3.6){\oval(3.4,1.2)} \put(13.2,4.3){\makebox(0,0.5)[l]{$\gamma = \tau$}} \put(13.20,5.80){\makebox(0,0.5)[c]{Inverse paralogistic}} \put(13.20,5.25){\makebox(0,0.5)[c]{$\tau, \theta$}} \put(13.20,5.75){\oval(5.4,1.2)} \put(13.2,1.9){\makebox(0,0.5)[l]{$\gamma = 1$}} \put(13.20,0.45){\makebox(0,0.5)[c]{Inverse Pareto}} \put(13.20,-0.1){\makebox(0,0.5)[c]{$\tau, \theta$}} \put(13.20,0.4){\oval(3.9,1.2)} \put(7.2,4.9){\makebox(0,0.5)[l]{$\gamma = 1$}} \put(7,3.65){\makebox(0,0.5)[c]{Generalized Pareto}} \put(7,3.1){\makebox(0,0.5)[c]{$\alpha, \tau, \theta$}} \put(7,3.6){\oval(4.9,1.2)} \put(7.2,1.25){\makebox(0,0.5)[l]{$\tau = 1$}} \put(7,0.45){\makebox(0,0.5)[c]{Pareto}} \put(7,-0.1){\makebox(0,0.5)[c]{$\alpha, \theta$}} \put(7,0.4){\oval(2.2,1.2)} \put(4.5,5.4){\rotatebox{26.6}{\makebox(0,0.5)[r]{$\tau = 1$}}} \put(1,3.65){\makebox(0,0.5)[c]{Burr}} \put(1,3.1){\makebox(0,0.5)[c]{$\alpha, \gamma, \theta$}} \put(1,3.6){\oval(2.5,1.2)} \put(0.8,4.3){\makebox(0,0.5)[r]{$\gamma = \alpha$}} \put(1,5.80){\makebox(0,0.5)[c]{Paralogistic}} \put(1,5.25){\makebox(0,0.5)[c]{$\alpha, \theta$}} \put(1,5.75){\oval(3.4,1.2)} \put(0.8,1.9){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,0.45){\makebox(0,0.5)[c]{Loglogistic}} \put(1,-0.1){\makebox(0,0.5)[c]{$\gamma, \theta$}} \put(1,0.4){\oval(3.4,1.2)} \put(9.8,2.1){\rotatebox{-26.6}{\makebox(0,0.5)[r]{$\alpha = 1$}}} 
\put(4.0,2.1){\rotatebox{-26.6}{\makebox(0,0.5)[r]{$\gamma = 1$}}} \put(11.25,3.0){\rotatebox{45}{\makebox(0,0.5)[r]{$\tau = 1$}}} \end{picture} \caption{Interrelations between distributions of the Feller--Pareto family. This diagram is an extension of Figure~5.2 of \citet{LossModels4e}.} \label{fig:diagram:fp-family} \end{figure} \begin{figure} \setlength{\unitlength}{0.7cm} \begin{picture}(7.5,5.2)(-0.25,0) \small % Flèches \put(4,4){\vector(2,-1){1.55}} % trgamma -> weibull \put(5.55,2){\vector(-2,-1){1.55}} % weibull -> exp \put(1.55,2){\vector(2,-1){1.55}} % gamma -> exp \put(3,4){\vector(-2,-1){1.55}} % trgamma -> gamma \put(3.5,4.65){\makebox(0,0.5)[c]{Transformed gamma}} \put(3.5,4.1){\makebox(0,0.5)[c]{$\alpha, \tau, \lambda$}} \put(3.5,4.6){\oval(5.5,1.2)} \put(5.4,3.45){\makebox(0,0.5)[l]{$\alpha = 1$}} \put(6,2.65){\makebox(0,0.5)[c]{Weibull}} \put(6,2.1){\makebox(0,0.5)[c]{$\tau, \lambda$}} \put(6,2.6){\oval(2.5,1.2)} \put(5.4,1.35){\makebox(0,0.5)[l]{$\tau = 1$}} \put(3.5,0.65){\makebox(0,0.5)[c]{Exponential}} \put(3.5,0.1){\makebox(0,0.5)[c]{$\lambda$}} \put(3.5,0.6){\oval(3.5,1.2)} \put(1.6,1.35){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,2.65){\makebox(0,0.5)[c]{Gamma}} \put(1,2.1){\makebox(0,0.5)[c]{$\alpha, \lambda$}} \put(1,2.6){\oval(2.5,1.2)} \put(1.6,3.45){\makebox(0,0.5)[r]{$\tau = 1$}} \end{picture} \hfill \begin{picture}(8.75,5.2)(-0.875,0) \small % Flèches \put(4,4){\vector(2,-1){1.55}} % trgamma -> weibull \put(5.55,2){\vector(-2,-1){1.55}} % weibull -> exp \put(1.55,2){\vector(2,-1){1.55}} % gamma -> exp \put(3,4){\vector(-2,-1){1.55}} % trgamma -> gamma \put(3.5,4.65){\makebox(0,0.5)[c]{Inverse transformed gamma}} \put(3.5,4.1){\makebox(0,0.5)[c]{$\alpha, \tau, \lambda$}} \put(3.5,4.6){\oval(7,1.2)} \put(5.4,3.45){\makebox(0,0.5)[l]{$\alpha = 1$}} \put(6,2.65){\makebox(0,0.5)[c]{Inverse Weibull}} \put(6,2.1){\makebox(0,0.5)[c]{$\tau, \lambda$}} \put(6,2.6){\oval(4,1.2)} \put(5.4,1.35){\makebox(0,0.5)[l]{$\tau = 1$}} 
\put(3.5,0.65){\makebox(0,0.5)[c]{Inverse exponential}} \put(3.5,0.1){\makebox(0,0.5)[c]{$\lambda$}} \put(3.5,0.6){\oval(5,1.2)} \put(1.6,1.35){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,2.65){\makebox(0,0.5)[c]{Inverse gamma}} \put(1,2.1){\makebox(0,0.5)[c]{$\alpha, \lambda$}} \put(1,2.6){\oval(4,1.2)} \put(1.6,3.45){\makebox(0,0.5)[r]{$\tau = 1$}} \end{picture} \caption{Interrelations between distributions of the transformed gamma and inverse transformed gamma families. Diagrams derived from Figure~5.3 of \citet{LossModels4e}.} \label{fig:diagram:trgamma-family} \end{figure} In addition to the \code{d}, \code{p}, \code{q} and \code{r} functions, \pkg{actuar} introduces \code{m}, \code{lev} and \code{mgf} functions to compute, respectively, the theoretical raw moments \begin{equation*} m_k = \E{X^k}, \end{equation*} the theoretical limited moments \begin{equation*} \E{(X \wedge x)^k} = \E{\min(X, x)^k} \end{equation*} and the moment generating function \begin{equation*} M_X(t) = \E{e^{tX}}, \end{equation*} when it exists. Every distribution of \autoref{tab:continuous} is supported, along with the following distributions of base R: beta, exponential, chi-square, gamma, lognormal, normal (no \code{lev}), uniform and Weibull. The \code{m} and \code{lev} functions are especially useful for estimation methods based on the matching of raw or limited moments; see the \code{lossdist} vignette for their empirical counterparts. The \code{mgf} functions come in handy to compute the adjustment coefficient in ruin theory; see the \code{risk} vignette. \section{Phase-type distributions} \label{sec:phase-type} In addition to the 23 distributions of \autoref{tab:continuous}, the package provides support for a family of distributions deserving a separate presentation. Phase-type distributions \citep{Neuts_81} are defined as the distribution of the time until absorption of continuous time, finite state Markov processes with $m$ transient states and one absorbing state. 
Let \begin{equation} \label{eq:Markov-transition-matrix} \mat{Q} = \begin{bmatrix} \mat{T} & \mat{t} \\ \mat{0} & 0 \end{bmatrix} \end{equation} be the transition rates matrix (or intensity matrix) of such a process and let $(\mat{\pi}, \pi_{m + 1})$ be the initial probability vector. Here, $\mat{T}$ is an $m \times m$ non-singular matrix with $t_{ii} < 0$ for $i = 1, \dots, m$ and $t_{ij} \geq 0$ for $i \neq j$, $\mat{t} = - \mat{T} \mat{e}$ and $\mat{e}$ is a column vector with all components equal to 1. Then the cdf of the time until absorption random variable with parameters $\mat{\pi}$ and $\mat{T}$ is \begin{equation} \label{eq:cdf-phtype} F(x) = \begin{cases} \pi_{m + 1}, & x = 0, \\ 1 - \mat{\pi} e^{\mat{T} x} \mat{e}, & x > 0, \end{cases} \end{equation} where \begin{equation} \label{eq:matrix-exponential} e^{\mat{M}} = \sum_{n = 0}^\infty \frac{\mat{M}^n}{n!} \end{equation} is the matrix exponential of matrix $\mat{M}$. The exponential distribution, the Erlang (gamma with integer shape parameter) and discrete mixtures thereof are common special cases of phase-type distributions. The package provides \code{d}, \code{p}, \code{r}, \code{m} and \code{mgf} functions for phase-type distributions. The root is \code{phtype} and parameters $\mat{\pi}$ and $\mat{T}$ are named \code{prob} and \code{rates}, respectively; see also \autoref{sec:app:phase-type}. For the package, function \code{pphtype} is central to the evaluation of the ruin probabilities; see \code{?ruin} and the \code{risk} vignette. \section{Extensions to standard discrete distributions} \label{sec:discrete} The package introduces support functions for counting distributions commonly used in loss frequency modeling. A counting distribution is a discrete distribution defined on the non-negative integers $0, 1, 2, \dots$. Let $N$ be the counting random variable. We denote $p_k$ the probability that the random variable $N$ takes the value $k$, that is: \begin{equation*} p_k = \Pr[N = k]. 
\end{equation*} \citet{LossModels4e} classify counting distributions in two main classes. First, a discrete random variable is a member of the $(a, b, 0)$ class of distributions if there exist constants $a$ and $b$ such that \begin{equation*} \frac{p_k}{p_{k - 1}} = a + \frac{b}{k}, \quad k = 1, 2, \dots. \end{equation*} The probability at zero, $p_0$, is set such that $\sum_{k = 0}^\infty p_k = 1$. The members of this class are the Poisson, the binomial, the negative binomial and its special case, the geometric. These distributions are all well supported in base R with \code{d}, \code{p}, \code{q} and \code{r} functions. The second class of distributions is the $(a, b, 1)$ class. A discrete random variable is a member of the $(a, b, 1)$ class of distributions if there exist constants $a$ and $b$ such that \begin{equation*} \frac{p_k}{p_{k - 1}} = a + \frac{b}{k}, \quad k = 2, 3, \dots. \end{equation*} One will note that the recursion starts at $k = 2$ for the $(a, b, 1)$ class. Therefore, the probability at zero can be any arbitrary number $0 \leq p_0 \leq 1$. Setting $p_0 = 0$ defines a subclass of so-called \emph{zero-truncated} distributions. The members of this subclass are the zero-truncated Poisson, the zero-truncated binomial, the zero-truncated negative binomial and the zero-truncated geometric. Let $p_k^T$ denote the probability mass in $k$ for a zero-truncated distribution. As above, $p_k$ denotes the probability mass for the corresponding member of the $(a, b, 0)$ class. We have \begin{equation*} p_k^T = \begin{cases} 0, & k = 0 \\ \displaystyle\frac{p_k}{1 - p_0}, & k = 1, 2, \dots. \end{cases} \end{equation*} Moreover, let $P(k)$ denote the cumulative distribution function of a member of the $(a, b, 0)$ class. Then the cdf $P^T(k)$ of the corresponding zero-truncated distribution is \begin{equation*} P^T(k) = \frac{P(k) - P(0)}{1 - P(0)} = \frac{P(k) - p_0}{1 - p_0} \end{equation*} for all $k = 0, 1, 2, \dots$. 
Alternatively, the survival function $\bar{P}^T(k) = 1 - P^T(k)$ is \begin{equation*} \bar{P}^T(k) = \frac{\bar{P}(k)}{\bar{P}(0)} = \frac{\bar{P}(k)}{1 - p_0}. \end{equation*} Package \pkg{actuar} provides \code{d}, \code{p}, \code{q} and \code{r} functions for all the zero-truncated distributions mentioned above. \autoref{tab:discrete} lists the root names of the functions; see \autoref{sec:app:discrete} for additional details. \begin{table} \centering \begin{tabular}{ll} \toprule Distribution & Root \\ \midrule Zero-truncated Poisson & \code{ztpois} \\ Zero-truncated binomial & \code{ztbinom} \\ Zero-truncated negative binomial & \code{ztnbinom} \\ Zero-truncated geometric & \code{ztgeom} \\ Logarithmic & \code{logarithmic} \\ \addlinespace[6pt] Zero-modified Poisson & \code{zmpois} \\ Zero-modified binomial & \code{zmbinom} \\ Zero-modified negative binomial & \code{zmnbinom} \\ Zero-modified geometric & \code{zmgeom} \\ Zero-modified logarithmic & \code{zmlogarithmic} \\ \bottomrule \end{tabular} \caption{Members of the $(a, b, 1)$ class of discrete distributions supported by \pkg{actuar} and root names of the R functions.} \label{tab:discrete} \end{table} An entry of \autoref*{tab:discrete} deserves a few additional words. The logarithmic (or log-series) distribution with parameter $\theta$ has pmf \begin{equation*} p_k = \frac{a \theta^k}{k}, \quad k = 1, 2, \dots, \end{equation*} with $a = -1/\log(1 - \theta)$ and for $0 \leq \theta < 1$. This is the standard parametrization in the literature \citep{Johnson:discrete:2005}. The logarithmic distribution is always defined on the strictly positive integers. As such, it is not qualified as ``zero-truncated'', but it nevertheless belongs to the $(a, b, 1)$ class of distributions, more specifically to the subclass with $p_0 = 0$. 
Actually, the logarithmic distribution is the limiting case of the zero-truncated negative binomial distribution with size parameter equal to zero and $\theta = 1 - p$, where $p$ is the probability of success for the zero-truncated negative binomial. Note that this differs from the presentation in \citet{LossModels4e}. Another subclass of the $(a, b, 1)$ class of distributions is obtained by setting $p_0$ to some arbitrary number $p_0^M$ subject to $0 < p_0^M \leq 1$. The members of this subclass are called \emph{zero-modified} distributions. Zero-modified distributions are discrete mixtures between a degenerate distribution at zero and the corresponding distribution from the $(a, b, 0)$ class. Let $p_k^M$ and $P^M(k)$ denote the pmf and cdf of a zero-modified distribution. Written as a mixture, the pmf is \begin{equation} \label{eq:mixture} p_k^M = \left(1 - \frac{1 - p_0^M}{1 - p_0} \right) \mathbb{1}_{\{k = 0\}} + \frac{1 - p_0^M}{1 - p_0}\, p_k. \end{equation} Alternatively, we have \begin{equation*} p_k^M = \begin{cases} p_0^M, & k = 0 \\ \displaystyle\frac{1 - p_0^M}{1 - p_0}\, p_k, & k = 1, 2, \dots \end{cases} \end{equation*} and \begin{align*} P^M(k) &= p_0^M + (1 - p_0^M) \frac{P(k) - P(0)}{1 - P(0)} \\ &= p_0^M + \frac{1 - p_0^M}{1 - p_0}\, (P(k) - p_0) \\ &= p_0^M + (1 - p_0^M)\, P^T(k) \end{align*} for all $k = 0, 1, 2, \dots$. The survival function is \begin{equation*} \bar{P}^M(k) = (1 - p_0^M)\, \frac{\bar{P}(k)}{\bar{P}(0)} = \frac{1 - p_0^M}{1 - p_0}\, \bar{P}(k) = (1 - p_0^M)\, \bar{P}^T(k). \end{equation*} Therefore, we can also write the pmf of a zero-modified distribution as a mixture of a degenerate distribution at zero and the corresponding zero-truncated distribution: \begin{equation} \label{eq:mixture:alt} p_k^M = p_0^M \mathbb{1}_{\{k = 0\}} + (1 - p_0^M)\, p_k^T. 
\end{equation} The members of the subclass are the zero-modified Poisson, zero-modified binomial, zero-modified negative binomial and zero-modified geometric, together with the zero-modified logarithmic as a limiting case of the zero-modified negative binomial. \autoref{tab:discrete} lists the root names of the support functions provided in \pkg{actuar}; see also \autoref{sec:app:discrete}. Quite obviously, zero-truncated distributions are zero-modified distributions with $p_0^M = 0$. However, using the dedicated functions in R will be more efficient. \section{Poisson-inverse Gaussian distribution} \label{sec:pig} The Poisson-inverse Gaussian (PIG) distribution results from the continuous mixture between a Poisson distribution and an inverse Gaussian. That is, the Poisson-inverse Gaussian is the (marginal) distribution of the random variable $X$ when the conditional random variable $X|\Lambda = \lambda$ is Poisson with parameter $\lambda$ and the random variable $\Lambda$ is inverse Gaussian with parameters $\mu$ and $\phi$. The literature proposes many different expressions for the pmf of the PIG \citep{Holla:PIG:1966,Shaban:PIG:1981,Johnson:discrete:2005,LossModels4e}. Using the parametrization for the inverse Gaussian found in \autoref{sec:app:continuous}, we have: \begin{equation} \label{eq:pig:px} \begin{split} p_x &= \sqrt{\frac{2}{\pi \phi}} \frac{e^{(\phi\mu)^{-1}}}{x!} \left( \sqrt{2\phi \left( 1 + \frac{1}{2\phi\mu^2} \right)} \right)^{-\left( x - \frac{1}{2} \right)} \\ &\phantom{=} \times K_{x - \frac{1}{2}} \left( \sqrt{\frac{2}{\phi}\left(1 + \frac{1}{2\phi\mu^2}\right)} \right), \end{split} \end{equation} for $x = 0, 1, \dots$, $\mu > 0$, $\phi > 0$ and where \begin{equation} \label{eq:bessel_k} K_\nu(az) = \frac{a^{-\nu}}{2} \int_0^\infty t^{\nu - 1} e^{- z(t + a^2 t^{-1})/2} dt, \quad a^2 z > 0 \end{equation} is the modified Bessel function of the third kind \citep{Bateman:1953:2,Abramowitz:1972}. 
One may compute the probabilities $p_x$, $x = 0, 1, \dots$ recursively using the following equations: \begin{equation} \label{eq:pig:px:recursive} \begin{split} p_0 &= \exp\left\{ \frac{1}{\phi\mu} \left(1 - \sqrt{1 + 2\phi\mu^2}\right) \right\} \\ p_1 &= \frac{\mu}{\sqrt{1 + 2\phi\mu^2}}\, p_0 \\ p_x &= \frac{2\phi\mu^2}{1 + 2\phi\mu^2} \left( 1 - \frac{3}{2x} \right) p_{x - 1} + \frac{\mu^2}{1 + 2\phi\mu^2} \frac{1}{x(x - 1)}\, p_{x - 2}, \quad x = 2, 3, \dots. \end{split} \end{equation} The first moment of the distribution is $\mu$. The second and third central moments are, respectively, \begin{align*} \mu_2 &= \sigma^2 = \mu + \phi\mu^3 \\ \mu_3 &= \mu + 3 \phi \mu^2 \sigma^2. \end{align*} For the limiting case $\mu = \infty$, the underlying inverse Gaussian has an inverse chi-squared distribution. The latter has no finite strictly positive, integer moments and, consequently, neither does the Poisson-inverse Gaussian. See \autoref{sec:app:discrete:pig} for the formulas in this case. \section{Special integrals} \label{sec:special-integrals} Many of the cumulative distribution functions of \autoref{sec:app:continuous} are expressed in terms of the incomplete gamma function or the incomplete beta function. From a probability theory perspective, the incomplete gamma function is usually defined as \begin{equation} \label{eq:pgamma} \Gamma(\alpha; x) = \frac{1}{\Gamma(\alpha)} \int_0^x t^{\alpha - 1} e^{-t}\, dt, \quad \alpha > 0, x > 0, \end{equation} with \begin{equation*} \Gamma(\alpha) = \int_0^\infty t^{\alpha - 1} e^{-t}\, dt, \end{equation*} whereas the (regularized) incomplete beta function is defined as \begin{equation} \label{eq:pbeta} \beta(a, b; x) = \frac{1}{\beta(a, b)} \int\limits_0^x t^{a - 1} (1 - t)^{b - 1}\, dt, \quad a > 0, b > 0, 0 < x < 1, \end{equation} with \begin{equation*} \beta(a, b) = \int_0^1 t^{a - 1} (1 - t)^{b - 1}\, dt = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a + b)}. 
\end{equation*} Now, there exist alternative definitions of these functions that are valid for negative values of the parameters. \citet{LossModels4e} introduce them to extend the range of admissible values for limited expected value functions. First, following \citet[Section~6.5]{Abramowitz:1972}, we define the ``extended'' incomplete gamma function as \begin{equation} \label{eq:gammainc} G(\alpha; x) = \int_x^\infty t^{\alpha - 1} e^{-t}\, dt \end{equation} for $\alpha$ real and $x > 0$. When $\alpha > 0$, we clearly have \begin{equation} \label{eq:gammainc:apos} G(\alpha; x) = \Gamma(\alpha) [1 - \Gamma(\alpha; x)]. \end{equation} The integral is also defined for $\alpha \le 0$. As outlined in \citet[Appendix~A]{LossModels4e}, integration by parts of \eqref{eq:gammainc} yields the relation \begin{equation*} G(\alpha; x) = -\frac{x^\alpha e^{-x}}{\alpha} + \frac{1}{\alpha} G(\alpha + 1; x). \end{equation*} This process can be repeated until $\alpha + k$ is a positive number, in which case the right hand side can be evaluated with \eqref{eq:gammainc:apos}. If $\alpha = 0, -1, -2, \dots$, this calculation requires the value of \begin{equation*} \label{eq:expint} G(0; x) = \int_x^\infty \frac{e^{-t}}{t}\, dt = E_1(x), \end{equation*} which is known in the literature as the \emph{exponential integral} \citep[Section~5.1]{Abramowitz:1972}. Second, as seen in \citet[Section~6.6]{Abramowitz:1972}, we have the following relation for the integral on the right hand side of \eqref{eq:pbeta}: \begin{equation*} \int\limits_0^x t^{a - 1} (1 - t)^{b - 1}\, dt = \frac{x^a}{a}\, F(a, 1 - b; a + 1; x), \end{equation*} where \begin{equation*} F(a, b; c; z) = \frac{\Gamma(c)}{\Gamma(a) \Gamma(b)} \sum_{k = 0}^\infty \frac{\Gamma(a + k) \Gamma(b + k)}{\Gamma(c + k)} \frac{z^k}{k!} \end{equation*} is the Gauss hypergeometric series. With the above definition, the incomplete beta function also admits negative, non-integer values for parameters $a$ and $b$. 
Now, let \begin{equation} \label{eq:betaint} B(a, b; x) = \Gamma(a + b) \int_0^x t^{a-1} (1-t)^{b-1} dt \end{equation} for $a > 0$, $b \neq -1, -2, \dots$ and $0 < x < 1$. Again, it is clear that when $b > 0$, \begin{equation*} B(a, b; x) = \Gamma(a) \Gamma(b) \beta(a, b; x). \end{equation*} Of more interest here is the case where $b < 0$, $b \neq -1, -2, \dots$ and $a > 1 + \lfloor -b\rfloor$. Integration by parts of \eqref{eq:betaint} yields \begin{equation} \label{eq:betaint:2} \begin{split} B(a, b; x) &= \displaystyle -\Gamma(a + b) \left[ \frac{x^{a-1} (1-x)^b}{b} + \frac{(a-1) x^{a-2} (1-x)^{b+1}}{b (b+1)} \right. \\ &\phantom{=} \displaystyle\left. + \cdots + \frac{(a-1) \cdots (a-r) x^{a-r-1} (1-x)^{b+r}}{b (b+1) \cdots (b+r)} \right] \\ &\phantom{=} \displaystyle + \frac{(a-1) \cdots (a-r-1)}{b (b+1) \cdots (b+r)} \Gamma(a-r-1) \\ &\phantom{=} \times \Gamma(b+r+1) \beta(a-r-1, b+r+1; x), \end{split} \end{equation} where $r = \lfloor -b\rfloor$. For the needs of \pkg{actuar}, we dubbed \eqref{eq:betaint} the \emph{beta integral}. Package \pkg{actuar} includes a C implementation of \eqref{eq:betaint:2} and imports functionalities of package \pkg{expint} \citep{expint} to compute the incomplete gamma function \eqref{eq:gammainc} at the C level. The routines are used to evaluate the limited expected value for distributions of the Feller--Pareto and transformed gamma families. \section{Package API: accessing the C routines} \label{sec:api} The actual workhorses behind the R functions presented in this document are C routines that the package exposes to other packages through an API. The header file \file{include/actuarAPI.h} in the package installation directory contains declarations for % the continuous distributions of \autoref{sec:app:continuous}, % the phase-type distributions of \autoref{sec:app:phase-type}, % the discrete distributions of \autoref{sec:app:discrete}, % and the beta integral of \autoref{sec:special-integrals}. 
The prototypes of the C routines for probability distributions all follow the same pattern modeled after those of base R \citep[Chapter~6]{R-exts}. As an example, here are the prototypes for the Pareto distribution: \begin{Schunk} \begin{Sinput} double dpareto(double x, double shape, double scale, int give_log); double ppareto(double q, double shape, double scale, int lower_tail, int log_p); double qpareto(double p, double shape, double scale, int lower_tail, int log_p); double rpareto(double shape, double scale); double mpareto(double order, double shape, double scale, int give_log); double levpareto(double limit, double shape, double scale, double order, int give_log); \end{Sinput} \end{Schunk} For the beta integral \eqref{eq:betaint:2}, the frontend is a routine \code{betaint} that returns \code{NA} or \code{NaN} for out-of-range arguments, but actual computation is done by routine \code{betaint\_raw}. Both are exposed as follows in the API: \begin{Schunk} \begin{Sinput} double betaint(double x, double a, double b); double betaint_raw(double x, double a, double b, double x1m); \end{Sinput} \end{Schunk} The developer of some package \pkg{pkg} who wants to use a routine --- say \code{dpareto} --- in her code should proceed as follows. \begin{enumerate} \item Add \pkg{actuar} to the \code{Imports} and \code{LinkingTo} directives of the \file{DESCRIPTION} file of \pkg{pkg}; \item Add an entry \code{import(actuar)} in the \file{NAMESPACE} file of \pkg{pkg}; \item Define the routine with a call to \code{R\_GetCCallable} in the initialization routine \code{R\_init\_pkg} of \pkg{pkg} \citep[Section~5.4]{R-exts}. 
For the current example, the file \file{src/init.c} of \pkg{pkg} would contain the following code: \begin{Schunk} \begin{Sinput} void R_init_pkg(DllInfo *dll) { R_registerRoutines( /* native routine registration */ ); pkg_dpareto = (double(*)(double,double,double,int)) R_GetCCallable("actuar", "dpareto"); } \end{Sinput} \end{Schunk} \item Define a native routine interface that will call \code{dpareto}, say \code{pkg\_dpareto} to avoid any name clash, in \file{src/init.c} as follows: \begin{Schunk} \begin{Sinput} double(*pkg_dpareto)(double,double,double,int); \end{Sinput} \end{Schunk} \item Declare the routine in a header file of \pkg{pkg} with the keyword \code{extern} to expose the interface to all routines of the package. In our example, file \file{src/pkg.h} would contain: \begin{Schunk} \begin{Sinput} extern double(*pkg_dpareto)(double,double,double,int); \end{Sinput} \end{Schunk} \item Include the package header file \file{pkg.h} in any C file making use of routine \code{pkg\_dpareto}. \end{enumerate} The companion package \pkg{expint} \citep{expint} ships with a complete test package implementing the above. See the vignette of the latter package for more information. \section{Implementation details} \label{sec:implementation} The cdfs of the continuous distributions of \autoref{tab:continuous} use \code{pbeta} and \code{pgamma} to compute the incomplete beta and incomplete gamma functions, respectively. Functions \code{dinvgauss}, \code{pinvgauss} and \code{qinvgauss} rely on C implementations of functions of the same name from package \pkg{statmod} \citep{statmod}. The matrix exponential C routine needed in \code{dphtype} and \code{pphtype} is based on \code{expm} from package \pkg{Matrix} \citep{Matrix}. The C code to compute the beta integral \eqref{eq:betaint:2} was written by the second author. 
For all but the trivial input values, the pmf, cdf and quantile functions for the zero-truncated and zero-modified distributions of \autoref{tab:discrete} use the internal R functions for the corresponding standard distribution. Generation of random variates from zero-truncated distributions uses the following simple inversion algorithm on a restricted range \citep{Dalgaard:r-help:2005,Thomopoulos:2013:simulation}. Let $u$ be a random number from a uniform distribution on $(p_0, 1)$. Then $x = P^{-1}(u)$ is distributed according to the zero-truncated version of the distribution with cdf $P(k)$. For zero-modified distributions, we generate variates from the discrete mixture \eqref{eq:mixture} when $p_0^M \geq p_0$. When $p_0^M < p_0$, we can use either of two methods: \begin{enumerate} \item the classical rejection method with an envelope that differs from the target distribution only at zero (meaning that only zeros are rejected); \item generation from the discrete mixture \eqref{eq:mixture:alt} with the corresponding zero-truncated distribution (hence using the inversion method on a restricted range explained above). \end{enumerate} Which approach is faster depends on the relative speeds of the standard random generation function and the standard quantile function, and also on the proportion of zeros that are rejected using the rejection algorithm. Based on the difference $p_0 - p_0^M$, we determined (empirically) distribution-specific cutoff points between the two methods. Finally, computation of the Poisson-inverse Gaussian pmf uses the recursive equations \eqref{eq:pig:px:recursive}. Versions of \pkg{actuar} prior to 3.0-0 used the direct expression \eqref{eq:pig:px} and the C level function \code{bessel\_k} part of the R API. 
However, the latter overflows for large values of $\nu$ and this caused \code{NaN} results for the value of \begin{equation*} \frac{B^{- \left(x - \frac{1}{2} \right)} K_{x - \frac{1}{2}}(B/\phi)}{x!} \end{equation*} and, therefore, for the Poisson-inverse Gaussian pmf. \appendix \section{Continuous distributions} \label{sec:app:continuous} This appendix gives the root name and the parameters of the R support functions for the distributions of \autoref{tab:continuous}, as well as the formulas for the pdf, the cdf, the raw moment of order $k$ and the limited moment of order $k$ using the parametrization of \citet{LossModels4e} and \citet{HoggKlugman}. In the following, $\Gamma(\alpha; x)$ is the incomplete gamma function \eqref{eq:pgamma}, $\beta(a, b; x)$ is the incomplete beta function \eqref{eq:pbeta}, $G(\alpha; x)$ is the ``extended'' incomplete gamma function \eqref{eq:gammainc}, $B(a, b; x)$ is the beta integral \eqref{eq:betaint} and $K_\nu(x)$ is the modified Bessel function of the third kind \eqref{eq:bessel_k}. Unless otherwise stated, all parameters are finite and strictly positive, and the functions are defined for $x > 0$. 
\subsection{Feller--Pareto family} \label{sec:app:continuous:feller-pareto} \subsubsection{Feller--Pareto} \begin{itemize} \item Root: \code{fpareto} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{shape3} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u^\tau (1 - u)^\alpha}{% (x - \mu) \beta (\alpha, \tau )}, \quad u = \frac{v}{1 + v}, \quad v = \left(\frac{x - \mu}{\theta} \right)^\gamma, \quad x > \mu \\ F(x) &= \beta(\tau, \alpha; u) \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{\Gamma(\tau+j/\gamma) \Gamma(\alpha-j/\gamma)}{% \Gamma(\alpha) \Gamma(\tau)}, \quad \text{integer } 0 \leq k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{B(\tau+j/\gamma, \alpha-j/\gamma; u)}{% \Gamma(\alpha) \Gamma(\tau)} \\ &\phantom{=} + x^k [1 - \beta(\tau, \alpha; u)], \quad \text{integer } k \geq 0, \quad \alpha - j/\gamma \neq -1, -2, \dots \end{align*} \subsubsection{Pareto IV} \begin{itemize} \item Root: \code{pareto4} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha \gamma u^\alpha (1 - u)}{(x - \mu)}, \quad u = \frac{1}{1 + v}, \quad v = \left(\frac{x - \mu}{\theta} \right)^\gamma, \quad x > \mu \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{\Gamma(1+j/\gamma) \Gamma(\alpha-j/\gamma)}{% \Gamma(\alpha)}, \quad \text{integer } 0 \leq k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{B(1+j/\gamma, \alpha-j/\gamma; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad \text{integer } k \geq 0 \quad \alpha - j/\gamma \neq -1, -2, \dots \end{align*} 
\subsubsection{Pareto III} \begin{itemize} \item Root: \code{pareto3} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u (1 - u)}{(x - \mu)}, \quad u = \frac{v}{1 + v}, \quad v = \left(\frac{x - \mu}{\theta} \right)^\gamma, \quad x > \mu \\ F(x) &= u \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \Gamma(1+j/\gamma) \Gamma(1-j/\gamma), \quad \text{integer } 0 \leq k < \gamma \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, B(1+j/\gamma, 1-j/\gamma; u) \\ &\phantom{=} + x^k (1 - u), \quad \text{integer } k \geq 0 \quad 1 - j/\gamma \neq -1, -2, \dots \end{align*} \subsubsection{Pareto II} \begin{itemize} \item Root: \code{pareto2} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape} ($\alpha$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha u^\alpha (1 - u)}{(x - \mu)}, \quad u = \frac{1}{1 + v}, \quad v = \frac{x - \mu}{\theta}, \quad x > \mu \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{\Gamma(1+j) \Gamma(\alpha-j)}{% \Gamma(\alpha)}, \quad \text{integer } 0 \leq k < \alpha \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{B(1+j, \alpha-j; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad \text{integer } k \geq 0 \quad \alpha - j \neq -1, -2, \dots \end{align*} \subsubsection{Transformed beta} \begin{itemize} \item Root: \code{trbeta}, \code{pearson6} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{shape3} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u^\tau (1 - u)^\alpha}{% x \beta (\alpha, \tau )}, \qquad u = \frac{v}{1 + v}, \qquad v = \left(\frac{x}{\theta} \right)^\gamma 
\\ F(x) &= \beta(\tau, \alpha; u) \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k/\gamma) \Gamma(\alpha-k/\gamma)}{% \Gamma(\alpha) \Gamma(\tau)}, \quad -\tau\gamma < k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(\tau+k/\gamma, \alpha-k/\gamma; u)}{% \Gamma(\alpha) \Gamma(\tau)} \\ &\phantom{=} + x^k [1 - \beta(\tau, \alpha; u)], \quad k > -\tau\gamma \end{align*} \subsubsection{Burr} \begin{itemize} \item Root: \code{burr} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha \gamma u^\alpha (1 - u)}{x}, \qquad u = \frac{1}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\gamma \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(1+k/\gamma) \Gamma(\alpha-k/\gamma)}{% \Gamma(\alpha)}, \quad -\gamma < k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(1+k/\gamma, \alpha-k/\gamma; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad k > -\gamma \end{align*} \subsubsection{Loglogistic} \begin{itemize} \item Root: \code{llogis} \item Parameters: \code{shape} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\gamma \\ F(x) &= u \\ \displaybreak[0] \E{X^k} &= \theta^k \Gamma(1+k/\gamma) \Gamma(1-k/\gamma), \quad -\gamma < k < \gamma \\ \E{(X \wedge x)^k} &= \theta^k B(1+k/\gamma, 1-k/\gamma; u) \\ &\phantom{=} + x^k (1 - u), \quad k > -\gamma \end{align*} \subsubsection{Paralogistic} \begin{itemize} \item Root: \code{paralogis} \item Parameters: \code{shape} ($\alpha$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha^2 u^\alpha (1 - u)}{x}, \qquad u = \frac{1}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\alpha \\ F(x) &= 1 - u^\alpha \\ 
\displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(1+k/\alpha) \Gamma(\alpha-k/\alpha)}{% \Gamma(\alpha)}, \quad -\alpha < k < \alpha^2 \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(1+k/\alpha, \alpha-k/\alpha; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad k > -\alpha \end{align*} \subsubsection{Generalized Pareto} \begin{itemize} \item Root: \code{genpareto} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{u^\tau (1 - u)^\alpha}{x \beta (\alpha, \tau )}, \qquad u = \frac{v}{1 + v}, \qquad v = \frac{x}{\theta} \\ F(x) &= \beta(\tau, \alpha; u) \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k) \Gamma(\alpha-k)}{% \Gamma(\alpha) \Gamma(\tau)}, \quad -\tau < k < \alpha \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(\tau+k, \alpha-k; u)}{% \Gamma(\alpha) \Gamma(\tau)} \\ &\phantom{=} + x^k [1 - \beta(\tau, \alpha; u)], \quad k > -\tau \end{align*} \subsubsection{Pareto} \begin{itemize} \item Root: \code{pareto} \item Parameters: \code{shape} ($\alpha$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha u^\alpha (1 - u)}{x}, \qquad u = \frac{1}{1 + v}, \qquad v = \frac{x}{\theta} \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(1+k) \Gamma(\alpha-k)}{% \Gamma(\alpha)}, \quad -1 < k < \alpha \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(1+k, \alpha-k; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad k > -1 \end{align*} \subsubsection{Single-parameter Pareto (Pareto I)} \begin{itemize} \item Root: \code{pareto1} \item Parameters: \code{shape} ($\alpha$), \code{min} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha \theta^\alpha}{x^{\alpha+1}}, \quad x > \theta \\ F(x) &= 1 - \left( \frac{\theta}{x} \right)^\alpha, \quad x > \theta \\ \displaybreak[0] \E{X^k} &= \frac{\alpha \theta^k}{\alpha - k}, \quad k < \alpha \\ \E{(X \wedge x)^k} &= \frac{\alpha 
\theta^k}{\alpha - k} - \frac{k \theta^\alpha}{(\alpha - k) x^{\alpha-k}}, \quad x \geq \theta \end{align*} Although there appears to be two parameters, only $\alpha$ is a true parameter. The value of $\theta$ is the minimum of the distribution and is usually set in advance. \subsubsection{Inverse Burr} \begin{itemize} \item Root: \code{invburr} \item Parameters: \code{shape1} ($\tau$), \code{shape2} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau \gamma u^\tau (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\gamma \\ F(x) &= u^\tau \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k/\gamma) \Gamma(1-k/\gamma)}{% \Gamma(\tau)}, \quad -\gamma < k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(\tau+k/\gamma, 1-k/\gamma; u)}{% \Gamma(\tau)} \\ &\phantom{=} + x^k (1-u^\tau), \quad k > -\tau\gamma \end{align*} \subsubsection{Inverse Pareto} \begin{itemize} \item Root: \code{invpareto} \item Parameters: \code{shape} ($\tau$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^\tau (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \frac{x}{\theta} \\ F(x) &= u^\tau \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k) \Gamma(1-k)}{% \Gamma(\tau)}, \quad -\tau < k < 1 \\ \E{(X \wedge x)^k} &= \theta^k \tau \int_0^u y^{\tau+k-1} (1 - y)^{-k}\, dy \\ &\phantom{=} + x^k (1-u^\tau), \quad k > -\tau \end{align*} \subsubsection{Inverse paralogistic} \begin{itemize} \item Root: \code{invparalogis} \item Parameters: \code{shape} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau^2 u^\tau (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \left(\frac{x}{\theta} \right)^\tau \\ F(x) &= u^\tau \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k/\tau) \Gamma(1-k/\tau)}{% \Gamma(\tau)}, \quad -\tau^2 < k < \tau \\ \E{(X \wedge x)^k} &= \frac{% 
\theta^k B(\tau+k/\tau, 1-k/\tau; u)}{% \Gamma(\tau)} \\ &\phantom{=} + x^k (1-u^\tau), \quad k > -\tau^2 \end{align*} \subsection{Transformed gamma family} \label{sec:app:continuous:transformed-gamma} \subsubsection{Transformed gamma} \begin{itemize} \item Root: \code{trgamma} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^\alpha e^{-u}}{x \Gamma(\alpha)}, \qquad u = \left( \frac{x}{\theta} \right)^\tau \\ F(x) &= \Gamma (\alpha ; u) \\ \displaybreak[0] \E{X^k} &= \frac{\theta^k \Gamma(\alpha+k/\tau)}{\Gamma(\alpha)} \quad k > -\alpha\tau \\ \E{(X \wedge x)^k} &= \frac{\theta^k \Gamma(\alpha+k/\tau)}{\Gamma(\alpha)} \Gamma(\alpha+k/\tau; u) \\ &\phantom{=} + x^k [1 - \Gamma(\alpha; u)], \quad k > -\alpha\tau \end{align*} \subsubsection{Inverse transformed gamma} \begin{itemize} \item Root: \code{invtrgamma} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^\alpha e^{-u}}{x\Gamma (\alpha)}, \qquad u = \left( \frac{\theta}{x} \right)^\tau \\ F(x) &= 1 - \Gamma (\alpha ; u) \\ \displaybreak[0] \E{X^k} &= \frac{\theta^k \Gamma(\alpha-k/\tau)}{\Gamma(\alpha)} \quad k < \alpha\tau \\ \E{(X \wedge x)^k} &= \frac{\theta^k G(\alpha-k/\tau; u)}{\Gamma(\alpha)} + x^k \Gamma(\alpha; u), \quad \text{all }k \end{align*} \subsubsection{Inverse gamma} \begin{itemize} \item Root: \code{invgamma} \item Parameters: \code{shape} ($\alpha$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{u^\alpha e^{-u}}{x\Gamma (\alpha)}, \qquad u = \frac{\theta}{x}\\ F(x) &= 1 - \Gamma (\alpha ; u) \\ \displaybreak[0] \E{X^k} &= \frac{\theta^k \Gamma(\alpha-k)}{\Gamma(\alpha)} \quad k < \alpha \\ \E{(X \wedge x)^k} &= \frac{\theta^k G(\alpha-k; u)}{\Gamma(\alpha)} + x^k \Gamma(\alpha; u), 
\quad \text{all }k \\ M(t) &= \frac{2}{\Gamma(\alpha)} (-\theta t)^{\alpha/2} K_\alpha(\sqrt{-4\theta t}) \end{align*} \subsubsection{Inverse Weibull} \begin{itemize} \item Root: \code{invweibull}, \code{lgompertz} \item Parameters: \code{shape} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u e^{-u}}{x}, \qquad u = \left( \frac{\theta}{x} \right)^\tau \\ F(x) &= e^{-u} \\ \displaybreak[0] \E{X^k} &= \theta^k \Gamma(1-k/\tau) \quad k < \tau \\ \E{(X \wedge x)^k} &= \theta^k G(1-k/\tau; u) + x^k (1 - e^{-u}), \quad \text{all }k \end{align*} \subsubsection{Inverse exponential} \begin{itemize} \item Root: \code{invexp} \item Parameters: \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{u e^{-u}}{x}, \qquad u = \frac{\theta}{x} \\ F(x) &= e^{-u} \\ \displaybreak[0] \E{X^k} &= \theta^k \Gamma(1-k) \quad k < 1 \\ \E{(X \wedge x)^k} &= \theta^k G(1-k; u) + x^k (1 - e^{-u}), \quad \text{all }k \end{align*} \subsection{Other distributions} \label{sec:app:continuous:other} \subsubsection{Loggamma} \begin{itemize} \item Root: \code{lgamma} \item Parameters: \code{shapelog} ($\alpha$), \code{ratelog} ($\lambda$) \end{itemize} \begin{align*} f(x) &= \frac{\lambda^\alpha (\ln x)^{\alpha - 1}}{% x^{\lambda + 1} \Gamma(\alpha)}, \quad x > 1 \\ F(x) &= \Gamma( \alpha ; \lambda \ln x), \quad x > 1 \\ \displaybreak[0] \E{X^k} &= \left( \frac{\lambda}{\lambda - k} \right)^\alpha, \quad k < \lambda \\ \E{(X \wedge x)^k} &= \left( \frac{\lambda}{\lambda - k} \right)^\alpha \Gamma(\alpha; (\lambda - k) \ln x) \\ &\phantom{=} + x^k (1 - \Gamma(\alpha; \lambda \ln x)), \quad k < \lambda \end{align*} \subsubsection{Gumbel} \begin{itemize} \item Root: \code{gumbel} \item Parameters: \code{alpha} ($-\infty < \alpha < \infty$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{e^{-(u + e^{-u})}}{\theta}, \qquad u = \frac{x - \alpha}{\theta}, \qquad 
-\infty < x < \infty \\ F(x) &= \exp[-\exp(-u)] \\ \displaybreak[0] \E{X} &= \alpha + \gamma \theta, \quad \gamma \approx 0.57721566490153 \\ \VAR{X} &= \frac{\pi^2 \theta^2}{6} \\ M(t) &= e^{\alpha t} \Gamma(1 - \theta t) \end{align*} \subsubsection{Inverse Gaussian} \begin{itemize} \item Root: \code{invgauss} \item Parameters: \code{mean} ($\mu$), \code{shape} ($\lambda = 1/\phi$), \code{dispersion} ($\phi$) \end{itemize} \begin{align*} f(x) &= \left( \frac{1}{2 \pi \phi x^3} \right)^{1/2} \exp\left\{ -\frac{(x/\mu - 1)^2}{2 \phi x} \right\} \\ F(x) &= \Phi\left( \frac{x/\mu - 1}{\sqrt{\phi x}} \right) + e^{2/(\phi\mu)} \Phi\left( -\frac{x/\mu + 1}{\sqrt{\phi x}} \right) \\ \displaybreak[0] \E{X^k} &= \mu^k \sum_{i = 0}^{k - 1} \frac{(k + i - 1)!}{i! (k - i - 1)!} \left( \frac{\phi \mu}{2} \right)^{i}, \quad k = 1, 2, \dots \\ \E{X \wedge x} &= \mu \left[ \Phi\left( \frac{x/\mu - 1}{\sqrt{\phi x}} \right) - e^{2/(\phi\mu)} \Phi\left(- \frac{x/\mu + 1}{\sqrt{\phi x}} \right) \right] \\ &\phantom{=} + x (1 - F(x)) \\ M(t) &= \exp \left\{ \frac{1}{\phi \mu} \left(1 - \sqrt{1 - 2 \phi \mu^2 t}\right) \right\}, \quad t \leq \frac{1}{2 \phi \mu^2} \end{align*} \noindent% The limiting case $\mu = \infty$ is an inverse gamma distribution with $\alpha = 1/2$ and $\lambda = 2\phi$ (or inverse chi-squared). 
\subsubsection{Generalized beta} \begin{itemize} \item Root: \code{genbeta} \item Parameters: \code{shape1} ($a$), \code{shape2} ($b$), \code{shape3} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^a (1 - u)^{b - 1}}{x \beta (a, b)}, \qquad u = \left( \frac{x}{\theta} \right)^\tau, \qquad 0 < x < \theta \\ F(x) &= \beta (a, b ; u) \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \beta(a+k/\tau, b)}{\beta(a, b)}, \quad k > -a\tau \\ \E{(X \wedge x)^k} &= \frac{% \theta^k \beta(a+k/\tau, b)}{\beta(a, b)} \beta(a+k/\tau, b; u) \\ &\phantom{=} + x^k [1 - \beta(a, b; u)], \quad k > -\tau\gamma \end{align*} \section{Phase-type distributions} \label{sec:app:phase-type} Consider a continuous-time Markov process with $m$ transient states and one absorbing state. Let \begin{equation*} \mat{Q} = \begin{bmatrix} \mat{T} & \mat{t} \\ \mat{0} & 0 \end{bmatrix} \end{equation*} be the transition rates matrix (or intensity matrix) of such a process and let $(\mat{\pi}, \pi_{m + 1})$ be the initial probability vector. Here, $\mat{T}$ is an $m \times m$ non-singular matrix with $t_{ii} < 0$ for $i = 1, \dots, m$ and $t_{ij} \geq 0$ for $i \neq j$; $\mat{\pi}$ is an $1 \times m$ vector of probabilities such that $\mat{\pi} \mat{e} + \pi_{m + 1} = 1$; $\mat{t} = -\mat{T} \mat{e}$; $\mat{e} = [1]_{m \times 1}$ is a column vector of ones. % \bigskip \begin{itemize} \item Root: \code{phtype} \item Parameters: \code{prob} ($\mat{\pi}_{1 \times m}$), \code{rates} ($\mat{T}_{m \times m}$) \end{itemize} \begin{align*} f(x) &= \begin{cases} 1 - \mat{\pi} \mat{e} & x = 0, \\ \mat{\pi} e^{\mat{T} x} \mat{t}, & x > 0 \end{cases} \\ F(x) &= \begin{cases} 1 - \mat{\pi} \mat{e}, & x = 0, \\ 1 - \mat{\pi} e^{\mat{T} x} \mat{e}, & x > 0 \end{cases} \\ \E{X^k} &= k! 
\mat{\pi} (-\mat{T})^{-k} \mat{e} \\ M(t) &= \mat{\pi} (-t \mat{I} - \mat{T})^{-1} \mat{t} + (1 - \mat{\pi} \mat{e}) \end{align*} \section{Discrete distributions} \label{sec:app:discrete} This appendix gives the root name and the parameters of the R support functions for the members of the $(a, b, 0)$ and $(a, b, 1)$ discrete distributions as defined in \citet{LossModels4e}; the values of $a$, $b$ and $p_0$ in the representation; the pmf; the relationship with other distributions, when there is one. The appendix also provides the main characteristics of the Poisson-inverse Gaussian distribution. \subsection[The (a, b, 0) class]{The $(a, b, 0)$ class} \label{sec:app:discrete:a-b-0} The distributions in this section are all supported in base R. Their pmf can be computed recursively by fixing $p_0$ to the specified value and then using $p_k = (a + b/k) p_{k - 1}$, for $k = 1, 2, \dots$. All parameters are finite. \subsubsection{Poisson} \begin{itemize} \item Root: \code{pois} \item Parameter: \code{lambda} ($\lambda \geq 0$) \end{itemize} \begin{align*} a &= 0, \qquad b = \lambda, \qquad p_0 = e^{-\lambda} \\ p_k &= \frac{e^{-\lambda} \lambda^k}{k!} \end{align*} \subsubsection{Negative binomial} \begin{itemize} \item Root: \code{nbinom} \item Parameters: \code{size} ($r \geq 0$), \code{prob} ($0 < p \leq 1$), \code{mu} ($r(1 - p)/p$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = (r - 1)(1 - p), \qquad p_0 = p^r \\ p_k &= \binom{r+k-1}{k} p^r (1 - p)^k \end{align*} \begin{itemize} \item Special case: Geometric$(p)$ when $r = 1$. 
\end{itemize} \subsubsection{Geometric} \begin{itemize} \item Root: \code{geom} \item Parameter: \code{prob} ($0 < p \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = 0, \qquad p_0 = p \\ p_k &= p (1 - p)^k \end{align*} \subsubsection{Binomial} \begin{itemize} \item Root: \code{binom} \item Parameters: \code{size} ($n = 0, 1, 2, \dots$), \code{prob} ($0 \leq p \leq 1$) \end{itemize} \begin{align*} a &= -\frac{p}{1 - p}, \qquad b = \frac{(n + 1)p}{1 - p}, \qquad p_0 = (1 - p)^n \\ p_k &= \binom{n}{k} p^k (1 - p)^{n - k}, \quad k = 1, 2, \dots, n \end{align*} \begin{itemize} \item Special case: Bernoulli$(p)$ when $n = 1$. \end{itemize} \subsection[The zero-truncated (a, b, 1) class]{The zero-truncated $(a, b, 1)$ class} \label{sec:app:discrete:zt} Package \pkg{actuar} provides support for the distributions in this section. Zero-truncated distributions have probability at zero $p_0^T = 0$. Their pmf can be computed recursively by fixing $p_1$ to the value specified below and then using $p_k = (a + b/k) p_{k - 1}$, for $k = 2, 3, \dots$. The distributions are all defined on $k = 1, 2, \dots$. The limiting case of zero-truncated distributions when $p_1$ is infinite is a point mass in $k = 1$. \subsubsection{Zero-truncated Poisson} \begin{itemize} \item Root: \code{ztpois} \item Parameter: \code{lambda} ($\lambda \geq 0$) \end{itemize} \begin{align*} a &= 0, \qquad b = \lambda, \qquad p_1 = \frac{\lambda}{e^\lambda - 1} \\ p_k &= \frac{\lambda^k}{k! (e^\lambda - 1)} \end{align*} \subsubsection{Zero-truncated negative binomial} \begin{itemize} \item Root: \code{ztnbinom} \item Parameters: \code{size} ($r \geq 0$), \code{prob} ($0 < p \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = (r - 1)(1 - p), \qquad p_1 = \frac{r p^r (1 - p)}{1 - p^r} \\ p_k &= \binom{r+k-1}{k} \frac{p^r (1 - p)^k}{1 - p^r} \end{align*} \begin{itemize} \item Special cases: Logarithmic$(1 - p)$ when $r = 0$; Zero-truncated geometric$(p)$ when $r = 1$. 
\end{itemize} \subsubsection{Zero-truncated geometric} \begin{itemize} \item Root: \code{ztgeom} \item Parameter: \code{prob} ($0 < p \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = 0, \qquad p_1 = p \\ p_k &= p (1 - p)^{k - 1} \end{align*} \subsubsection{Zero-truncated binomial} \begin{itemize} \item Root: \code{ztbinom} \item Parameters: \code{size} ($n = 0, 1, 2, \dots$), \code{prob} ($0 \leq p \leq 1$) \end{itemize} \begin{align*} a &= -\frac{p}{1 - p}, \qquad b = \frac{(n + 1)p}{1 - p}, \qquad p_1 = \frac{n p (1 - p)^{n - 1}}{1 - (1 - p)^n} \\ p_k &= \binom{n}{k} \frac{p^k (1 - p)^{n - k}}{1 - (1 - p)^n}, \quad k = 1, 2, \dots, n \end{align*} \subsubsection{Logarithmic} \begin{itemize} \item Root: \code{logarithmic} \item Parameter: \code{prob} ($0 \leq p < 1$) \end{itemize} \begin{align*} a &= p, \qquad b = -p, \qquad p_1 = - \frac{p}{\log (1 - p)} \\ p_k &= - \frac{p^k}{k \log (1 - p)} \end{align*} \subsection[The zero-modified (a, b, 1) class]{The zero-modified $(a, b, 1)$ class} \label{sec:app:discrete:zm} Package \pkg{actuar} provides support for the distributions in this section. Zero-modified distributions have an arbitrary probability at zero $p_0^M \neq p_0$, where $p_0$ is the probability at zero for the corresponding member of the $(a, b, 0)$ class. Their pmf can be computed recursively by fixing $p_1$ to the value specified below and then using $p_k = (a + b/k) p_{k - 1}$, for $k = 2, 3, \dots$. The distributions are all defined on $k = 0, 1, 2, \dots$. The limiting case of zero-modified distributions when $p_1$ is infinite is a discrete mixture between a point mass in $k = 0$ (with probability $p_0^M$) and a point mass in $k = 1$ (with probability $1 - p_0^M$). 
\subsubsection{Zero-modified Poisson} \begin{itemize} \item Root: \code{zmpois} \item Parameters: \code{lambda} ($\lambda > 0$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= 0, \qquad b = \lambda, \qquad p_1 = \frac{(1 - p_0^M) \lambda}{e^\lambda - 1} \\ p_k &= \frac{(1 - p_0^M) \lambda^k}{k! (e^\lambda - 1)} \end{align*} \subsubsection{Zero-modified negative binomial} \begin{itemize} \item Root: \code{zmnbinom} \item Parameters: \code{size} ($r \geq 0$), \code{prob} ($0 < p \leq 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = (r - 1)(1 - p), \qquad p_1 = \frac{(1 - p_0^M) r p^r (1 - p)}{1 - p^r} \\ p_k &= \binom{r+k-1}{k} \frac{(1 - p_0^M) p^r (1 - p)^k}{1 - p^r} \end{align*} \begin{itemize} \item Special cases: Zero-modified logarithmic$(1 - p)$ when $r = 0$; Zero-modified geometric$(p)$ when $r = 1$. \end{itemize} \subsubsection{Zero-modified geometric} \begin{itemize} \item Root: \code{zmgeom} \item Parameters: \code{prob} ($0 < p \leq 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = 0, \qquad p_1 = (1 - p_0^M) p \\ p_k &= (1 - p_0^M) p (1 - p)^{k - 1} \end{align*} \subsubsection{Zero-modified binomial} \begin{itemize} \item Root: \code{zmbinom} \item Parameters: \code{size} ($n = 0, 1, 2, \dots$), \code{prob} ($0 \leq p \leq 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= -\frac{p}{1 - p}, \qquad b = \frac{(n + 1)p}{1 - p}, \qquad p_1^M = \frac{n (1 - p_0^M) p (1 - p)^{n - 1}}{1 - (1 - p)^n} \\ p_k &= \binom{n}{k} \frac{(1 - p_0^M) p^k (1 - p)^{n - k}}{1 - (1 - p)^n}, \quad k = 1, 2, \dots, n \end{align*} \subsubsection{Zero-modified logarithmic} \begin{itemize} \item Root: \code{zmlogarithmic} \item Parameters: \code{prob} ($0 \leq p < 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= p, \qquad b = -p, \qquad p_1 = - \frac{(1 - p_0^M) p}{\log (1 - p)} \\ p_k &= - \frac{(1 - p_0^M) p^k}{k \log (1 - p)} 
\end{align*} \subsection{Other distribution} \label{sec:app:discrete:pig} \subsubsection{Poisson-inverse Gaussian} \begin{itemize} \item Root: \code{poisinvgauss}, \code{pig} \item Parameters: \code{mean} ($\mu > 0$), \code{shape} ($\lambda = 1/\phi$), \code{dispersion} ($\phi > 0$) \end{itemize} \begin{align*} p_x &= \sqrt{\frac{2}{\pi \phi}} \frac{e^{(\phi\mu)^{-1}}}{x!} \left( \sqrt{2\phi \left( 1 + \frac{1}{2\phi\mu^2} \right)} \right)^{- \left( x - \frac{1}{2} \right)} \\ &\phantom{=} \times K_{x - 1/2} \left( \sqrt{\frac{2}{\phi}\left(1 + \frac{1}{2\phi\mu^2}\right)} \right), \quad x = 0, 1, \dots, \end{align*} \noindent% Recursively: \begin{align*} p_0 &= \exp\left\{ \frac{1}{\phi\mu} \left(1 - \sqrt{1 + 2\phi\mu^2}\right) \right\} \\ p_1 &= \frac{\mu}{\sqrt{1 + 2\phi\mu^2}}\, p_0 \\ p_x &= \frac{2\phi\mu^2}{1 + 2\phi\mu^2} \left( 1 - \frac{3}{2x} \right) p_{x - 1} + \frac{\mu^2}{1 + 2\phi\mu^2} \frac{1}{x(x - 1)}\, p_{x - 2}, \quad x = 2, 3, \dots. \end{align*} \noindent% In the limiting case $\mu = \infty$, the pmf reduces to \begin{equation*} p_x = \sqrt{\frac{2}{\pi \phi}} \frac{1}{x!} (\sqrt{2\phi})^{- \left( x - \frac{1}{2} \right)} K_{x - \frac{1}{2}} (\sqrt{2/\phi}), \quad x = 0, 1, \dots \end{equation*} and the recurrence relations become \begin{align*} p_0 &= \exp\left\{-\sqrt{2/\phi}\right\} \\ p_1 &= \frac{1}{\sqrt{2\phi}}\, p_0 \\ p_x &= \left( 1 - \frac{3}{2x} \right) p_{x - 1} + \frac{1}{2\phi} \frac{1}{x(x - 1)}\, p_{x - 2}, \quad x = 2, 3, \dots. \end{align*} %% References \bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% coding: utf-8 %%% TeX-master: t %%% End: