\documentclass[11pt,twoside]{article}
\usepackage{amsmath, amsthm, amscd, amsfonts, amssymb, graphicx, color}
\usepackage[bookmarksnumbered, colorlinks]{hyperref} \usepackage{float}
\usepackage{lipsum}
\usepackage{afterpage}
\usepackage[labelfont=bf]{caption}
\usepackage[nottoc,notlof,notlot]{tocbibind} 
%\renewcommand\bibname{References}
\def\bibname{\Large \bf  References}
\usepackage{lipsum}
\usepackage{fancyhdr}
\pagestyle{fancy}
\fancyhf{}
\renewcommand{\headrulewidth}{0pt}
\fancyhead[LE,RO]{\thepage}
\thispagestyle{empty}
%\afterpage{\lhead{new value}}

\fancyhead[CE]{Y. Sayyari} 
\fancyhead[CO]{An improvement of the upper bound on the entropy of information sources}


%\topmargin=-1.6cm
\textheight 17.5cm%
\textwidth  12cm %
\topmargin   8mm  %
\oddsidemargin   20mm   %
\evensidemargin   20mm   %
\footskip=24pt     %

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{xca}[theorem]{Exercise}
%\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\renewenvironment{proof}{{\bfseries \noindent Proof.}}{~~~~$\square$}
\makeatletter
\def\th@newremark{\th@remark\thm@headfont{\bfseries}}
\makeatother



  
  
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% If you want to insert other packages. Insert them here
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\long\def\symbolfootnote[#1]#2{\begingroup%
%\def\thefootnote{\fnsymbol{footnote}}\footnote[#1]{#2}\endgroup}



 \def \thesection{\arabic{section}}
 

\begin{document}
%\baselineskip 9mm
%\setcounter{page}{}
\thispagestyle{plain}
{\noindent Journal of Mathematical Extension \\
Vol. XX, No. XX, (2014), pp-pp (Will be inserted by layout editor)}\\
ISSN: 1735-8299\\
URL: \url{http://www.ijmex.com}\\
\vspace*{9mm}

\begin{center}

{\Large \bf 
An improvement of the upper bound on the entropy of information sources\\}
%{\bf Do You Have a Subtitle? \\ If so, Write It Here} 


\let\thefootnote\relax\footnote{\scriptsize Received: XXXX; Accepted: XXXX (Will be inserted by editor)}

%{\bfYamin Sayyari}\vspace*{-2mm}\\
%\vspace{2mm} {\small  Enter affiliation here} \vspace{2mm}

{\bf  Yamin Sayyari$^*$\let\thefootnote\relax\footnote{$^*$Corresponding Author}}\vspace*{-2mm}\\
\vspace{2mm} {\small  Department of Mathematics, Sirjan University of Technology, Sirjan, Iran} \vspace{2mm}

\end{center}

\vspace{4mm}


{\footnotesize
\begin{quotation}
{\noindent \bf Abstract.} The theory of zeta functions and fractional calculus plays an important role in statistical problems and in the study of Shannon's entropy. Estimation of the Shannon entropies of information sources from numerical simulation of long orbits is difficult. Our aim within this paper is to present a strong upper bound for the Shannon entropy of information sources.
\end{quotation}
\begin{quotation}
\noindent{\bf AMS Subject Classification:} 37B40; 94A17; 26A51.

\noindent{\bf Keywords and Phrases:} Entropy, Shannon's entropy, Information source, Stochastic process, zeta function.
\end{quotation}}

\section{Introduction}
If $s>1$, then Riemann function is defined as
\begin{align*}
\zeta(s)=\sum_{n=1}^\infty \frac{1}{n^s}.
\end{align*}
The subject of fractional calculus has emerged as a powerful mathematical instrument during the past years, and is used in every branch of statistics, engineering, and other fields. S. Golomb showed that Riemann's zeta function $\zeta$ induces
a probability distribution $\pi(n)=\frac{n^{-s}}{\zeta(s)}$ on the positive integers, for every $s>1$ \cite{Gol}. In Guiasu \cite{Sgu}, the author proved that the probability distribution mentioned above is the unique solution of an entropy-maximization problem.
Fractional calculus of zeta functions can also be used to maximize
\begin{align*}
H=-\sum_{n} \pi(n) \log \pi(n),
\end{align*}
where $\{\pi(n):n\in \mathbb{N}\}$ is a probability distribution on $\mathbb{N}$ \cite{Gua}.
\begin{theorem}
\cite{Gua} Let $\alpha\in \mathbb{R}\backslash \mathbb{Z}$, $\pi(n)>0$ and $\sum_n \pi(n)=1$. The problem of maximizing the Shannon entropy $H=-\sum_{n} \pi(n) \log \pi(n)$ subject to the constraint
\begin{align*}
\sum_{n\in \mathbb{N}} \pi(n) \log D^\alpha_f n^{-x}=\chi_\alpha,~~ x>1+\alpha,
\end{align*}
has a solution given by
\begin{align*}
\pi(n)=\frac{D^\alpha_f n^{-x}}{\zeta^{(\alpha)}(x)}, ~~n\in \mathbb{N}, 
\end{align*}
where the forward Gr\"{u}nwald--Letnikov fractional derivative of $f$ is defined as follows:
\begin{align*}
D^\alpha_f f(x)=\lim_{h\rightarrow 0^+}\frac{\sum_{m=0}^\infty {\alpha\choose m}(-1)^m f(x-mh)}{h^\alpha}.
\end{align*}
\end{theorem}
Entropy and mutual information for random variables play important roles in dynamical systems and information theory. The entropy actually measures the degree of irregularities of a dynamic system, and researchers have done so much to calculate this concept, which is often successful \cite{top, my}, but numerical calculations of entropy are still difficult.
Tapus and Popescu presented a strong upper bound for the classical Shannon entropy \cite{12}. 
In \cite{12, 11, myw}, the authors presented a strong upper bound for the classical Shannon entropy. 
In \cite{soft}, the authors presented the algebraic and Shannon entropies for hypergroupoids and commutative hypergroups, respectively, and studied their fundamental properties.
In \cite{myw}, the author applied Jensen's inequality in information theory and obtained some results for the Shannon entropy of random variables and the Shannon entropy of information sources.
Our purpose within this work is to present a strong upper bound for the Shannon entropy of information sources, refining recent results from the literature. 

Let $X$ be a non-empty set, let $\mathcal{F}$ be a $\sigma$-algebra of subsets of $X$, and let $\mu$ be a measure on $X$ with $\mu(X)=1$; then $ (X, \mathcal{F}, \mu )$ is called a probability measure space. A finite set of measurable sets $\alpha=\{A_1,\ldots,A_n\}$ is called a finite partition if the following properties are fulfilled \cite{walter}:
\begin{align*}
\bigcup_{i=1}^nA_i=X,~~\text{and}~~A_i\cap A_j=\emptyset~~\text{for every}~i,j(1\leq i\neq j\leq n).
\end{align*}
For a partition $\alpha=\{A_1,\ldots,A_n\}$ , the entropy of $\alpha$ is defined by
\begin{align*}
H_\mu(\alpha):=-\sum_{i=1}^n\mu(A_i)\log(\mu(A_i)).
\end{align*}
\begin{definition}
\cite{ami} Let $S$ be  a random variable on $X$ with discrete finite state space $A=\{a_{1},...,a_{N} \}$. We define $p:A\rightarrow [0,1]$ by $p(s)=\mu \{ \omega\in X: S(\omega)=s \}.$ The Shannon's entropy of $S$ is defined by 
\begin{align*}
H_\mu (S):=-\sum_{s\in A,~ p(s)\neq 0} p(s)\log p(s).
\end{align*}
\end{definition}
An information source ${\bf S}$ is a sequence  $(S_{n})_{n=1}^{\infty}$ of random variables $S_{n}:X\longrightarrow A$, where  $n\in \mathbb{N}.$ For given $L\geq 1$  we define a mapping $p:A^{L}\rightarrow [0,1]$ by $p(s_1^L)=\mu \{{\omega \in X : S_{1}(\omega)=s_{1},..., S_{L}(\omega)=s_{L}}\}.$
 The Shannon entropy of order $L$  and the Shannon entropy of source $\bf S$ are respectively defined by
 \begin{align*}
 H_\mu(S_1^{L}) =-\frac{1}{L}\sum_{s_1^L\in A^L}p(s_1^L) \log p(s_1^L),~\text{and}~
 h_\mu({\bf S})=\lim_{L\rightarrow \infty} H_\mu(S_1^{L}),
\end{align*}
where the summation is taken over the collection $\{s_1^L\in A^L:p(s_1^L)\neq 0\}$.
In this paper we use the symbol $s_1^L$ instead of the notation $(s_1,...,s_L)$, and we assume that $p(s_1^L)\neq 0$ for every $L\in \mathbb{N}$.
\begin{theorem}\label{pmy}
	\cite{myw} Let $I=[a,b]$ be an interval, $H:A^L\longrightarrow I$ be a function, and $f:I\longrightarrow \mathbb{R}$ be a convex function, then
	\begin{align}
		&\sum_{s_1^L\in A^L}p(s_1^L)f(H(s_1^L))-f(\sum_{s_1^L\in A^L} p(s_1^L)H(s_1^L))  \nonumber \\
		&\geq \max\{p(r_1^L)f(H(r_1^L))+p(t_1^L)f(H(t_1^L))\nonumber\\
		&-(p(r_1^L)+p(t_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)}
		{p(r_1^L)+p(t_1^L)})\},
	\end{align}
	where the maximum is taken over all $r_1^L\neq t_1^L\in A^L$.
	
\end{theorem}
\section{Main results}
In this section, we continue with a refinement of Theorem \ref{pmy}, as follows:
\begin{theorem}\label{A}
Let $I=[a,b]$ be an interval, $H:A^L\longrightarrow I$ be a function, and $f:I\longrightarrow \mathbb{R}$ be a convex function, then
\begin{align*}
&\sum_{s_1^L\in A^L}p(s_1^L)f(H(s_1^L))-f(\sum_{s_1^L\in A^L} p(s_1^L)H(s_1^L))   \\
&\geq \max\{p(r_1^L)f(H(r_1^L))+p(t_1^L)f(H(t_1^L))+p(u_1^L)f(H(u_1^L))\\
&-(p(r_1^L)+p(t_1^L)+p(u_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)+p(u_1^L)H(u_1^L)}
{p(r_1^L)+p(t_1^L)+p(u_1^L)})\}\\
&\geq \max\{p(r_1^L)f(H(r_1^L))+p(t_1^L)f(H(t_1^L))\\
&-(p(r_1^L)+p(t_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)}
{p(r_1^L)+p(t_1^L)})\},
\end{align*}
where the maximum is taken over all distinct $r_1^L, t_1^L, u_1^L\in A^L$.
\end{theorem}
\begin{proof}
Choose arbitrary $t_1^L, r_1^L, u_1^L\in A^L$. So,
\begin{align*}
&f(\sum_{s_1^L\in A^L} p(s_1^L)H(s_1^L))=f(\sum_{ s_1^L \neq r_1^L, t_1^L, u_1^L\in A^L} p(s_1^L)H(s_1^L))\\
&+(p(r_1^L)+p(t_1^L)+p(u_1^L))(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)+p(u_1^L)H(u_1^L)}{p(r_1^L)+p(t_1^L)+p(u_1^L)})\\
&\leq \sum p(s_1^L)f(H(s_1^L))\\
&+(p(r_1^L)+p(t_1^L)+p(u_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)+p(u_1^L)H(u_1^L)}{p(r_1^L)+p(t_1^L)+p(u_1^L)}),
\end{align*}
where $s_1^L \neq r_1^L, t_1^L, u_1^L\in A^L$.
Therefore,
\begin{align*}
&\sum_{ s_1^L \in A^L} p(s_1^L)f(H(s_1^L))- f(\sum_{s_1^L\in A^L} p(s_1^L)H(s_1^L))\\
&\geq p(r_1^L)f(H(r_1^L))+p(t_1^L)f(H(t_1^L))+p(u_1^L)f(H(u_1^L))
\\&-(p(r_1^L)+p(t_1^L)+p(u_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)+p(u_1^L)H(u_1^L)}{p(r_1^L)+p(t_1^L)+p(u_1^L)}).
\end{align*}
Since $r_1^L, t_1^L, u_1^L\in A^L$ are arbitrary,
\begin{align*}
&\sum_{s_1^L\in A^L}p(s_1^L)f(H(s_1^L))-f(\sum_{s_1^L\in A^L} p(s_1^L)H(s_1^L))   \\
&\geq \max\{p(r_1^L)f(H(r_1^L))+p(t_1^L)f(H(t_1^L))+p(u_1^L)f(H(u_1^L))\\
&-(p(r_1^L)+p(t_1^L)+p(u_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)+p(u_1^L)H(u_1^L)}
{p(r_1^L)+p(t_1^L)+p(u_1^L)})\},
\end{align*}
where the maximum is taken over all distinct $r_1^L, t_1^L, u_1^L\in A^L$.
On the other hand,
\begin{align*}
&f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)+p(u_1^L)H(u_1^L)}{p(r_1^L)+p(t_1^L)+p(u_1^L)})\\
&=f(\frac{p(r_1^L)+p(t_1^L)}{p(r_1^L)+p(t_1^L)+p(u_1^L)}\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)}{p(r_1^L)+p(t_1^L)}+\frac{p(u_1^L)H(u_1^L)}{p(r_1^L)+p(t_1^L)+p(u_1^L)})\\
&\leq  \frac{p(r_1^L)+p(t_1^L)}{p(r_1^L)+p(t_1^L)+p(u_1^L)}f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)}{p(r_1^L)+p(t_1^L)})\\
&+\frac{p(u_1^L)}{p(r_1^L)+p(t_1^L)+p(u_1^L)}f(H(u_1^L)).
\end{align*}
So,
\begin{align*}
&(p(r_1^L)+p(t_1^L)+p(u_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)+p(u_1^L)H(u_1^L)}{p(r_1^L)+p(t_1^L)+p(u_1^L)})\\
&\leq  (p(r_1^L)+p(t_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)}{p(r_1^L)+p(t_1^L)})+(p(u_1^L))f(H(u_1^L)).
\end{align*}
Thus,
\begin{align*}
&p(r_1^L)f(H(r_1^L))+p(t_1^L)f(H(t_1^L))+p(u_1^L)f(H(u_1^L))\\
&-(p(r_1^L)+p(t_1^L)+p(u_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)+p(u_1^L)H(u_1^L)}
{p(r_1^L)+p(t_1^L)+p(u_1^L)})\\
&\geq p(r_1^L)f(H(r_1^L))+p(t_1^L)f(H(t_1^L))\\
&-(p(r_1^L)+p(t_1^L))f(\frac{p(r_1^L)H(r_1^L)+p(t_1^L)H(t_1^L)}
{p(r_1^L)+p(t_1^L)}),
\end{align*}
which completes the proof.
\end{proof}

In order to present the generalization, we define some
notation, as follows:
\begin{align*}
T_{k}:=\max\{\sum _{i=1}^{k}p({r_i}_1^L)f(H({r_i}_1^L))-(\sum _{i=1}^{k}p({r_i}_1^L))f(\frac{\sum _{i=1}^{k}p({r_i}_1^L)H({r_i}_1^L)}
{\sum _{i=1}^{k}p({r_i}_1^L)})\}
\end{align*}
where $2\leq k\leq N^{L}-1$, the maximum is taken over all distinct ${r_i}_1^L\in A^L$.
\begin{theorem}
Let $I=[a,b]$ be an interval, $H:A^L\longrightarrow I$ be a function, $|A|=N$ and $f:I\longrightarrow \mathbb{R}$ be a convex function, then
\begin{align*}
0\leq T_2\leq T_3\leq \cdots \leq T_{N^L-1}\leq \sum_{s_1^L\in A^L}p(s_1^L)f(H(s_1^L))-f(\sum_{s_1^L\in A^L} p(s_1^L)H(s_1^L)).
\end{align*}
\end{theorem}
\begin{proof}
The proof is similar to the proof of Theorem \ref{A}.
\end{proof}
\section{The sources entropy upper bound}
In this section we  present a strong upper bound for the Shannon's entropy of information sources.
\begin{theorem}\label{lthm}
Let ${\bf S}$ be an information source. Then
\begin{align*}
h_\mu({\bf S})
&\leq \log N-\max_k\{\lim_{L\rightarrow \infty}\frac{1}{L} \log[\{\frac{k}{\sum _{i=1}^{k}p({r_i}_1^L)}\}^{\sum _{i=1}^{k}p({r_i}_1^L)}]\\
& \times[\prod _{i=1}^{k}\{p({r_i}_1^L)\}^{p({r_i}_1^L)}]\}.
\end{align*}
%\end{align*}
\end{theorem}
\begin{proof}
Since 
\begin{align*}
&-LH_\mu(S_1^{L})+\log(N^L)
\geq \max_k\{-\sum _{i=1}^{k}p({r_i}_1^L)\log(\frac{1}{p({r_i}_1^L)})
+(\sum _{i=1}^{k}p({r_i}_1^L))\\
&\times \log(\frac{k}{\sum _{i=1}^{k}p({r_i}_1^L)})\}
=\max_k\{\log(\prod _{i=1}^{k}\{p({r_i}_1^L)\}^{p({r_i}_1^L)})\\
&+\log[\{\frac{k}{\sum _{i=1}^{k}p({r_i}_1^L)}\}^{\sum _{i=1}^{k}p({r_i}_1^L)}]\},
\end{align*}
\begin{align*}
\log N-H_\mu(S_1^{L})\geq \max_k\{\frac{1}{L} \log[\{\frac{k}{\sum _{i=1}^{k}p({r_i}_1^L)}\}^{\sum _{i=1}^{k}p({r_i}_1^L)}][\prod _{i=1}^{k}\{p({r_i}_1^L)\}^{p({r_i}_1^L)}]\},
\end{align*}
and
\begin{align*}
H_\mu(S_1^{L})\leq \log N-\max_k\{\frac{1}{L} \log[\{\frac{k}{\sum _{i=1}^{k}p({r_i}_1^L)}\}^{\sum _{i=1}^{k}p({r_i}_1^L)}][\prod _{i=1}^{k}\{p({r_i}_1^L)\}^{p({r_i}_1^L)}]\}.
\end{align*}
Therefore,
\begin{align*}
&h_\mu({\bf S})\\
&\leq \log N-\lim_{L\rightarrow \infty}\max_k\{\frac{1}{L} \log[\{\frac{k}{\sum _{i=1}^{k}p({r_i}_1^L)}\}^{\sum _{i=1}^{k}p({r_i}_1^L)}][\prod _{i=1}^{k}\{p({r_i}_1^L)\}^{p({r_i}_1^L)}]\}\\
&\leq \log N-\max_{2\leq k\leq N^L-1}\{\lim_{L\rightarrow \infty}\frac{1}{L} \log[\{\frac{k}{\sum _{i=1}^{k}p({r_i}_1^L)}\}^{\sum _{i=1}^{k}p({r_i}_1^L)}][\prod _{i=1}^{k}\{p({r_i}_1^L)\}^{p({r_i}_1^L)}]\}.
\end{align*}
\end{proof}

Entropy of information sources is very important in dynamical systems and information theory. Let $ (X, \mathcal{F}, \mu )$ be a probability measure space. For a partition 
\begin{align*}
	\alpha=\{A_0,...,A_N\}
\end{align*}
and measure-preserving dynamical system $f:X\longrightarrow X$, the maps
\begin{align*}
	S_n:X\longrightarrow T_N:=\{0,...,N\},
\end{align*}
defined as
\begin{align*}
	S_n(x)=i~~\text{if and only if}~~ f^n(x)\in A_i
\end{align*}
are random variables on the probability measure space $X$. In this case we have
\begin{align*}
	p(i)=\mu(A_i),
\end{align*}
for every $i$ $(0\leq i\leq N)$, and $h_\mu({\bf S}_\alpha)=h_\mu(f,\alpha)$, where ${\bf S}_\alpha=\{S_n\}$ \cite{ami}. The metric entropy of $f$ is then the supremum of $h_\mu(f,\alpha)$ over all finite partitions of $ (X, \mathcal{F}, \mu )$
(i.e.
\begin{align}\label{lfor}
h_\mu(f)=\sup_{\alpha}h_\mu(f,\alpha)=\sup_{\alpha}h_\mu({\bf S}_\alpha).)
\end{align}
 Thus, an approximation of the entropy of $f$ is obtained by using \eqref{lfor}.
\section{Motivation and conclusion}
In this paper, we have obtained some mathematical inequalities for the entropy of information sources. Also we found new and strong bounds for the Shannon entropy of information sources. Theorem \ref{lthm} shows that in general, 
\begin{align*}
\log N-\frac{1}{L} \log[\{\frac{k}{\sum _{i=1}^{k}p({r_i}_1^L)}\}^{\sum _{i=1}^{k}p({r_i}_1^L)}]
& \times[\prod _{i=1}^{k}\{p({r_i}_1^L)\}^{p({r_i}_1^L)}]
\end{align*}
can only be expected to be an upper bound of $h_\mu({\bf S})$; we will try to extend it in the future.
\begin{center}
\begin{thebibliography}{99} % Enter references in alphabetical order and according to the following format.
\bibitem{Adel}
M. Adeel, K.A. Khan, D. Pe\v{c}ari\'{c}, J. Pe\v{c}ari\'{c}, Generalization of 
the Levinson inequality with
applications to information theory, J. Inequal. Appl., 230 (2019). 
\bibitem{Adel2}
M. Adeel, K.A. Khan, D. Pe\v{c}ari\'{c}, J. Pe\v{c}ari\'{c}, Levinson type inequalities for higher order convex functions via
Abel--Gontscharoff interpolation, Adv. Differ. Equ., 430 (2019).

\bibitem{Adel3}
M. Adeel, K.A. Khan, D. Pe\v{c}ari\'{c}, J. Pe\v{c}ari\'{c}, Estimation of f-divergence and Shannon entropy by Levinson type inequalities via new Green's functions and Lidstone polynomial, Adv. Differ. Equ., 2020, 27 (2020).

\bibitem{top}
 J.M. Amig\'{o}, M.B. Kennel, Topological permutation entropy, { Physica D}, 231 (2) (2007), 137--142.
 
 \bibitem{ami}
J.M. Amig\'{o}, { Permutation Complexity in Dynamical Systems "Ordinal Patterns,
Permutation Entropy, and All That"}, Springer-Verlag, Berlin, $2010.$ 

\bibitem{catti}
C. Cattani and E. Guariglia, Fractional derivative of the Hurwitz $\zeta$-function and chaotic decay to zero, J. King Saud Univ. Sci. 28 (1) (2016), 75–81, https://doi.org/10.1016/j.jksus.2015.04.003.

\bibitem{Gol}
S.W. Golomb, A class of probability distributions on the integers, J. Number Theory, 2 (1970), 189-192.

\bibitem{Gua}
E. Guariglia, Fractional calculus, zeta functions and Shannon entropy, Open Mathematics, 19 (1) (2021), https://doi.org/10.1515/math-2021-0010.

\bibitem{Sgu}
S. Guiasu, An optimization problem related to the zeta function, Canad. Math. Bull. 29 (1) (1986), 70–73, https://doi.org/10.4153/CMB-1986-013-7.
 
\bibitem{Rich}
R.L. Magin, C. Ingo, Entropy and information in a fractional order model of anomalous diffusion, 16th IFAC Symposium on System Identification The International Federation of Automatic Control Brussels, Belgium. (2012), 11-13.

\bibitem{12}
N. Tapus, P.G. Popescu, A new entropy upper bound, Appl. Math. Lett. 25 (11) (2012), 1887-1890.

\bibitem{my}
Ch. Corda, M. FatehiNia, M.R. Molaei, Y. Sayyari, Entropy of Iterated Function Systems and Their Relations with Black Holes and Bohr-Like Black Holes Entropies, Entropy. 20, 56, (2018).

\bibitem{soft}A. Mehrpooya, Y. Sayyari, M.R. Molaei, Algebraic and Shannon entropies of commutative hypergroups and their connection with information and permutation entropies and with calculation of entropy for chemical algebras, Soft Computing 23 (24) (2019), 13035-13053.

\bibitem{myw}
Y. Sayyari, New bounds for entropy of information sources, Wavelets and Linear Algebra, 7 (2) (2020), 1-9.


\bibitem{my4}
Y. Sayyari, New entropy bounds via uniformly convex functions, Chaos, Solitons and Fractals 141 (2020), 110360 (Doi.org/10.1016/j.chaos.2020.110360).

\bibitem{11}
S. Simic, Jensen’s inequality and new entropy bounds, Appl. Math. Lett. 22 (8) (2009), 1262-1265.

\bibitem{walter}
P. Walters,  An introduction to ergodic theory (Springer Verlag, New York, 2000).

\end{thebibliography}
\end{center}



{\small

\noindent{\bf Yamin Sayyari}

\noindent Department of Mathematics

\noindent Assistant Professor of Mathematics

\noindent Sirjan University Of Technology

\noindent Sirjan, Iran

\noindent E-mail: ysayyari@gmail.com}\\

\end{document}