diff --git a/.gitignore b/.gitignore index de4b27b7..025ab069 100644 --- a/.gitignore +++ b/.gitignore @@ -139,9 +139,9 @@ data/SPY_D_TV3.csv data/TV_5min.csv data/tulip.csv examples/*.csv -jnb/* -notes/* -csv/* +jnb/ +notes/ +csv/ *.txt df_file diff --git a/Makefile b/Makefile index 0954a8cf..f218b305 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ .PHONY: tests tests: - pytest -vv -s -l tests + pytest -vvv -s -l tests caches: find ./pandas_ta | grep -E "(__pycache__|\.pyc|\.pyo$\)" @@ -12,16 +12,16 @@ init: pip install -r requirements.txt test_metrics: - pytest -vv -s -l --cache-clear tests/test_metrics.py + pytest -vv -s -l -W ignore::DeprecationWarning --cache-clear tests/test_metrics.py test_numba: - pytest -vv -s -l --cache-clear tests/test_numba.py + pytest -vv -s -l -W ignore::DeprecationWarning --cache-clear tests/test_numba.py test_studies: - pytest -vv -s -l --cache-clear tests/test_studies.py + pytest -vv -s -l -W ignore::DeprecationWarning --cache-clear tests/test_studies.py test_ta: - pytest -vv -s -l --cache-clear tests/test_indicator_*.py + pytest -vv -s -l -W ignore::DeprecationWarning --cache-clear tests/test_indicator_*.py test_utils: - pytest -vv -s -l --cache-clear tests/test_utils.py \ No newline at end of file + pytest -vv -s -l -W ignore::DeprecationWarning --cache-clear tests/test_utils.py diff --git a/README.md b/README.md index 026ba2be..33e8133c 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,7 @@

-Pandas TA - A Technical Analysis Library in Python 3 -================= +# Pandas TA - A Technical Analysis Library in Python 3 [![license](https://img.shields.io/github/license/twopirllc/pandas-ta)](#license) [![Python Version](https://img.shields.io/pypi/pyversions/pandas-ta?style=flat)](https://pypi.org/project/pandas_ta/) @@ -20,7 +19,6 @@ Pandas TA - A Technical Analysis Library in Python 3 [![Closed Issues](https://img.shields.io/github/issues-closed-raw/twopirllc/pandas-ta?style=flat)](#closed-issues) [![Buy Me a Coffee](https://img.shields.io/badge/buy_me_a_coffee-orange.svg?style=flat)](https://www.buymeacoffee.com/twopirllc) -
![Example Chart](/images/SPY_Chart.png) @@ -28,106 +26,106 @@ Pandas TA - A Technical Analysis Library in Python 3
-_Pandas Technical Analysis_ (**Pandas TA**) is a free, Open Source, and easy to use Technical Analysis library with a Pandas DataFrame Extension. It has over 200 indicators, utility functions and TA Lib Candlestick Patterns. Beyond TA feature generation, it has a flat library structure, it's own DataFrame Extension (called ```ta```), Custom Indicator Sets (called a ```Study```) and Custom Directory creation. Lastly, it includes methods to help with Data Acquisition and Stochastic Sampling, Backtesting Support with Signal and Trend methods, and some basic Performance Metrics. +_Pandas Technical Analysis_ (**Pandas TA**) is a free, Open Source, and easy to use Technical Analysis library with a Pandas DataFrame Extension. It has over 200 indicators, utility functions and TA Lib Candlestick Patterns. Beyond TA feature generation, it has a flat library structure, its own DataFrame Extension (called `ta`), Custom Indicator Sets (called a `Study`) and Custom Directory creation. Lastly, it includes methods to help with Data Acquisition and Stochastic Sampling, Backtesting Support with Signal and Trend methods, and some basic Performance Metrics.
# **Contents** -* [Features](#features) -* [Used By](#used-by) -* [Sponsors](#sponsors) -* [Installation](#installation) - * [Stable](#stable) - * [Latest Version](#latest-version) - * [Development Version](#development-version) -* [Quick Start](#quick-start) -* [Help](#help) -* [Issues and Contributions](#issues-and-contributions) -* [Programming Conventions](#programming-conventions) - * [Standard](#standard) - * [Pandas TA DataFrame Extension](#dataframe-extension) - * [Pandas TA Study](#pandas-ta-study) - * [Types of Studies](#types-of-studies) - * [Multiprocessing](#multiprocessing) -* [DataFrame Extension Properties](#dataframe-extension-properties) -* [DataFrame Extension Methods](#dataframe-extension-methods) -* [Indicators by Category](#indicators-by-category) - * [Candles](#candles-64) - * [Cycles](#cycles-2) - * [Momentum](#momentum-43) - * [Overlap](#overlap-36) - * [Performance](#performance-3) - * [Statistics](#statistics-11) - * [Transform](#transform-3) - * [Trend](#trend-21) - * [Utility](#utility-5) - * [Volatility](#volatility-16) - * [Volume](#volume-20) -* [Backtesting](#backtesting) - * [Vector BT](#vector-bt) -* [BETA](#beta) - * [Stochastic Samples](#stochastic-samples) - * [Performance Metrics](#performance-metrics) -* [TODO](#todo) -* [Sources](#sources) -* [Support](#support) - +- [Features](#features) +- [Used By](#used-by) +- [Sponsors](#sponsors) +- [Installation](#installation) + - [Stable](#stable) + - [Latest Version](#latest-version) + - [Development Version](#development-version) +- [Quick Start](#quick-start) +- [Help](#help) +- [Issues and Contributions](#issues-and-contributions) +- [Programming Conventions](#programming-conventions) + - [Standard](#standard) + - [Pandas TA DataFrame Extension](#dataframe-extension) + - [Pandas TA Study](#pandas-ta-study) + - [Types of Studies](#types-of-studies) + - [Multiprocessing](#multiprocessing) +- [DataFrame Extension Properties](#dataframe-extension-properties) +- [DataFrame Extension 
Methods](#dataframe-extension-methods) +- [Indicators by Category](#indicators-by-category) + - [Candles](#candles-64) + - [Cycles](#cycles-2) + - [Momentum](#momentum-43) + - [Overlap](#overlap-36) + - [Performance](#performance-3) + - [Statistics](#statistics-11) + - [Transform](#transform-3) + - [Trend](#trend-22) + - [Utility](#utility-5) + - [Volatility](#volatility-16) + - [Volume](#volume-20) +- [Backtesting](#backtesting) + - [Vector BT](#vector-bt) +- [BETA](#beta) + - [Stochastic Samples](#stochastic-samples) + - [Performance Metrics](#performance-metrics) +- [TODO](#todo) +- [Sources](#sources) +- [Support](#support) +
# **Features** -Large & Lite Weight Library ---------------------------- -* Over 200 Indicators, Statistics and Candlestick Patterns. - * Over 60 Candlestick Patterns with **[TA Lib](https://github.com/TA-Lib/ta-lib-python)** indicator integration. -* Flat library structure similar to **TA Lib**. -* Single dependency: [Pandas](https://pandas.pydata.org/) - -Accuracy --------- -* Indicators are highly correlated, _r > 0.99_, with [TA Lib](https://github.com/TA-Lib/ta-lib-python) and builtin [TradingView](https://www.tradingview.com/) Indicators. - * :chart_with_upwards_trend: Contributions are welcome for improved accuracy and performance. - -Performance ------------ -* Pandas TA is fast, with or without **TA Lib** or **Numba** installed, but one is not penalized if they are installed. - * **TA Lib** computations are **enabled** by default. They can be disabled per indicator. - * The library includes a performance method, ```help(ta.speed_test)```, to check runtime indicator performance for a given _ohlcv_ DataFrame. -* Optionable **Multiprocessing** for a Pandas TA ```Study```. -* Check Indicator Speeds on your system with the [Indicator Speed Check Notebook](https://github.com/twopirllc/pandas-ta/tree/main/examples/Speed_Check.ipynb). - -Bulk Processing ---------------- -* Easily process many indicators using the DataFrame Extension method ```df.ta.study()```. -* Supports two kinds of Studies. - * **Builtin**: All, Categorical ("candles", "momentum", ...), and Common. - * **Custom**: User Defined ```Study``` (formerly ```Strategy```). - -Additional Features -------------------- -* **Examples** - * Basic usage and workflows. See the [**Example Jupyter Notebooks**](https://github.com/twopirllc/pandas-ta/tree/main/examples). - * Creating Custom Studies using the [__Study__ Class](https://github.com/twopirllc/pandas-ta/tree/main/examples/PandasTA_Study_Examples.ipynb). 
- * **Study Customizations** including, but not limited to, applying _prefixes_ or _suffixes_ or _both_ to column/indicators names. - * Composition/Chained Studies like putting **bbands** on **macd**. -* **Custom Indicators Directory** - * Create and import a custom directory containing private indicators independent from the main library. - * Use ```help(ta.import_dir)``` or read the ```import_dir``` method in [/pandas_ta/custom.py](https://github.com/twopirllc/pandas-ta/blob/main/pandas_ta/custom.py) for more information. -* **Data Acquisition** - * Easily download _ohlcv_ data from [yfinance](https://github.com/ranaroussi/yfinance) or with the [Polygon API](https://github.com/pssolanki111/polygon). - * See ```help(ta.ticker)```, ```help(ta.yf)```, ```help(ta.polygon_api)``` and examples below. -* **Stochastic Sample Generation** _BETA_ - * Built upon many of the Stochastic Processes from the [stochastic](https://github.com/crflynn/stochastic) package. - * See ```help(ta.sample)```. -* **Performance Metrics** _BETA_ - * A mini set of Performance Metrics. - * :chart_with_upwards_trend: Contributions are welcome for improved accuracy and performance. -* **Backtesting Support** _BETA_ - * Easily generate Trading Signals for [**vectorbt**](https://github.com/polakowo/vectorbt) using ```ta.tsignals()``` or ```ta.xsignals()``` methods. +## Large & Lite Weight Library + +- Over 200 Indicators, Statistics and Candlestick Patterns. + - Over 60 Candlestick Patterns with **[TA Lib](https://github.com/TA-Lib/ta-lib-python)** indicator integration. +- Flat library structure similar to **TA Lib**. +- Single dependency: [Pandas](https://pandas.pydata.org/) + +## Accuracy + +- Indicators are highly correlated, _r > 0.99_, with [TA Lib](https://github.com/TA-Lib/ta-lib-python) and builtin [TradingView](https://www.tradingview.com/) Indicators. + - :chart_with_upwards_trend: Contributions are welcome for improved accuracy and performance. 
+ +## Performance + +- Pandas TA is fast, with or without **TA Lib** or **Numba** installed, but one is not penalized if they are installed. + - **TA Lib** computations are **enabled** by default. They can be disabled per indicator. + - The library includes a performance method, `help(ta.speed_test)`, to check runtime indicator performance for a given _ohlcv_ DataFrame. +- Optionable **Multiprocessing** for a Pandas TA `Study`. +- Check Indicator Speeds on your system with the [Indicator Speed Check Notebook](https://github.com/twopirllc/pandas-ta/tree/main/examples/Speed_Check.ipynb). + +## Bulk Processing + +- Easily process many indicators using the DataFrame Extension method `df.ta.study()`. +- Supports two kinds of Studies. + - **Builtin**: All, Categorical ("candles", "momentum", ...), and Common. + - **Custom**: User Defined `Study` (formerly `Strategy`). + +## Additional Features + +- **Examples** + - Basic usage and workflows. See the [**Example Jupyter Notebooks**](https://github.com/twopirllc/pandas-ta/tree/main/examples). + - Creating Custom Studies using the [**Study** Class](https://github.com/twopirllc/pandas-ta/tree/main/examples/PandasTA_Study_Examples.ipynb). + - **Study Customizations** including, but not limited to, applying _prefixes_ or _suffixes_ or _both_ to column/indicators names. + - Composition/Chained Studies like putting **bbands** on **macd**. +- **Custom Indicators Directory** + - Create and import a custom directory containing private indicators independent from the main library. + - Use `help(ta.import_dir)` or read the `import_dir` method in [/pandas_ta/custom.py](https://github.com/twopirllc/pandas-ta/blob/main/pandas_ta/custom.py) for more information. +- **Data Acquisition** + - Easily download _ohlcv_ data from [yfinance](https://github.com/ranaroussi/yfinance) or with the [Polygon API](https://github.com/pssolanki111/polygon). + - See `help(ta.ticker)`, `help(ta.yf)`, `help(ta.polygon_api)` and examples below. 
+- **Stochastic Sample Generation** _BETA_ + - Built upon many of the Stochastic Processes from the [stochastic](https://github.com/crflynn/stochastic) package. + - See `help(ta.sample)`. +- **Performance Metrics** _BETA_ + - A mini set of Performance Metrics. + - :chart_with_upwards_trend: Contributions are welcome for improved accuracy and performance. +- **Backtesting Support** _BETA_ + - Easily generate Trading Signals for [**vectorbt**](https://github.com/polakowo/vectorbt) using `ta.tsignals()` or `ta.xsignals()` methods.
@@ -135,75 +133,79 @@ Back to [Contents](#contents)
-**Used By** -=================== +# **Used By** + Pandas TA is used by Applications and Services like -[Freqtrade](https://github.com/freqtrade/freqtrade) ----------- +## [Freqtrade](https://github.com/freqtrade/freqtrade) + > Freqtrade is a free and open source crypto trading bot written in Python. It is designed to support all major exchanges and be controlled via Telegram. It contains backtesting, plotting and money management tools as well as strategy optimization by machine learning.
-[Open BB](https://openbb.co/) -------------------- +## [Open BB](https://openbb.co/) + #### Previously **Gamestonk Terminal** + > OpenBB is a leading open source investment analysis company. -We represent millions of investors who want to leverage state-of-the-art data science and machine learning technologies to make sense of raw unrefined data. Our mission is to make investment research effective, powerful and accessible to everyone. +> We represent millions of investors who want to leverage state-of-the-art data science and machine learning technologies to make sense of raw unrefined data. Our mission is to make investment research effective, powerful and accessible to everyone.
-[QUANTCONNECT](https://www.quantconnect.com/) -------------------- +## [QUANTCONNECT](https://www.quantconnect.com/) + > QUANTCONNECT powers your quantitative research with a cutting-edge, unified API for research, backtesting, and live trading on the world's leading algorithmic trading platform.
-[Tune TA](https://github.com/jmrichardson/tuneta) -------------------- +## [Tune TA](https://github.com/jmrichardson/tuneta) + > TuneTA optimizes technical indicators using a distance correlation measure to a user defined target feature such as next day return. Indicator parameter(s) are selected using clustering techniques to avoid "peak" or "lucky" values. The set of tuned indicators can be ...
-[VectorBT Pro](https://vectorbt.pro/) -------------------- +## [VectorBT Pro](https://vectorbt.pro/) + > vectorbt PRO is the next-generation engine for backtesting, algorithmic trading, and research. It's a high-performance, actively-developed, commercial successor to the vectorbt library, one of the world's most innovative open-source backtesting engines. The PRO version extends the standard library with new impressive features and useful enhancements for professionals.
-**Sponsors** -=================== +# **Sponsors** + Thank you for your sponsorship of Pandas TA!
-**Installation** -=================== -The _minimum_ requirement is [Pandas](https://github.com/pandas-dev/pandas). Though not required, additional features _may_ require ```numba```, ```polygon```, ```sklearn```, ```statsmodels```, ```stochastic```, ```ta-lib```, ```tqdm```, ```vectorbt```, or ```yfinance```. -* **Note**: ```vectorbt``` requires many of the additional packages listed. +# **Installation** + +The _minimum_ requirement is [Pandas](https://github.com/pandas-dev/pandas). Though not required, additional features _may_ require `numba`, `polygon`, `sklearn`, `statsmodels`, `stochastic`, `ta-lib`, `tqdm`, `vectorbt`, or `yfinance`. + +- **Note**: `vectorbt` requires many of the additional packages listed.
-Pip ---- -The ```pip``` version, _0.3.14b_, is the last stable release. The next **major** release will occur when all the remaining _Hilbert Transform_ indicators from TA Lib are [included](https://github.com/twopirllc/pandas-ta/labels/help%20wanted). +## Pip + +The `pip` version, _0.3.14b_, is the last stable release. The next **major** release will occur when all the remaining _Hilbert Transform_ indicators from TA Lib are [included](https://github.com/twopirllc/pandas-ta/labels/help%20wanted). + ```sh $ pip install pandas_ta ``` How about **All**? + ```sh $ pip install pandas_ta[full] ```
+## Development Version + +The _development_ version, _0.4.11b_, includes _numerous_ bug fixes, speed improvements and better documentation since release, _0.3.14b_. -Development Version -------------------- -The _development_ version, _0.4.10b_, includes _numerous_ bug fixes, speed improvements and better documentation since release, _0.3.14b_. ```sh $ pip install -U git+https://github.com/twopirllc/pandas-ta.git@development ``` @@ -213,13 +215,15 @@ Back to [Contents](#contents)
# **Quick Start** + Indicators return either a Pandas Series or DataFrame. -* **Note:** _Volume Weighted Average Price_ (**vwap**) is the only indicator that requires a DatetimeIndex. + +- **Note:** _Volume Weighted Average Price_ (**vwap**) is the only indicator that requires a DatetimeIndex.
-Simple Example --------------- +## Simple Example + ```python import pandas as pd import pandas_ta as ta @@ -255,6 +259,7 @@ For a more descriptive Quick Start, please check out Michelangiolo Mazzeschi's M
# **Help** + ```python import pandas as pd import pandas_ta as ta @@ -279,22 +284,22 @@ Back to [Contents](#contents) # **Issues and Contributions** Contributions, feedback, and bug squashing are integral to the success of this library. If you see something you can fix, _please_ do. Your contributon helps us all! -* :stop_sign: _Please_ **DO NOT** email me personally with Pandas TA Bugs, Issues or Feature Requests that are best handled with Github [Issues](https://github.com/twopirllc/pandas-ta/issues). + +- :stop_sign: _Please_ **DO NOT** email me personally with Pandas TA Bugs, Issues or Feature Requests that are best handled with Github [Issues](https://github.com/twopirllc/pandas-ta/issues).
-[Bugs, Indicators or Feature Requests](https://github.com/twopirllc/pandas-ta/issues) --------------------------------------- +## [Bugs, Indicators or Feature Requests](https://github.com/twopirllc/pandas-ta/issues) + 1. Some bugs and features may already be be fixed or implemented in either the [Latest Version](#latest-version) or the the [Development Version](#development-version). _Please_ try them first. 1. If the _Latest_ or _Development_ Versions do not resolve the bug or address the Issue, try searching both _Open_ and _Closed_ Issues **before** opening a new Issue. 1. When creating a new Issue, please be as **detailed** as possible **with** reproducible code, links if any, applicable screenshots, errors, logs, and data samples. - * You **will** be asked again for skipping form questions. - * Do you have correlation analysis to back your claim? + - You **will** be asked again for skipping form questions. + - Do you have correlation analysis to back your claim?
-**Contributors** -================ +# **Contributors** _Thank you for your contributions!_ @@ -302,97 +307,97 @@ _Thank you for your contributions!_
-How to [Contribute](https://github.com/twopirllc/pandas-ta/labels?sort=count-desc) or what [TODO](#todo)? -------------------- -| | | -|-|-| -| Satisfaction ```or``` Suggestions? | [Feedback](https://github.com/twopirllc/pandas-ta/labels/feedback) | -| Knowledge ```and``` Experience? | [Info](https://github.com/twopirllc/pandas-ta/labels/info) | -| ```!```hard | [Good First Issue](https://github.com/twopirllc/pandas-ta/labels/good%20first%20issue) | -| A little more challenging? | [Bugs](https://github.com/twopirllc/pandas-ta/labels/bug) / [Enhancements](https://github.com/twopirllc/pandas-ta/labels/enhancement) | -| Lonewolf? | [Help Wanted](https://github.com/twopirllc/pandas-ta/labels/help%20wanted) | +## How to [Contribute](https://github.com/twopirllc/pandas-ta/labels?sort=count-desc) or what [TODO](#todo)? +| | | +| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | +| Satisfaction `or` Suggestions? | [Feedback](https://github.com/twopirllc/pandas-ta/labels/feedback) | +| Knowledge `and` Experience? | [Info](https://github.com/twopirllc/pandas-ta/labels/info) | +| `!`hard | [Good First Issue](https://github.com/twopirllc/pandas-ta/labels/good%20first%20issue) | +| A little more challenging? | [Bugs](https://github.com/twopirllc/pandas-ta/labels/bug) / [Enhancements](https://github.com/twopirllc/pandas-ta/labels/enhancement) | +| Lonewolf? | [Help Wanted](https://github.com/twopirllc/pandas-ta/labels/help%20wanted) | Back to [Contents](#contents)
-**Programming Conventions** -=========================== +# **Programming Conventions** **Pandas TA** supports _three_ Programming Conventions to make it easy to calculate or apply TA features. This includes the Standard, DataFrame Extension, and Pandas TA Study Conventions. -* **Note**: Each indicator either returns a _Series_ or a _DataFrame_ in Uppercase Underscore format regardless of style. + +- **Note**: Each indicator either returns a _Series_ or a _DataFrame_ in Uppercase Underscore format regardless of style.
-Standard --------- +## Standard + The Standard Convention is similar to TA Lib where one has to _explicitly_ define the input arguments and manage the resultant _Series_ or _DataFrame_. -* ```sma10 = ta.sma(df["Close"], length=10)``` - * Returns a Series with name: ```SMA_10``` -* ```donchiandf = ta.donchian(df["HIGH"], df["low"], lower_length=10, upper_length=15)``` - * Returns a DataFrame named ```DC_10_15``` and column names: ```DCL_10_15, DCM_10_15, DCU_10_15``` -* ```ema10_ohlc4 = ta.ema(ta.ohlc4(df["Open"], df["High"], df["Low"], df["Close"]), length=10)``` - * Chaining indicators is possible but you have to be explicit. - * Since it returns a Series named ```EMA_10```. If needed, you may need to uniquely name it. +- `sma10 = ta.sma(df["Close"], length=10)` + - Returns a Series with name: `SMA_10` +- `donchiandf = ta.donchian(df["HIGH"], df["low"], lower_length=10, upper_length=15)` + - Returns a DataFrame named `DC_10_15` and column names: `DCL_10_15, DCM_10_15, DCU_10_15` +- `ema10_ohlc4 = ta.ema(ta.ohlc4(df["Open"], df["High"], df["Low"], df["Close"]), length=10)` + - Chaining indicators is possible but you have to be explicit. + - Since it returns a Series named `EMA_10`. If needed, you may need to uniquely name it.
-DataFrame Extension -------------------- -The [DataFrame Extension](https://pandas.pydata.org/docs/development/extending.html) "ta", extends the DataFrame with additional properties and methods specific to the library. Unlike the _Standard Convention_, ```df.ta``` uses the _ohlcva_ columns as indicator arguments thus removing the need to specify the columns manually. +## DataFrame Extension + +The [DataFrame Extension](https://pandas.pydata.org/docs/development/extending.html) "ta", extends the DataFrame with additional properties and methods specific to the library. Unlike the _Standard Convention_, `df.ta` uses the _ohlcva_ columns as indicator arguments thus removing the need to specify the columns manually. -* ```sma10 = df.ta.sma(length=10)``` - * Returns a Series with name: ```SMA_10``` -* ```ema10_ohlc4 = df.ta.ema(close=df.ta.ohlc4(), length=10, suffix="OHLC4")``` - * Returns a Series with name: ```EMA_10_OHLC4``` - * Chaining Indicators _requires_ specifying the input like: ```close=df.ta.ohlc4()```. -* ```donchiandf = df.ta.donchian(lower_length=10, upper_length=15)``` - * Returns a DataFrame named ```DC_10_15``` and column names: ```DCL_10_15, DCM_10_15, DCU_10_15``` +- `sma10 = df.ta.sma(length=10)` + - Returns a Series with name: `SMA_10` +- `ema10_ohlc4 = df.ta.ema(close=df.ta.ohlc4(), length=10, suffix="OHLC4")` + - Returns a Series with name: `EMA_10_OHLC4` + - Chaining Indicators _requires_ specifying the input like: `close=df.ta.ohlc4()`. +- `donchiandf = df.ta.donchian(lower_length=10, upper_length=15)` + - Returns a DataFrame named `DC_10_15` and column names: `DCL_10_15, DCM_10_15, DCU_10_15` -Same as the last three examples, but appending the results directly to the DataFrame ```df```. -* ```df.ta.sma(length=10, append=True)``` - * Appends to ```df``` column name: ```SMA_10```. 
-* ```df.ta.ema(close=df.ta.ohlc4(append=True), length=10, suffix="OHLC4", append=True)``` - * Chaining Indicators _requires_ specifying the input like: ```close=df.ta.ohlc4()```. -* ```df.ta.donchian(lower_length=10, upper_length=15, append=True)``` - * Appends to ```df``` with column names: ```DCL_10_15, DCM_10_15, DCU_10_15```. +Same as the last three examples, but appending the results directly to the DataFrame `df`. + +- `df.ta.sma(length=10, append=True)` + - Appends to `df` column name: `SMA_10`. +- `df.ta.ema(close=df.ta.ohlc4(append=True), length=10, suffix="OHLC4", append=True)` + - Chaining Indicators _requires_ specifying the input like: `close=df.ta.ohlc4()`. +- `df.ta.donchian(lower_length=10, upper_length=15, append=True)` + - Appends to `df` with column names: `DCL_10_15, DCM_10_15, DCU_10_15`. Back to [Contents](#contents)
-**Pandas TA** _Study_ -===================== -:stop_sign: The ```Strategy``` Class and ```strategy()``` are depreciated. Use ```Study``` Class and ```study()``` method instead. +# **Pandas TA** _Study_ + +:stop_sign: The `Strategy` Class and `strategy()` are deprecated. Use `Study` Class and `study()` method instead. -The ```Study``` [_Dataclass_](https://docs.python.org/3/library/dataclasses.html) can be used to name and group indicators and are executed by the Extension ```study()``` method. A ```Study``` can range from _complex_ with _Composition/Chaining_ to _simple_ like a ```CommonStudy```. -* Two premade _Studies_: ```AllStudy``` and ```CommonStudy```. -* The ```study()``` method automatically appends to the DataFrame. - * Can be disabled by using the argument: ```append=False```. -* All Studies use **mulitprocessing** _except_ when the cpu ```cores``` are set to zero or when using the ```col_names``` argument (see [below](#multiprocessing)). -* A Study will fail when consumed by Pandas TA if there is no ```{"kind": "indicator name"}``` attribute. _Remember_ to check your spelling. -* For examples, see the [Pandas TA Study Examples Notebook](https://github.com/twopirllc/pandas-ta/blob/main/examples/PandasTA_Study_Examples.ipynb). +The `Study` [_Dataclass_](https://docs.python.org/3/library/dataclasses.html) can be used to name and group indicators and is executed by the Extension `study()` method. A `Study` can range from _complex_ with _Composition/Chaining_ to _simple_ like a `CommonStudy`. -_Required Arguments_ --------------------- -- **name**: Some short memorable string. _Note_: Case-insensitive "All" is reserved. +- Two premade _Studies_: `AllStudy` and `CommonStudy`. +- The `study()` method automatically appends to the DataFrame. + - Can be disabled by using the argument: `append=False`. +- All Studies use **multiprocessing** _except_ when the cpu `cores` are set to zero or when using the `col_names` argument (see [below](#multiprocessing)). 
+- A Study will fail when consumed by Pandas TA if there is no `{"kind": "indicator name"}` attribute. _Remember_ to check your spelling. +- For examples, see the [Pandas TA Study Examples Notebook](https://github.com/twopirllc/pandas-ta/blob/main/examples/PandasTA_Study_Examples.ipynb). + +## _Required Arguments_ + +- **name**: Some short memorable string. _Note_: Case-insensitive "All" is reserved. - **ta**: A list of dicts containing keyword arguments to identify the indicator and the indicator's arguments -_Optional Arguments_ --------------------- -- **cores**: The number of cores to use for multiprocessing the **Study**. Default: ```multiprocessing.cpu_count()``` +## _Optional Arguments_ + +- **cores**: The number of cores to use for multiprocessing the **Study**. Default: `multiprocessing.cpu_count()` - **description**: A more detailed description of what the Study tries to capture. Default: None - **created**: At datetime string of when it was created. Default: Automatically generated.
-Types of Studies -================ +# Types of Studies + +## _Builtin_ -_Builtin_ ---------- ```python # The Default Study: ta.AllStudy # The following are equivalent: @@ -404,8 +409,8 @@ df.ta.study(ta.AllStudy) df.ta.study(ta.CommonStudy) ``` -_Categorical_ -------------- +## _Categorical_ + ```python # List of indicator categories df.ta.categories @@ -417,8 +422,8 @@ df.ta.study("Momentum") # Default values for all Momentum indicators df.ta.study("overlap", length=42) ``` -_Custom_ --------- +## _Custom_ + ```python # Help help(df.ta.study) @@ -442,11 +447,11 @@ Back to [Contents](#contents)
-**Multiprocessing** -======================= +# **Multiprocessing** + +The **Pandas TA** `study()` method utilizes **multiprocessing** for bulk indicator processing of all Study types with **ONE EXCEPTION!** When using the `col_names` parameter to rename resultant column(s), the indicators in `ta` array will be ran in order. -The **Pandas TA** ```study()``` method utilizes **multiprocessing** for bulk indicator processing of all Study types with **ONE EXCEPTION!** When using the ```col_names``` parameter to rename resultant column(s), the indicators in ```ta``` array will be ran in order. -* Multiprocessing isn't free, it comes with the cost of spinning up a Multiprocessing Pool, so lowering or disabling the ```cores``` can improve bulk processing. +- Multiprocessing isn't free, it comes with the cost of spinning up a Multiprocessing Pool, so lowering or disabling the `cores` can improve bulk processing. ```python # VWAP requires the DataFrame index to be a DatetimeIndex. @@ -488,7 +493,9 @@ df.columns
## Custom Study without Multiprocessing + **Remember** These will not be utilizing **multiprocessing** + ```python NonMPStudy = ta.Study( name="EMAs, BBs, and MACD", @@ -506,10 +513,8 @@ df.ta.study(NonMPStudy) Back to [Contents](#contents) -

- # **DataFrame Extension Properties** ## **adjusted** @@ -583,13 +588,12 @@ df.ta.time_range = "days" df.ta.time_range # prints DataFrame time in "days" as float ``` - Back to [Contents](#contents)

- # **DataFrame Extension Methods** + These are some additional methods available to the DataFrame Extension.
@@ -674,7 +678,9 @@ help(ta.study)
## **ticker** + ### [Yahoo Finance](https://github.com/ranaroussi/yfinance) + ```python # Download Chart history using yfinance. (pip install yfinance) # It uses the same keyword arguments as yfinance (excluding start and end) @@ -715,73 +721,75 @@ df.ta.to_utc()
# **Indicators** (_by Category_) + ### **Candles** (64) -Patterns that are **not bold**, require TA-Lib to be installed: ```pip install TA-Lib``` - -* 2crows -* 3blackcrows -* 3inside -* 3linestrike -* 3outside -* 3starsinsouth -* 3whitesoldiers -* abandonedbaby -* advanceblock -* belthold -* breakaway -* closingmarubozu -* concealbabyswall -* counterattack -* darkcloudcover -* **doji** -* dojistar -* dragonflydoji -* engulfing -* eveningdojistar -* eveningstar -* gapsidesidewhite -* gravestonedoji -* hammer -* hangingman -* harami -* haramicross -* highwave -* hikkake -* hikkakemod -* homingpigeon -* identical3crows -* inneck -* **inside** -* invertedhammer -* kicking -* kickingbylength -* ladderbottom -* longleggeddoji -* longline -* marubozu -* matchinglow -* mathold -* morningdojistar -* morningstar -* onneck -* piercing -* rickshawman -* risefall3methods -* separatinglines -* shootingstar -* shortline -* spinningtop -* stalledpattern -* sticksandwich -* takuri -* tasukigap -* thrusting -* tristar -* unique3river -* upsidegap2crows -* xsidegap3methods -* _Heikin-Ashi_: **ha** -* _Z Score_: **cdl_z** + +Patterns that are **not bold**, require TA-Lib to be installed: `pip install TA-Lib` + +- 2crows +- 3blackcrows +- 3inside +- 3linestrike +- 3outside +- 3starsinsouth +- 3whitesoldiers +- abandonedbaby +- advanceblock +- belthold +- breakaway +- closingmarubozu +- concealbabyswall +- counterattack +- darkcloudcover +- **doji** +- dojistar +- dragonflydoji +- engulfing +- eveningdojistar +- eveningstar +- gapsidesidewhite +- gravestonedoji +- hammer +- hangingman +- harami +- haramicross +- highwave +- hikkake +- hikkakemod +- homingpigeon +- identical3crows +- inneck +- **inside** +- invertedhammer +- kicking +- kickingbylength +- ladderbottom +- longleggeddoji +- longline +- marubozu +- matchinglow +- mathold +- morningdojistar +- morningstar +- onneck +- piercing +- rickshawman +- risefall3methods +- separatinglines +- shootingstar +- shortline +- spinningtop 
+- stalledpattern +- sticksandwich +- takuri +- tasukigap +- thrusting +- tristar +- unique3river +- upsidegap2crows +- xsidegap3methods +- _Heikin-Ashi_: **ha** +- _Z Score_: **cdl_z**
@@ -801,72 +809,73 @@ Back to [Contents](#contents)
### **Cycles** (2) -* _Even Better Sinewave_: **ebsw** -* _Reflex_: **reflex** - * **trendflex** companion + +- _Even Better Sinewave_: **ebsw** +- _Reflex_: **reflex** + - **trendflex** companion
-| _Even Better Sinewave_ (EBSW) | -|:--------:| +| _Even Better Sinewave_ (EBSW) | +| :-----------------------------------: | | ![Example EBSW](/images/SPY_EBSW.png) | -
### **Momentum** (43) -* _Awesome Oscillator_: **ao** -* _Absolute Price Oscillator_: **apo** -* _Bias_: **bias** -* _Balance of Power_: **bop** -* _BRAR_: **brar** -* _Commodity Channel Index_: **cci** -* _Chande Forecast Oscillator_: **cfo** -* _Center of Gravity_: **cg** -* _Chande Momentum Oscillator_: **cmo** -* _Coppock Curve_: **coppock** -* _Connors Relative Strenght Index_: **crsi** -* _Correlation Trend Indicator_: **cti** - * A wrapper for ```ta.linreg(series, r=True)``` -* _Directional Movement_: **dm** -* _Efficiency Ratio_: **er** -* _Elder Ray Index_: **eri** -* _Fisher Transform_: **fisher** -* _Inertia_: **inertia** -* _KDJ_: **kdj** -* _KST Oscillator_: **kst** -* _Moving Average Convergence Divergence_: **macd** -* _Momentum_: **mom** -* _Pretty Good Oscillator_: **pgo** -* _Percentage Price Oscillator_: **ppo** -* _Psychological Line_: **psl** -* _Quantitative Qualitative Estimation_: **qqe** -* _Rate of Change_: **roc** -* _Relative Strength Index_: **rsi** -* _Relative Strength Xtra_: **rsx** -* _Relative Vigor Index_: **rvgi** -* _Schaff Trend Cycle_: **stc** -* _Slope_: **slope** -* _SMI Ergodic_ **smi** -* _Squeeze_: **squeeze** - * Default is John Carter's. Enable Lazybear's with ```lazybear=True``` -* _Squeeze Pro_: **squeeze_pro** -* _Stochastic Oscillator_: **stoch** -* _Fast Stochastic Oscillator_: **stochf** -* _Stochastic RSI_: **stochrsi** -* _TD Sequential_: **td_seq** - * Excluded from ```df.ta.study()```. 
-* _True Momentum Oscillator_: **tmo**
-* _Trix_: **trix**
-* _True strength index_: **tsi**
-* _Ultimate Oscillator_: **uo**
-* _Williams %R_: **willr**
+
+- _Awesome Oscillator_: **ao**
+- _Absolute Price Oscillator_: **apo**
+- _Bias_: **bias**
+- _Balance of Power_: **bop**
+- _BRAR_: **brar**
+- _Commodity Channel Index_: **cci**
+- _Chande Forecast Oscillator_: **cfo**
+- _Center of Gravity_: **cg**
+- _Chande Momentum Oscillator_: **cmo**
+- _Coppock Curve_: **coppock**
+- _Connors Relative Strength Index_: **crsi**
+- _Correlation Trend Indicator_: **cti**
+  - A wrapper for `ta.linreg(series, r=True)`
+- _Directional Movement_: **dm**
+- _Efficiency Ratio_: **er**
+- _Elder Ray Index_: **eri**
+- _Fisher Transform_: **fisher**
+- _Inertia_: **inertia**
+- _KDJ_: **kdj**
+- _KST Oscillator_: **kst**
+- _Moving Average Convergence Divergence_: **macd**
+- _Momentum_: **mom**
+- _Pretty Good Oscillator_: **pgo**
+- _Percentage Price Oscillator_: **ppo**
+- _Psychological Line_: **psl**
+- _Quantitative Qualitative Estimation_: **qqe**
+- _Rate of Change_: **roc**
+- _Relative Strength Index_: **rsi**
+- _Relative Strength Xtra_: **rsx**
+- _Relative Vigor Index_: **rvgi**
+- _Schaff Trend Cycle_: **stc**
+- _Slope_: **slope**
+- _SMI Ergodic_ **smi**
+- _Squeeze_: **squeeze**
+  - Default is John Carter's. Enable Lazybear's with `lazybear=True`
+- _Squeeze Pro_: **squeeze_pro**
+- _Stochastic Oscillator_: **stoch**
+- _Fast Stochastic Oscillator_: **stochf**
+- _Stochastic RSI_: **stochrsi**
+- _TD Sequential_: **td_seq**
+  - Excluded from `df.ta.study()`.
+- _True Momentum Oscillator_: **tmo**
+- _Trix_: **trix**
+- _True strength index_: **tsi**
+- _Ultimate Oscillator_: **uo**
+- _Williams %R_: **willr**
| _Moving Average Convergence Divergence_ (MACD) | -|:--------:| -| ![Example MACD](/images/SPY_MACD.png) | +| :--------------------------------------------: | +| ![Example MACD](/images/SPY_MACD.png) | Back to [Contents](#contents) @@ -874,53 +883,53 @@ Back to [Contents](#contents) ### **Overlap** (36) -* _Bill Williams Alligator_: **alligator** -* _Arnaud Legoux Moving Average_: **alma** -* _Double Exponential Moving Average_: **dema** -* _Exponential Moving Average_: **ema** -* _Fibonacci's Weighted Moving Average_: **fwma** -* _Gann High-Low Activator_: **hilo** -* _High-Low Average_: **hl2** -* _High-Low-Close Average_: **hlc3** - * Commonly known as 'Typical Price' in Technical Analysis literature -* _Hull Exponential Moving Average_: **hma** -* _Holt-Winter Moving Average_: **hwma** -* _Ichimoku Kinkō Hyō_: **ichimoku** - * Returns two DataFrames. For more information: ```help(ta.ichimoku)```. - * ```lookahead=False``` drops the Chikou Span Column to prevent potential data leak. -* _Jurik Moving Average_: **jma** -* _Kaufman's Adaptive Moving Average_: **kama** -* _Linear Regression_: **linreg** -* _Ehler's MESA Adaptive Moving Average_: **mama** - * Includes: **fama** -* _McGinley Dynamic_: **mcgd** -* _Midpoint_: **midpoint** -* _Midprice_: **midprice** -* _Open-High-Low-Close Average_: **ohlc4** -* _Pivots_: **pivots** -* _Pascal's Weighted Moving Average_: **pwma** -* _WildeR's Moving Average_: **rma** -* _Sine Weighted Moving Average_: **sinwma** -* _Simple Moving Average_: **sma** -* _Smoothed Moving Average_: **smma** -* _Ehler's Super Smoother Filter_: **ssf** - * Potential data leak. 
-* _Ehler's Super Smoother Filter (3 Poles)_: **ssf3** -* _Supertrend_: **supertrend** -* _Symmetric Weighted Moving Average_: **swma** -* _T3 Moving Average_: **t3** -* _Triple Exponential Moving Average_: **tema** -* _Triangular Moving Average_: **trima** -* _Variable Index Dynamic Average_: **vidya** -* _Weighted Closing Price_: **wcp** -* _Weighted Moving Average_: **wma** -* _Zero Lag Moving Average_: **zlma** +- _Bill Williams Alligator_: **alligator** +- _Arnaud Legoux Moving Average_: **alma** +- _Double Exponential Moving Average_: **dema** +- _Exponential Moving Average_: **ema** +- _Fibonacci's Weighted Moving Average_: **fwma** +- _Gann High-Low Activator_: **hilo** +- _High-Low Average_: **hl2** +- _High-Low-Close Average_: **hlc3** + - Commonly known as 'Typical Price' in Technical Analysis literature +- _Hull Exponential Moving Average_: **hma** +- _Holt-Winter Moving Average_: **hwma** +- _Ichimoku Kinkō Hyō_: **ichimoku** + - Returns two DataFrames. For more information: `help(ta.ichimoku)`. + - `lookahead=False` drops the Chikou Span Column to prevent potential data leak. +- _Jurik Moving Average_: **jma** +- _Kaufman's Adaptive Moving Average_: **kama** +- _Linear Regression_: **linreg** +- _Ehler's MESA Adaptive Moving Average_: **mama** + - Includes: **fama** +- _McGinley Dynamic_: **mcgd** +- _Midpoint_: **midpoint** +- _Midprice_: **midprice** +- _Open-High-Low-Close Average_: **ohlc4** +- _Pivots_: **pivots** +- _Pascal's Weighted Moving Average_: **pwma** +- _WildeR's Moving Average_: **rma** +- _Sine Weighted Moving Average_: **sinwma** +- _Simple Moving Average_: **sma** +- _Smoothed Moving Average_: **smma** +- _Ehler's Super Smoother Filter_: **ssf** + - Potential data leak. 
+- _Ehler's Super Smoother Filter (3 Poles)_: **ssf3** +- _Supertrend_: **supertrend** +- _Symmetric Weighted Moving Average_: **swma** +- _T3 Moving Average_: **t3** +- _Triple Exponential Moving Average_: **tema** +- _Triangular Moving Average_: **trima** +- _Variable Index Dynamic Average_: **vidya** +- _Weighted Closing Price_: **wcp** +- _Weighted Moving Average_: **wma** +- _Zero Lag Moving Average_: **zlma**
| _Exponential Moving Averages_ (EMA) and _Donchian Channels_ (KC) | -|:--------:| -| ![Example Chart](/images/SPY_Chart.png) | +| :--------------------------------------------------------------: | +| ![Example Chart](/images/SPY_Chart.png) | Back to [Contents](#contents) @@ -930,38 +939,38 @@ Back to [Contents](#contents) Use parameter: cumulative=**True** for cumulative results. -* _Draw Down_: **drawdown** -* _Log Return_: **log_return** -* _Percent Return_: **percent_return** +- _Draw Down_: **drawdown** +- _Log Return_: **log_return** +- _Percent Return_: **percent_return**
| _Log Returns_ (Cumulative) with _Exponential Moving Average_ (EMA) | -|:--------:| -| ![Example Cumulative Percent Return](/images/SPY_CLR.png) | +| :----------------------------------------------------------------: | +| ![Example Cumulative Percent Return](/images/SPY_CLR.png) |
### **Statistics** (11) -* _Entropy_: **entropy** -* _Kurtosis_: **kurtosis** - * Potential data leak. -* _Mean Absolute Deviation_: **mad** -* _Median_: **median** -* _Quantile_: **quantile** -* _Skew_: **skew** - * Potential data leak. -* _Standard Deviation_: **stdev** -* _Think or Swim Standard Deviation All_: **tos_stdevall** - * Potential data leak. -* _Variance_: **variance** -* _Z Score_: **zscore** +- _Entropy_: **entropy** +- _Kurtosis_: **kurtosis** + - Potential data leak. +- _Mean Absolute Deviation_: **mad** +- _Median_: **median** +- _Quantile_: **quantile** +- _Skew_: **skew** + - Potential data leak. +- _Standard Deviation_: **stdev** +- _Think or Swim Standard Deviation All_: **tos_stdevall** + - Potential data leak. +- _Variance_: **variance** +- _Z Score_: **zscore**
-| _Standard Deviation_ (STDEV) | -|:--------:| +| _Standard Deviation_ (STDEV) | +| :-------------------------------------: | | ![Example STDEV](/images/SPY_STDEV.png) | Back to [Contents](#contents) @@ -970,57 +979,58 @@ Back to [Contents](#contents) ### **Transform** (3) -* _Cube Transform_: **cube** - * Potential data leak due to signal shift. -* _Inverse Fisher Transform_: **ifisher** - * Potential data leak due to signal shift. -* _ReMap_: **remap** - -
- -### **Trend** (21) - -* _Average Directional Movement Index_: **adx** - * Also includes **adxr**, **dmp** and **dmn** in the resultant DataFrame. -* _Alpha Trend_: **alphatrend** -* _Archer Moving Averages Trends_: **amat** -* _Aroon & Aroon Oscillator_: **aroon** -* _Choppiness Index_: **chop** -* _Chande Kroll Stop_: **cksp** -* _Decay_: **decay** - * Formally: **linear_decay** -* _Decreasing_: **decreasing** -* _Detrended Price Oscillator_: **dpo** - * Set ```lookahead=False``` to disable centering and remove potential data leak. -* _Increasing_: **increasing** -* _Long Run_: **long_run** -* _Parabolic Stop and Reverse_: **psar** -* _Q Stick_: **qstick** -* _Random Walk Index_: **rwi** -* _Short Run_: **short_run** -* _Trendflex_: **trendflex** - * **reflex** companion -* _Trend Signals_: **tsignals** -* _TTM Trend_: **ttm_trend** -* _Vertical Horizontal Filter_: **vhf** -* _Vortex_: **vortex** -* _Cross Signals_: **xsignals** +- _Cube Transform_: **cube** + - Potential data leak due to signal shift. +- _Inverse Fisher Transform_: **ifisher** + - Potential data leak due to signal shift. +- _ReMap_: **remap** + +
+
+### **Trend** (22)
+
+- _Average Directional Movement Index_: **adx**
+  - Also includes **adxr**, **dmp** and **dmn** in the resultant DataFrame.
+- _Alpha Trend_: **alphatrend**
+- _Archer Moving Averages Trends_: **amat**
+- _Aroon & Aroon Oscillator_: **aroon**
+- _Choppiness Index_: **chop**
+- _Chande Kroll Stop_: **cksp**
+- _Decay_: **decay**
+  - Formerly: **linear_decay**
+- _Decreasing_: **decreasing**
+- _Detrended Price Oscillator_: **dpo**
+  - Set `lookahead=False` to disable centering and remove potential data leak.
+- _Hilbert Transform Trendline_: **ht_trendline**
+- _Increasing_: **increasing**
+- _Long Run_: **long_run**
+- _Parabolic Stop and Reverse_: **psar**
+- _Q Stick_: **qstick**
+- _Random Walk Index_: **rwi**
+- _Short Run_: **short_run**
+- _Trendflex_: **trendflex**
+  - **reflex** companion
+- _Trend Signals_: **tsignals**
+- _TTM Trend_: **ttm_trend**
+- _Vertical Horizontal Filter_: **vhf**
+- _Vortex_: **vortex**
+- _Cross Signals_: **xsignals**
| _Average Directional Movement Index_ (ADX) | -|:--------:| -| ![Example ADX](/images/SPY_ADX.png) | +| :----------------------------------------: | +| ![Example ADX](/images/SPY_ADX.png) |
### **Utility** (5) -* _Above_: **above** -* _Above Value_: **above_value** -* _Below_: **below** -* _Below Value_: **below_value** -* _Cross_: **cross** +- _Above_: **above** +- _Above Value_: **above_value** +- _Below_: **below** +- _Below Value_: **below_value** +- _Cross_: **cross** Back to [Contents](#contents) @@ -1028,59 +1038,59 @@ Back to [Contents](#contents) ### **Volatility** (16) -* _Aberration_: **aberration** -* _Acceleration Bands_: **accbands** -* _Average True Range_: **atr** -* _Average True Range Trailing Stop_: **atrts** -* _Bollinger Bands_: **bbands** -* _Chandelier Exit_: **chandelier_exit** -* _Donchian Channel_: **donchian** -* _Holt-Winter Channel_: **hwc** -* _Keltner Channel_: **kc** -* _Mass Index_: **massi** -* _Normalized Average True Range_: **natr** -* _Price Distance_: **pdist** -* _Relative Volatility Index_: **rvi** -* _Elder's Thermometer_: **thermo** -* _True Range_: **true_range** -* _Ulcer Index_: **ui** - -
- -| _Average True Range_ (ATR) | -|:--------:| +- _Aberration_: **aberration** +- _Acceleration Bands_: **accbands** +- _Average True Range_: **atr** +- _Average True Range Trailing Stop_: **atrts** +- _Bollinger Bands_: **bbands** +- _Chandelier Exit_: **chandelier_exit** +- _Donchian Channel_: **donchian** +- _Holt-Winter Channel_: **hwc** +- _Keltner Channel_: **kc** +- _Mass Index_: **massi** +- _Normalized Average True Range_: **natr** +- _Price Distance_: **pdist** +- _Relative Volatility Index_: **rvi** +- _Elder's Thermometer_: **thermo** +- _True Range_: **true_range** +- _Ulcer Index_: **ui** + +
+ +| _Average True Range_ (ATR) | +| :---------------------------------: | | ![Example ATR](/images/SPY_ATR.png) |
### **Volume** (20) -* _Accumulation/Distribution Index_: **ad** -* _Accumulation/Distribution Oscillator_: **adosc** -* _Archer On-Balance Volume_: **aobv** -* _Chaikin Money Flow_: **cmf** -* _Elder's Force Index_: **efi** -* _Ease of Movement_: **eom** -* _Klinger Volume Oscillator_: **kvo** -* _Money Flow Index_: **mfi** -* _Negative Volume Index_: **nvi** -* _On-Balance Volume_: **obv** -* _Positive Volume Index_: **pvi** -* _Percentage Volume Oscillator_: **pvo** -* _Price-Volume_: **pvol** -* _Price Volume Rank_: **pvr** -* _Price Volume Trend_: **pvt** -* _Volume Heat Map_: **vhm** -* _Volume Profile_: **vp** -* _Volume Weighted Average Price_: **vwap** - * **Requires** the DataFrame index to be a DatetimeIndex -* _Volume Weighted Moving Average_: **vwma** -* _Worden Brothers Time Segmented Value_: **wb_tsv** - -
- -| _On-Balance Volume_ (OBV) | -|:--------:| +- _Accumulation/Distribution Index_: **ad** +- _Accumulation/Distribution Oscillator_: **adosc** +- _Archer On-Balance Volume_: **aobv** +- _Chaikin Money Flow_: **cmf** +- _Elder's Force Index_: **efi** +- _Ease of Movement_: **eom** +- _Klinger Volume Oscillator_: **kvo** +- _Money Flow Index_: **mfi** +- _Negative Volume Index_: **nvi** +- _On-Balance Volume_: **obv** +- _Positive Volume Index_: **pvi** +- _Percentage Volume Oscillator_: **pvo** +- _Price-Volume_: **pvol** +- _Price Volume Rank_: **pvr** +- _Price Volume Trend_: **pvt** +- _Volume Heat Map_: **vhm** +- _Volume Profile_: **vp** +- _Volume Weighted Average Price_: **vwap** + - **Requires** the DataFrame index to be a DatetimeIndex +- _Volume Weighted Moving Average_: **vwma** +- _Worden Brothers Time Segmented Value_: **wb_tsv** + +
+ +| _On-Balance Volume_ (OBV) | +| :---------------------------------: | | ![Example OBV](/images/SPY_OBV.png) | Back to [Contents](#contents) @@ -1088,35 +1098,37 @@ Back to [Contents](#contents)
# **Backtesting** -While Pandas TA is not a backtesting application, it does provide _two_ trend methods that generate trading signals for backtesting purposes: **Trend Signals** (```ta.tsignals()```) and **Cross Signals** (```ta.xsignals()```). Both Signal methods return a DataFrame with columns for the signal's Trend, Trades, Entries and Exits. + +While Pandas TA is not a backtesting application, it does provide _two_ trend methods that generate trading signals for backtesting purposes: **Trend Signals** (`ta.tsignals()`) and **Cross Signals** (`ta.xsignals()`). Both Signal methods return a DataFrame with columns for the signal's Trend, Trades, Entries and Exits. A simple manual backtest using **Trend Signals** can be found in the [TA Analysis Notebook](https://github.com/twopirllc/pandas-ta/blob/main/examples/TA_Analysis.ipynb) starting at _Trend Creation_ cell.
-Trend Signals -------------- -* Useful for signals based on trends or **states**. -* _Examples_ - * **Golden Cross**: ```df.ta.sma(length=50) > df.ta.sma(length=200)``` - * **Positive MACD Histogram**: ```df.ta.macd().iloc[:,1] > 0``` +## Trend Signals + +- Useful for signals based on trends or **states**. +- _Examples_ + - **Golden Cross**: `df.ta.sma(length=50) > df.ta.sma(length=200)` + - **Positive MACD Histogram**: `df.ta.macd().iloc[:,1] > 0` -Cross Signals -------------- -* Useful for Signal Crossings or **events**. -* _Examples_ - * RSI crosses above 30 and then below 70 - * ZSCORE crosses above -2 and then below 2. +## Cross Signals + +- Useful for Signal Crossings or **events**. +- _Examples_ + - RSI crosses above 30 and then below 70 + - ZSCORE crosses above -2 and then below 2.
## Vector BT + _Ideally_ a backtesting application like [**vectorbt**](https://polakowo.io/vectorbt/) should be used. For an example comparing a _Buy and Hold Strategy_ versus a _TA Signal Strategy_, see: [VectorBT Backtest with Pandas TA Notebook](https://github.com/twopirllc/pandas-ta/blob/main/examples/VectorBT_Backtest_with_Pandas_TA.ipynb).
-Trend Signal Example --------------------- +## Trend Signal Example + ```python import pandas_ta as ta import vectorbt as vbt @@ -1140,8 +1152,8 @@ print(pf.returns_stats())
-Cross Signal Example --------------------- +## Cross Signal Example + ```python import pandas_ta as ta import vectorbt as vbt @@ -1167,11 +1179,11 @@ Back to [Contents](#contents)
-
# BETA

Pandas TA also includes basic _Performance Metrics_.
-* :chart_with_upwards_trend: Contributions are welcome to improve and stablize them.
+
+- :chart_with_upwards_trend: Contributions are welcome to improve and stabilize them.
@@ -1191,50 +1203,50 @@ result = ta.cagr(df.close) help(ta.cagr) ``` -Metrics -------- +## Metrics + The current metrics include: -* _Compounded Annual Growth Rate_: **cagr** -* _Calmar Ratio_: **calmar_ratio** -* _Downside Deviation_: **downside_deviation** -* _Jensen's Alpha_: **jensens_alpha** -* _Log Max Drawdown_: **log_max_drawdown** -* _Max Drawdown_: **max_drawdown** -* _Pure Profit Score_: **pure_profit_score** -* _Sharpe Ratio_: **sharpe_ratio** -* _Sortino Ratio_: **sortino_ratio** -* _Volatility_: **volatility** + +- _Compounded Annual Growth Rate_: **cagr** +- _Calmar Ratio_: **calmar_ratio** +- _Downside Deviation_: **downside_deviation** +- _Jensen's Alpha_: **jensens_alpha** +- _Log Max Drawdown_: **log_max_drawdown** +- _Max Drawdown_: **max_drawdown** +- _Pure Profit Score_: **pure_profit_score** +- _Sharpe Ratio_: **sharpe_ratio** +- _Sortino Ratio_: **sortino_ratio** +- _Volatility_: **volatility** Back to [Contents](#contents)
-TODO
-----
+## TODO

-| **Status** | **Remaining TA Lib Indicators** |
-| - | - |
-| ☐ | Indicators: ```ht_dcperiod```, ```ht_dcphase```, ```ht_phasor```, ```ht_sine```, ```ht_trendline```, ```ht_trendmode``` |
-| ☐ | **Numpy**/**Numba**_-ify_ base indicators |
+| **Status** | **Remaining TA Lib Indicators** |
+| ---------- | ----------------------------------------------------------------------------------------------- |
+| ☐ | Indicators: `ht_dcperiod`, `ht_dcphase`, `ht_phasor`, `ht_sine`, `ht_trendmode` |
+| ☐ | **Numpy**/**Numba**_-ify_ base indicators |
-| **Status** | **Config System** | -| - | - | -| ☐ | Candlesticks | -| ☐ | DataFrame Extension property: ```config``` | -| ☐ | JSON Config File | -| | ☐ JSON Config File Format | +| **Status** | **Config System** | +| ---------- | -------------------------------------- | +| ☐ | Candlesticks | +| ☐ | DataFrame Extension property: `config` | +| ☐ | JSON Config File | +| | ☐ JSON Config File Format |
-| **Status** | **Stabilize** | -| - | - | -| ☐ | Trading Signals | -| | ☐ Trend Signals | -| | ☐ Cross Signals | -| ☐ | Performance Metrics | -| ✔ | Better argument validation | +| **Status** | **Stabilize** | +| ---------- | -------------------------- | +| ☐ | Trading Signals | +| | ☐ Trend Signals | +| | ☐ Cross Signals | +| ☐ | Performance Metrics | +| ✔ | Better argument validation |
@@ -1243,10 +1255,13 @@ Back to [Contents](#contents)
# **Sources** + ### Technical Analysis + [Original TA-LIB](https://ta-lib.org/) | [TradingView](http://www.tradingview.com) | [Sierra Chart](https://search.sierrachart.com/?Query=indicators&submitted=true) | [MQL5](https://www.mql5.com) | [FM Labs](https://www.fmlabs.com/reference/default.htm) | [Pro Real Code](https://www.prorealcode.com/prorealtime-indicators) | [User 42](https://user42.tuxfamily.org/chart/manual/index.html) | [Technical Traders](http://technical.traders.com/tradersonline/FeedTT-2014.html) ### Supplemental + [What Every Computer Scientist Should Know About Floating-Point Arithmetic](https://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html)
@@ -1254,12 +1269,14 @@ Back to [Contents](#contents) # **Support** Like the package, want more indicators and features? Continued Support? -* Donations help cover data and API costs so platform indicators (like [TradingView](https://github.com/tradingview/)) are accurate. -* I appreciate **ALL** of those that have bought me Coffee/Beer/Wine et al. I greatly appreciate it! 😎 + +- Donations help cover data and API costs so platform indicators (like [TradingView](https://github.com/tradingview/)) are accurate. +- I appreciate **ALL** of those that have bought me Coffee/Beer/Wine et al. I greatly appreciate it! 😎
### Consider + [!["Buy Me A Coffee"](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://www.buymeacoffee.com/twopirllc)
diff --git a/pandas_ta/_typing.py b/pandas_ta/_typing.py index 0c796245..12bb924f 100644 --- a/pandas_ta/_typing.py +++ b/pandas_ta/_typing.py @@ -59,4 +59,4 @@ MaybeSeriesFrame = Union[T, Series, DataFrame] AnyArray = Union[Array, Series, DataFrame] AnyArray1d = Union[Array1d, Series] -AnyArray2d = Union[Array2d, DataFrame] \ No newline at end of file +AnyArray2d = Union[Array2d, DataFrame] diff --git a/pandas_ta/overlap/mama.py b/pandas_ta/overlap/mama.py index e2994457..97b5160e 100644 --- a/pandas_ta/overlap/mama.py +++ b/pandas_ta/overlap/mama.py @@ -11,37 +11,39 @@ # http://traders.com/documentation/feedbk_docs/2014/01/traderstips.html @njit def np_mama(x, fastlimit, slowlimit, prenan): - a1, a2 = 0.0962, 0.5769 - p_w, smp_w, smp_w_c = 0.2, 0.33, 0.67 # smp_w + smp_w_c = 1 - - sm = zeros_like(x) - dt, smp, q1, q2 = sm.copy(), sm.copy(), sm.copy(), sm.copy() - i1, i2, jI, jQ = sm.copy(), sm.copy(), sm.copy(), sm.copy() - re, im, alpha = sm.copy(), sm.copy(), sm.copy() - period, phase, mama, fama = sm.copy(), sm.copy(), sm.copy(), sm.copy() - - # Ehler's starts from 6, TV-LB starts at 3, TALib 32 - n = x.size - for i in range(3, n): - w_period = .075 * period[i - 1] + .54 - - # Smoother and Detrend the Smoother - sm[i] = 0.4 * x[i] + 0.3 * x[i - 1] + 0.2 * x[i - 2] + 0.1 * x[i - 3] - dt[i] = w_period * (a1 * sm[i] + a2 * sm[i - 2] - a2 * sm[i - 4] - a1 * sm[i - 6]) + a, b, m = 0.0962, 0.5769, x.size + p_w, smp_w, smp_w_c = 0.2, 0.33, 0.67 + + wma4 = zeros_like(x) + dt, smp = zeros_like(x), zeros_like(x) + i1, i2 = zeros_like(x), zeros_like(x) + ji, jq = zeros_like(x), zeros_like(x) + q1, q2 = zeros_like(x), zeros_like(x) + re, im, alpha = zeros_like(x), zeros_like(x), zeros_like(x) + period, phase = zeros_like(x), zeros_like(x) + mama, fama = zeros_like(x), zeros_like(x) + + # Ehler's starts from 6, TV-LB from 3, TALib from 32 + for i in range(3, m): + adj_prev_period = 0.075 * period[i - 1] + 0.54 + + # WMA(x,4) & Detrended WMA(x,4) + wma4[i] = 0.4 * x[i] + 
0.3 * x[i - 1] + 0.2 * x[i - 2] + 0.1 * x[i - 3] + dt[i] = adj_prev_period * (a * wma4[i] + b * wma4[i - 2] - b * wma4[i - 4] - a * wma4[i - 6]) # Quadrature(Detrender) and In Phase Component - q1[i] = w_period * (a1 * dt[i] + a2 * dt[i - 2] - a2 * dt[i - 4] - a1 * dt[i - 6]) + q1[i] = adj_prev_period * (a * dt[i] + b * dt[i - 2] - b * dt[i - 4] - a * dt[i - 6]) i1[i] = dt[i - 3] - # Phase advance I1 and Q1 by 90 degrees - jI[i] = w_period * (a1 * i1[i] + a2 * i1[i - 2] - a2 * i1[i - 4] - a1 * i1[i - 6]) - jQ[i] = w_period * (a1 * q1[i] + a2 * q1[i - 2] - a2 * q1[i - 4] - a1 * q1[i - 6]) + # Phase Q1 and I1 by 90 degrees + ji[i] = adj_prev_period * (a * i1[i] + b * i1[i - 2] - b * i1[i - 4] - a * i1[i - 6]) + jq[i] = adj_prev_period * (a * q1[i] + b * q1[i - 2] - b * q1[i - 4] - a * q1[i - 6]) # Phasor Addition for 3 Bar Averaging - i2[i] = i1[i] - jQ[i] - q2[i] = q1[i] + jI[i] + i2[i] = i1[i] - jq[i] + q2[i] = q1[i] + ji[i] - # Smooth I and Q components + # Smooth I2 & Q2 i2[i] = p_w * i2[i] + (1 - p_w) * i2[i - 1] q2[i] = p_w * q2[i] + (1 - p_w) * q2[i - 1] @@ -49,6 +51,7 @@ def np_mama(x, fastlimit, slowlimit, prenan): re[i] = i2[i] * i2[i - 1] + q2[i] * q2[i - 1] im[i] = i2[i] * q2[i - 1] + q2[i] * i2[i - 1] + # Smooth Re & Im re[i] = p_w * re[i] + (1 - p_w) * re[i - 1] im[i] = p_w * im[i] + (1 - p_w) * im[i - 1] diff --git a/pandas_ta/overlap/pivots.py b/pandas_ta/overlap/pivots.py index dee6e703..30ab778c 100644 --- a/pandas_ta/overlap/pivots.py +++ b/pandas_ta/overlap/pivots.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from numpy import NaN, greater, zeros_like +from numpy import greater, nan, zeros_like from numba import njit from pandas import DataFrame, Series, Timedelta, infer_freq, to_datetime from pandas_ta._typing import Array, DictLike @@ -197,7 +197,7 @@ def pivots( # Create nan arrays for "demark" and "fibonacci" pivots _nan_array = zeros_like(np_close) - _nan_array[:] = NaN + _nan_array[:] = nan tp = s1 = s2 = s3 = s4 = r1 = r2 = r3 = r4 = 
_nan_array # Calculate diff --git a/pandas_ta/overlap/wma.py b/pandas_ta/overlap/wma.py index d13be9e7..c98d448c 100644 --- a/pandas_ta/overlap/wma.py +++ b/pandas_ta/overlap/wma.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -from numpy import arange, dot +from numpy import arange, dot, float64, nan, zeros_like +from numba import njit from pandas import Series from pandas_ta._typing import DictLike, Int from pandas_ta.maps import Imports @@ -12,6 +13,25 @@ ) +@njit +def np_wma(x, n, asc, prenan): + m = x.size + w = arange(1, n + 1, dtype=float64) + result = zeros_like(x, dtype=float64) + + if not asc: + w = w[::-1] + + for i in range(n - 1, m): + result[i] = (w * x[i - n + 1:i + 1]).sum() + result *= 2 / (n * n + n) + + if prenan: + result[:n - 1] = nan + + return result + + def wma( close: Series, length: Int = None, asc: bool = None, talib: bool = None, @@ -56,17 +76,9 @@ def wma( from talib import WMA wma = WMA(close, length) else: - total_weight = 0.5 * length * (length + 1) - weights_ = Series(arange(1, length + 1)) - weights = weights_ if asc else weights_[::-1] - - def linear(w): - def _compute(x): - return dot(x, w) / total_weight - return _compute - - close_ = close.rolling(length, min_periods=length) - wma = close_.apply(linear(weights), raw=True) + np_close = close.values + wma_ = np_wma(np_close, length, asc, True) + wma = Series(wma_, index=close.index) # Offset if offset != 0: diff --git a/pandas_ta/trend/alphatrend.py b/pandas_ta/trend/alphatrend.py index fd8299d3..f4a39252 100644 --- a/pandas_ta/trend/alphatrend.py +++ b/pandas_ta/trend/alphatrend.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from numpy import isnan, nan, zeros +from numpy import isnan, nan, zeros_like from numba import njit from pandas import DataFrame, Series from pandas_ta._typing import Array, DictLike, Int, IntFloat @@ -16,11 +16,10 @@ ) -# Alphatrend alpha threshold calculation @njit def np_alpha(low_atr, high_atr, momo_threshold): m = momo_threshold.size - alpha = zeros(m) + alpha 
= zeros_like(low_atr) for i in range(1, m): if momo_threshold[i]: diff --git a/pandas_ta/trend/ht_trendline.py b/pandas_ta/trend/ht_trendline.py index 235a832a..4e626713 100644 --- a/pandas_ta/trend/ht_trendline.py +++ b/pandas_ta/trend/ht_trendline.py @@ -1,95 +1,92 @@ # -*- coding: utf-8 -*- -from numpy import nan, zeros_like, arctan, zeros +from numpy import arctan, copy, isnan, nan, rad2deg, zeros_like, zeros from numba import njit -from pandas import DataFrame, Series +from pandas import Series from pandas_ta._typing import DictLike, Int, IntFloat from pandas_ta.maps import Imports -from pandas_ta.utils import v_offset, v_series, v_talib +from pandas_ta.utils import ( + v_bool, + v_offset, + v_pos_default, + v_series, + v_talib +) @njit def np_ht_trendline(x): - # Variables used for the Hilbert Transformation - a, b = 0.0962, 0.5769 - rad_to_deg = 45 / arctan(1) - period, smooth_period = 0.0, 0.0 - - m = x.size - smooth_price = zeros(m) - de_trender = zeros(m) - q1 = zeros(m) - i1 = zeros(m) - i2 = zeros(m) - q2 = zeros(m) - _re = zeros(m) - _im = zeros(m) - i_trend = zeros(m) - trend_line = zeros(m) - - for i in range(x.size): - if i < 50: - smooth_price[i] = 0 - else: - smooth_price[i] = (4 * x[i] + 3 * x[i - 1] + 2 * x[i - 2] + x[i - 3]) / 10 - - adjusted_prev_period = 0.075 * period + 0.54 - - de_trender[i] = (a * smooth_price[i] + b * smooth_price[i - 2] - - b * smooth_price[i - 4] - a * smooth_price[i - 6]) * adjusted_prev_period - - q1[i] = (a * de_trender[i] + b * de_trender[i - 2] - - b * de_trender[i - 4] - a * de_trender[i - 6]) * adjusted_prev_period - i1[i] = de_trender[i - 3] - ji = (a * i1[i] + b * i1[i - 2] - b * i1[i - 4] - a * i1[i - 6]) * adjusted_prev_period - jq = (a * q1[i] + b * q1[i - 2] - b * q1[i - 4] - a * q1[i - 6]) * adjusted_prev_period - - i2[i] = i1[i] - jq - q2[i] = q1[i] + ji + a, b, m = 0.0962, 0.5769, x.size + + wma4, dt = zeros_like(x), zeros_like(x) + q1, q2 = zeros_like(x), zeros_like(x) + ji, jq = zeros_like(x), 
zeros_like(x) + i1, i2 = zeros_like(x), zeros_like(x) + re, im = zeros_like(x), zeros_like(x) + period, smooth_period = zeros_like(x), zeros_like(x) + i_trend = zeros_like(x) + + result = zeros_like(x) + result[:13] = x[:13] + + # Ehler's starts from 6, TALib from 63 + for i in range(6, m): + adj_prev_period = 0.075 * period[i - 1] + 0.54 + + wma4[i] = 0.4 * x[i] + 0.3 * x[i - 1] + 0.2 * x[i - 2] + 0.1 * x[i - 3] + dt[i] = adj_prev_period * (a * wma4[i] + b * wma4[i - 2] - b * wma4[i - 4] - a * wma4[i - 6]) + + q1[i] = adj_prev_period * (a * dt[i] + b * dt[i - 2] - b * dt[i - 4] - a * dt[i - 6]) + i1[i] = dt[i - 3] + + ji[i] = adj_prev_period * (a * i1[i] + b * i1[i - 2] - b * i1[i - 4] - a * i1[i - 6]) + jq[i] = adj_prev_period * (a * q1[i] + b * q1[i - 2] - b * q1[i - 4] - a * q1[i - 6]) + + i2[i] = i1[i] - jq[i] + q2[i] = q1[i] + ji[i] i2[i] = 0.2 * i2[i] + 0.8 * i2[i - 1] q2[i] = 0.2 * q2[i] + 0.8 * q2[i - 1] - _re[i] = i2[i] * i2[i - 1] + q2[i] * q2[i - 1] - _im[i] = i2[i] * q2[i - 1] - q2[i] * i2[i - 1] - - _re[i] = 0.2 * _re[i] + 0.8 * _re[i - 1] - _im[i] = 0.2 * _im[i] + 0.8 * _im[i - 1] - - new_period = 0 - if _re[i] != 0 and _im[i] != 0: - new_period = 360 / (arctan(_im[i]/_re[i]) * rad_to_deg) - if new_period > 1.5 * period: - new_period = 1.5 * period - if new_period < 0.67 * period: - new_period = 0.67 * period - if new_period < 6: - new_period = 6 - if new_period > 50: - new_period = 50 - period = 0.2 * new_period + 0.8 * period - smooth_period = 0.33 * period + 0.67 * smooth_period - - dc_period = int(smooth_period + 0.5) - temp_real = 0 + re[i] = i2[i] * i2[i - 1] + q2[i] * q2[i - 1] + im[i] = i2[i] * q2[i - 1] - q2[i] * i2[i - 1] + + re[i] = 0.2 * re[i] + 0.8 * re[i - 1] + im[i] = 0.2 * im[i] + 0.8 * im[i - 1] + + if re[i] != 0 and im[i] != 0: + period[i] = 360.0 / rad2deg(arctan(im[i] / re[i])) + if period[i] > 1.5 * period[i - 1]: + period[i] = 1.5 * period[i - 1] + if period[i] < 0.67 * period[i - 1]: + period[i] = 0.67 * period[i - 1] + if 
period[i] < 6.0: + period[i] = 6.0 + if period[i] > 50.0: + period[i] = 50.0 + period[i] = 0.2 * period[i] + 0.8 * period[i - 1] + smooth_period[i] = 0.33 * period[i] + 0.67 * smooth_period[i - 1] + + dc_period = int(smooth_period[i] + 0.5) + dcp_avg = 0 for k in range(dc_period): - temp_real += x[i - k] + dcp_avg += x[i - k] if dc_period > 0: - temp_real /= dc_period + dcp_avg /= dc_period - i_trend[i] = temp_real + i_trend[i] = dcp_avg - if i < 12: - trend_line[i] = x[i] - else: - trend_line[i] = (4 * i_trend[i] + 3 * i_trend[i - 1] + 2 * i_trend[i - 2] + i_trend[i - 3]) / 10.0 + if i > 12: + result[i] = 0.4 * i_trend[i] + 0.3 * i_trend[i - 1] + 0.2 * i_trend[i - 2] + 0.1 * i_trend[i - 3] - return trend_line + return result def ht_trendline( - close: Series = None, talib: bool = None, offset: Int = None, **kwargs: DictLike -) -> DataFrame: + close: Series = None, talib: bool = None, + prenan: Int = None, offset: Int = None, + **kwargs: DictLike +) -> Series: """Hilbert Transform TrendLine (Also known as Instantaneous TrendLine) By removing Dominant Cycle (DC) of the time-series from itself, ht_trendline is calculated. @@ -100,7 +97,9 @@ def ht_trendline( Args: close (pd.Series): Series of 'close's. talib (bool): If TA Lib is installed and talib is True, Returns - the TA Lib version. Default: None + the TA Lib version. Default: True + prenan (int): Prenans to apply. Ehler's 6 or 12, TALib 63 + Default: 63 offset (int, optional): How many periods to offset the result. Default: 0 Kwargs: @@ -111,38 +110,40 @@ def ht_trendline( pd.DataFrame: Hilbert Transformation Instantaneous Trend-line. 
""" # Validate - _length = 1 - close = v_series(close, _length) + prenan = v_pos_default(prenan, 63) + close = v_series(close, prenan) if close is None: return mode_tal = v_talib(talib) + offset = v_offset(offset) + if Imports["talib"] and mode_tal: from talib import HT_TRENDLINE - trend_line = HT_TRENDLINE(close) + tl = HT_TRENDLINE(close) else: - # calculate ht_trendline using numba np_close = close.values - trend_line = np_ht_trendline(np_close) + np_tl = np_ht_trendline(np_close) - offset = v_offset(offset) + if prenan > 0: + np_tl[:prenan] = nan + tl = Series(np_tl, index=close.index) + + if all(isnan(tl)): + return # Emergency Break # Offset if offset != 0: - trend_line = trend_line.shift(offset) + trend_line = tl.shift(offset) # Fill if "fillna" in kwargs: - trend_line.fillna(kwargs["fillna"], inplace=True) + tl.fillna(kwargs["fillna"], inplace=True) if "fill_method" in kwargs: - trend_line.fillna(method=kwargs["fill_method"], inplace=True) + tl.fillna(method=kwargs["fill_method"], inplace=True) - data = { - "ht_trendline": trend_line, - } - df = DataFrame(data, index=close.index) - df.name = "ht_trendline" - df.category = "trend" + tl.name = f"HT_TL" + tl.category = "trend" - return df + return tl diff --git a/pandas_ta/trend/rwi.py b/pandas_ta/trend/rwi.py index 89d185e7..92cea06e 100644 --- a/pandas_ta/trend/rwi.py +++ b/pandas_ta/trend/rwi.py @@ -66,8 +66,8 @@ def rwi( ) if all(isnan(atr_)): return # Emergency Break - denom = atr_ * (length ** 0.5) + denom = atr_ * (length ** 0.5) rwi_high = (high - low.shift(length)) / denom rwi_low = (high.shift(length) - low) / denom diff --git a/pandas_ta/utils/_core.py b/pandas_ta/utils/_core.py index c4436677..e25c678f 100644 --- a/pandas_ta/utils/_core.py +++ b/pandas_ta/utils/_core.py @@ -93,7 +93,7 @@ def signed_series(series: Series, initial: Int, lag: Int = None) -> Series: Default Example: series = Series([3, 2, 2, 1, 1, 5, 6, 6, 7, 5]) and returns: - sign = Series([NaN, -1.0, 0.0, -1.0, 0.0, 1.0, 1.0, 0.0, 
1.0, -1.0]) + sign = Series([nan, -1.0, 0.0, -1.0, 0.0, 1.0, 1.0, 0.0, 1.0, -1.0]) """ initial = None if initial is not None and not isinstance(lag, str): @@ -138,7 +138,7 @@ def tal_ma(name: str) -> Int: return 0 # Default: SMA -> 0 -def unsigned_differences(series: Series, amount: Int = None, +def unsigned_differences(series: Series, lag: Int = None, **kwargs) -> Union[Series, Series]: """Unsigned Differences Returns two Series, an unsigned positive and unsigned negative series based @@ -150,8 +150,8 @@ def unsigned_differences(series: Series, amount: Int = None, positive = Series([0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0]) negative = Series([0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1]) """ - amount = int(amount) if amount is not None else 1 - negative = series.diff(amount) + lag = int(lag) if lag is not None else 1 + negative = series.diff(lag) negative.fillna(0, inplace=True) positive = negative.copy() diff --git a/pandas_ta/utils/_math.py b/pandas_ta/utils/_math.py index ce0765e8..c2f174cb 100644 --- a/pandas_ta/utils/_math.py +++ b/pandas_ta/utils/_math.py @@ -4,9 +4,13 @@ from operator import mul from sys import float_info as sflt -from numpy import all, append, array, corrcoef, dot, exp, fabs -from numpy import log, nan, ndarray, ones, seterr, sign, sqrt, sum, triu +from numpy import ( + all, append, array, corrcoef, dot, exp, fabs, float64, + log, nan, ndarray, ones, seterr, sign, sqrt, sum, triu, + zeros +) from pandas import DataFrame, Series +from numba import njit from pandas_ta._typing import ( Array, @@ -78,31 +82,21 @@ def erf(x: IntFloat) -> Float: * t + a1) * t * exp(-x * x) return x_sign * y # erf(-x) = -erf(x) -def fibonacci( - n: Int = 2, weighted: bool = False, zero: bool = False -) -> Array: - """Fibonacci Sequence as a numpy array""" - n = int(fabs(n)) if n >= 0 else 2 - if zero: - a, b = 0, 1 - else: - n -= 1 - a, b = 1, 1 +@njit +def fibonacci(n, weighted): + n = n if n > 1 else 2 + sqrt5 = sqrt(5.0) + phi, psi = 0.5 * (1.0 + sqrt5), 0.5 * (1.0 - sqrt5) - 
result = array([a]) - for _ in range(0, n): - a, b = b, a + b - result = append(result, a) + result = zeros(n) + for i in range(0, n): + result[i] = float(phi ** (i + 1) - psi ** (i + 1)) / sqrt5 if weighted: - fib_sum = sum(result) - if fib_sum > 0: - return result / fib_sum - else: - return result - else: - return result + return result / result.sum() + return result + def geometric_mean(series: Series) -> Float: """Returns the Geometric Mean for a Series of positive values.""" @@ -216,7 +210,7 @@ def strided_window(x: Array, length: Int) -> Array: def symmetric_triangle( n: Int = None, weighted: bool = False ) -> Optional[List[int]]: - """Symmetric Triangle with n >= 2 + """Symmetric Triangle whenever n >= 2 Returns a numpy array of the nth row of Symmetric Triangle. n=4 => triangle: [1, 2, 2, 1] diff --git a/pandas_ta/utils/_signals.py b/pandas_ta/utils/_signals.py index 65bdb626..c5191fda 100644 --- a/pandas_ta/utils/_signals.py +++ b/pandas_ta/utils/_signals.py @@ -135,7 +135,7 @@ def cross( cross = current & previous # ensure there is no cross on the first entry - cross[0] = False + cross.iloc[0] = False if asint: cross = cross.astype(int) diff --git a/pandas_ta/volatility/atrts.py b/pandas_ta/volatility/atrts.py index 43c5b4e5..18965805 100644 --- a/pandas_ta/volatility/atrts.py +++ b/pandas_ta/volatility/atrts.py @@ -135,13 +135,13 @@ def atrts( if offset != 0: atrts = atrts.shift(offset) - # Handle fills + # Fill if "fillna" in kwargs: atrts.fillna(kwargs["fillna"], inplace=True) if "fill_method" in kwargs: atrts.fillna(method=kwargs["fill_method"], inplace=True) - # Name and Categorize it + # Name and Category _props = f"ATRTS{mamode[0]}{'p' if percent else ''}" atrts.name = f"{_props}_{length}_{ma_length}_{multiplier}" atrts.category = "volatility" diff --git a/pandas_ta/volume/mfi.py b/pandas_ta/volume/mfi.py index fae9db11..f5a4c546 100644 --- a/pandas_ta/volume/mfi.py +++ b/pandas_ta/volume/mfi.py @@ -1,9 +1,12 @@ # -*- coding: utf-8 -*- -from pandas 
import DataFrame, Series +from sys import float_info as sflt +from numpy import convolve, maximum, nan, ones, roll, where +from pandas import Series from pandas_ta._typing import DictLike, Int from pandas_ta.maps import Imports from pandas_ta.overlap import hlc3 from pandas_ta.utils import ( + np_non_zero_range, v_drift, v_offset, v_pos_default, @@ -63,27 +66,18 @@ def mfi( from talib import MFI mfi = MFI(high, low, close, volume, length) else: - typical_price = hlc3(high=high, low=low, close=close, talib=mode_tal) - raw_money_flow = typical_price * volume - - tdf = DataFrame({ - "diff": 0, - "rmf": raw_money_flow, - "+mf": 0, - "-mf": 0 - }) - - tdf.loc[(typical_price.diff(drift) > 0), "diff"] = 1 - tdf.loc[tdf["diff"] == 1, "+mf"] = raw_money_flow - - tdf.loc[(typical_price.diff(drift) < 0), "diff"] = -1 - tdf.loc[tdf["diff"] == -1, "-mf"] = raw_money_flow - - psum = tdf["+mf"].rolling(length).sum() - nsum = tdf["-mf"].rolling(length).sum() - # tdf["mr"] = psum / nsum - mfi = 100 * psum / (psum + nsum) - # tdf["mfi"] = mfi + m, _ones = close.size, ones(length) + + tp = (high.values + low.values + close.values) / 3.0 + smf = tp * volume.values * where(tp > roll(tp, shift=drift), 1, -1) + + pos, neg = maximum(smf, 0), maximum(-smf, 0) + avg_gain, avg_loss = convolve(pos, _ones)[:m], convolve(neg, _ones)[:m] + + _mfi = (100.0 * avg_gain) / (avg_gain + avg_loss + sflt.epsilon) + _mfi[:length] = nan + + mfi = Series(_mfi, index=close.index) # Offset if offset != 0: diff --git a/pandas_ta/volume/vp.py b/pandas_ta/volume/vp.py index 27487ec9..1ca3d157 100644 --- a/pandas_ta/volume/vp.py +++ b/pandas_ta/volume/vp.py @@ -1,4 +1,6 @@ # -*- coding: utf-8 -*- +from warnings import simplefilter + from numpy import array_split, mean, sum from pandas import cut, concat, DataFrame, Series from pandas_ta._typing import DictLike, Int @@ -67,10 +69,12 @@ def vp( total_volume_col = f"total_{volume_col}" vp.columns = [close_col, pos_volume_col, neg_volume_col, neut_volume_col] + 
simplefilter(action="ignore", category=FutureWarning) # sort: Sort by close before splitting into ranges. Default: False # If False, it sorts by date index or chronological versus by price if sort: vp[mean_price_col] = vp[close_col] + vpdf = vp.groupby( cut(vp[close_col], width, include_lowest=True, precision=2), observed=False diff --git a/pandas_ta/volume/vwap.py b/pandas_ta/volume/vwap.py index 5b7cd6ea..f9ccaaf5 100644 --- a/pandas_ta/volume/vwap.py +++ b/pandas_ta/volume/vwap.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from warnings import simplefilter from pandas import DataFrame, Series from pandas_ta._typing import DictLike, Int, List from pandas_ta.overlap import hlc3 @@ -73,6 +74,7 @@ def vwap( # Calculate _props = f"VWAP_{anchor}" wp = typical_price * volume + simplefilter(action="ignore", category=UserWarning) vwap = wp.groupby(wp.index.to_period(anchor)).cumsum() \ / volume.groupby(volume.index.to_period(anchor)).cumsum() diff --git a/requirements.txt b/requirements.txt index 7d89b7ae..1d2f914b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,6 +7,7 @@ numpy==1.26.4 pandas==2.2.0 pandas-datareader==0.10.0 pyarrow==15.0.0 +scipy==1.12.0 streamlit==1.31.0 TA-Lib==0.4.28 yfinance==0.2.36 diff --git a/setup.py b/setup.py index 70298e85..70770438 100644 --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ "pandas_ta.volatility", "pandas_ta.volume" ], - version=".".join(("0", "4", "10b")), + version=".".join(("0", "4", "11b")), description=long_description, long_description=long_description, author="Kevin Johnson", diff --git a/tests/test_indicator_momentum.py b/tests/test_indicator_momentum.py index c4b83311..709456bc 100644 --- a/tests/test_indicator_momentum.py +++ b/tests/test_indicator_momentum.py @@ -376,10 +376,9 @@ def test_rsx(df): assert result.name == "RSX_14" -@mark.skip(reason="AttributeError: 'Series' object has no attribute 'df'") def test_rvgi(df): - result = ta.rvgi(df.open, df.high, df.low. 
df.close) # Weird Exception - assert isinstance(result, Series) + result = ta.rvgi(df.open, df.high, df.low, df.close) + assert isinstance(result, DataFrame) assert result.name == "RVGI_14_4" diff --git a/tests/test_indicator_overlap.py b/tests/test_indicator_overlap.py index 6401d4af..c3b16d16 100644 --- a/tests/test_indicator_overlap.py +++ b/tests/test_indicator_overlap.py @@ -266,11 +266,18 @@ def test_mama(df): pdt.assert_frame_equal(result, expecteddf) except AssertionError: try: - corr = ta.utils.df_error_analysis(result, expected) - print(f"{corr=}") - assert corr > CORRELATION_THRESHOLD + mama_corr = ta.utils.df_error_analysis(result.iloc[:, 0], expecteddf.iloc[:, 0]) + assert mama_corr > CORRELATION_THRESHOLD + print(f"{mama_corr=}") except Exception as ex: - error_analysis(result, CORRELATION, ex) + error_analysis(result.iloc[:, 0], CORRELATION, ex) + + try: + fama_corr = ta.utils.df_error_analysis(result.iloc[:, 1], expecteddf.iloc[:, 1]) + assert fama_corr > CORRELATION_THRESHOLD + print(f"{fama_corr=}") + except Exception as ex: + error_analysis(result.iloc[:, 1], CORRELATION, ex) result = ta.mama(df.close) assert isinstance(result, DataFrame) diff --git a/tests/test_indicator_trend.py b/tests/test_indicator_trend.py index a397915d..a9f528ea 100644 --- a/tests/test_indicator_trend.py +++ b/tests/test_indicator_trend.py @@ -180,6 +180,27 @@ def test_dpo(df): assert result.name == "DPO_20" +def test_ht_trendline(df): + result = ta.ht_trendline(df.close, talib=False) + assert isinstance(result, Series) + assert result.name == "HT_TL" + + try: + expected = tal.HT_TRENDLINE(df.close) + pdt.assert_series_equal(result, expected, check_names=False) + except AssertionError: + try: + corr = ta.utils.df_error_analysis(result, expected) + print(f"{corr=}") + assert corr > CORRELATION_THRESHOLD + except Exception as ex: + error_analysis(result, CORRELATION, ex) + + result = ta.ht_trendline(df.close) + assert isinstance(result, Series) + assert result.name == 
"HT_TL" + + def test_increasing(df): result = ta.increasing(df.close) assert isinstance(result, Series) diff --git a/tests/test_studies.py b/tests/test_studies.py index 20a060f9..e57b1781 100644 --- a/tests/test_studies.py +++ b/tests/test_studies.py @@ -9,7 +9,7 @@ [pytest.param(ta.CommonStudy, id="common"), pytest.param(ta.AllStudy, id="all")] # +/- when adding/removing indicators -ALL_COLUMNS = 323 +ALL_COLUMNS = 324 def test_all_study_props(all_study): @@ -32,7 +32,7 @@ def test_common_study_props(common_study): @pytest.mark.parametrize("category,columns", [ ("candles", 70), ("cycles", 2), ("momentum", 78), ("overlap", 56), - ("performance", 2), ("statistics", 16), ("transform", 5), ("trend", 30), + ("performance", 2), ("statistics", 16), ("transform", 5), ("trend", 31), ("volatility", 36), ("volume", 28), pytest.param(ta.AllStudy, ALL_COLUMNS, id=f"all-{ALL_COLUMNS}"), pytest.param(ta.CommonStudy, 5, id="common-5"), @@ -89,7 +89,7 @@ def test_study_custom_e_talib(df, custom_study_e, talib): @pytest.mark.parametrize("talib", [False, True]) def test_study_all_multirun_talib(df, all_study, talib): - new_columns = 612 # +/- when adding/removing indicators + new_columns = 613 # +/- when adding/removing indicators initial_columns = df.shape[1] df.ta.study(all_study, length=10, cores=0, talib=talib) df.ta.study(all_study, length=50, cores=0, talib=talib) diff --git a/tests/test_utils.py b/tests/test_utils.py index 786729c0..9aa53bfe 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -150,26 +150,17 @@ def test_df_dates(df): def test_fibonacci(): - np.testing.assert_array_equal(ta.utils.fibonacci(zero=True), np.array([0, 1, 1])) - np.testing.assert_array_equal(ta.utils.fibonacci(zero=False), np.array([1, 1])) + np.testing.assert_array_equal(ta.utils.fibonacci(0, False), np.array([1, 1])) + np.testing.assert_array_equal(ta.utils.fibonacci(5, False), np.array([1, 1, 2, 3, 5])) - np.testing.assert_array_equal(ta.utils.fibonacci(n=0, zero=True, weighted=False), 
np.array([0])) - np.testing.assert_array_equal(ta.utils.fibonacci(n=0, zero=False, weighted=False), np.array([1])) - - np.testing.assert_array_equal(ta.utils.fibonacci(n=5, zero=True, weighted=False), np.array([0, 1, 1, 2, 3, 5])) - np.testing.assert_array_equal(ta.utils.fibonacci(n=5, zero=False, weighted=False), np.array([1, 1, 2, 3, 5])) - - assert isinstance(ta.utils.fibonacci(zero=True, weighted=False), np.ndarray) + assert isinstance(ta.utils.fibonacci(2, False), np.ndarray) def test_fibonacci_weighted(): - np.testing.assert_array_equal(ta.utils.fibonacci(n=0, zero=True, weighted=True), np.array([0])) - np.testing.assert_array_equal(ta.utils.fibonacci(n=0, zero=False, weighted=True), np.array([1])) - - np.testing.assert_allclose(ta.utils.fibonacci(n=5, zero=True, weighted=True), np.array([0, 1 / 12, 1 / 12, 1 / 6, 1 / 4, 5 / 12])) - np.testing.assert_allclose(ta.utils.fibonacci(n=5, zero=False, weighted=True), np.array([1 / 12, 1 / 12, 1 / 6, 1 / 4, 5 / 12])) + np.testing.assert_array_equal(ta.utils.fibonacci(0, True), np.array([0.5, 0.5])) + np.testing.assert_allclose(ta.utils.fibonacci(5, True), np.array([1 / 12, 1 / 12, 1 / 6, 1 / 4, 5 / 12])) - assert isinstance(ta.utils.fibonacci(zero=True, weighted=True), np.ndarray) + assert isinstance(ta.utils.fibonacci(2, True), np.ndarray) def test_geometric_mean(df):