IDF master #5761

Merged: 1 commit, Oct 14, 2021
8 changes: 4 additions & 4 deletions platform.txt

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion tools/platformio-build-esp32.py
@@ -300,7 +300,7 @@
"UNITY_INCLUDE_CONFIG_H",
"WITH_POSIX",
"_GNU_SOURCE",
("IDF_VER", '\\"v4.4-dev-3235-g3e370c4296\\"'),
("IDF_VER", '\\"v4.4-dev-3401-gb86fe0c66c\\"'),
"ESP_PLATFORM",
"ARDUINO_ARCH_ESP32",
"ESP32",
13 changes: 11 additions & 2 deletions tools/platformio-build-esp32c3.py
@@ -263,6 +263,15 @@
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "wifi_provisioning", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp_littlefs", "src"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp_littlefs", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp-face", "include"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp-face", "include", "tool"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp-face", "include", "typedef"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp-face", "include", "image"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp-face", "include", "math"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp-face", "include", "nn"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp-face", "include", "layer"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp-face", "include", "detect"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "esp-face", "include", "model_zoo"),
join(FRAMEWORK_DIR, "tools", "sdk", "esp32c3", "include", "fb_gfx", "include"),
join(FRAMEWORK_DIR, "cores", env.BoardConfig().get("build.core"))
],
@@ -273,7 +282,7 @@
],

LIBS=[
"-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lasio", "-lbt", "-lcbor", "-lunity", "-lcmock", "-lcoap", "-lconsole", "-lnghttp", "-lesp-tls", "-lesp_adc_cal", "-lesp_hid", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lesp_https_server", "-lesp_lcd", "-lprotobuf-c", "-lprotocomm", "-lmdns", "-lesp_local_ctrl", "-lsdmmc", "-lesp_serial_slave_link", "-lesp_websocket_client", "-lexpat", "-lwear_levelling", "-lfatfs", "-lfreemodbus", "-ljsmn", "-ljson", "-llibsodium", "-lmqtt", "-lopenssl", "-lspiffs", "-lwifi_provisioning", "-lesp_littlefs", "-lfb_gfx", "-lasio", "-lcbor", "-lcmock", "-lunity", "-lcoap", "-lesp_lcd", "-lesp_local_ctrl", "-lesp_https_server", "-lesp_websocket_client", "-lexpat", "-lfreemodbus", "-ljsmn", "-llibsodium", "-lmqtt", "-lesp_adc_cal", "-lesp_hid", "-lfatfs", "-lwear_levelling", "-lopenssl", "-lspiffs", "-lwifi_provisioning", "-lprotocomm", "-lbt", "-lbtdm_app", "-lprotobuf-c", "-lmdns", "-lconsole", "-ljson", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", 
"-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lphy", "-lbtbb", "-lesp_phy", "-lphy", "-lbtbb", "-lesp_phy", "-lphy", "-lbtbb", "-lm", "-lnewlib", "-lstdc++", "-lpthread", "-lgcc", "-lcxx", "-lapp_trace", "-lgcov", "-lapp_trace", "-lgcov", "-lc"
"-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lasio", "-lbt", "-lcbor", "-lunity", "-lcmock", "-lcoap", "-lconsole", "-lnghttp", "-lesp-tls", "-lesp_adc_cal", "-lesp_hid", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lesp_https_server", "-lesp_lcd", "-lprotobuf-c", "-lprotocomm", "-lmdns", "-lesp_local_ctrl", "-lsdmmc", "-lesp_serial_slave_link", "-lesp_websocket_client", "-lexpat", "-lwear_levelling", "-lfatfs", "-lfreemodbus", "-ljsmn", "-ljson", "-llibsodium", "-lmqtt", "-lopenssl", "-lspiffs", "-lwifi_provisioning", "-lesp_littlefs", "-lfb_gfx", "-lasio", "-lcbor", "-lcmock", "-lunity", "-lcoap", "-lesp_lcd", "-lesp_local_ctrl", "-lesp_https_server", "-lesp_websocket_client", "-lexpat", "-lfreemodbus", "-ljsmn", "-llibsodium", "-lmqtt", "-lesp_adc_cal", "-lesp_hid", "-lfatfs", "-lwear_levelling", "-lopenssl", "-lspiffs", "-lwifi_provisioning", "-lprotocomm", "-lbt", "-lbtdm_app", "-lprotobuf-c", "-lmdns", "-lconsole", "-ljson", "-lcat_face_detect", "-lhuman_face_detect", "-ldl", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", 
"-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lesp_ringbuf", "-lefuse", "-lesp_ipc", "-ldriver", "-lesp_pm", "-lmbedtls", "-lapp_update", "-lbootloader_support", "-lspi_flash", "-lnvs_flash", "-lpthread", "-lesp_gdbstub", "-lespcoredump", "-lesp_phy", "-lesp_system", "-lesp_rom", "-lhal", "-lvfs", "-lesp_eth", "-ltcpip_adapter", "-lesp_netif", "-lesp_event", "-lwpa_supplicant", "-lesp_wifi", "-llwip", "-llog", "-lheap", "-lsoc", "-lesp_hw_support", "-lriscv", "-lesp_common", "-lesp_timer", "-lfreertos", "-lnewlib", "-lcxx", "-lapp_trace", "-lnghttp", "-lesp-tls", "-ltcp_transport", "-lesp_http_client", "-lesp_http_server", "-lesp_https_ota", "-lsdmmc", "-lesp_serial_slave_link", "-lmbedtls", "-lmbedcrypto", "-lmbedx509", "-lcoexist", "-lcore", "-lespnow", "-lmesh", "-lnet80211", "-lpp", "-lsmartconfig", "-lwapi", "-lphy", "-lbtbb", "-lesp_phy", "-lphy", "-lbtbb", "-lesp_phy", "-lphy", "-lbtbb", "-lm", "-lnewlib", "-lstdc++", "-lpthread", "-lgcc", "-lcxx", "-lapp_trace", "-lgcov", "-lapp_trace", "-lgcov", "-lc"
],

CPPDEFINES=[
@@ -282,7 +291,7 @@
"UNITY_INCLUDE_CONFIG_H",
"WITH_POSIX",
"_GNU_SOURCE",
("IDF_VER", '\\"v4.4-dev-3235-g3e370c4296\\"'),
("IDF_VER", '\\"v4.4-dev-3401-gb86fe0c66c\\"'),
"ESP_PLATFORM",
"ARDUINO_ARCH_ESP32",
"ESP32",
2 changes: 1 addition & 1 deletion tools/platformio-build-esp32s2.py
@@ -286,7 +286,7 @@
"UNITY_INCLUDE_CONFIG_H",
"WITH_POSIX",
"_GNU_SOURCE",
("IDF_VER", '\\"v4.4-dev-3235-g3e370c4296\\"'),
("IDF_VER", '\\"v4.4-dev-3401-gb86fe0c66c\\"'),
"ESP_PLATFORM",
"ARDUINO_ARCH_ESP32",
"ESP32",
5 changes: 5 additions & 0 deletions tools/sdk/esp32/include/asio/port/include/esp_asio_config.h
@@ -18,6 +18,11 @@
# define ASIO_NO_TYPEID
# endif // CONFIG_COMPILER_RTTI

//
// Suppress OpenSSL deprecation warnings when building ASIO
//
#define ESP_OPENSSL_SUPPRESS_LEGACY_WARNING

//
// LWIP compatibility inet and address macros/functions
//
@@ -191,6 +191,9 @@ int coap_handle_response_get_block(coap_context_t *context,
void coap_block_delete_lg_xmit(coap_session_t *session,
coap_lg_xmit_t *lg_xmit);

coap_tick_t coap_block_check_lg_xmit_timeouts(coap_session_t *session,
coap_tick_t now);

/**
* The function that does all the work for the coap_add_data_large*()
* functions.
@@ -27,6 +27,12 @@ typedef struct coap_dtls_pki_t coap_dtls_pki_t;
#ifndef COAP_DTLS_HINT_LENGTH
#define COAP_DTLS_HINT_LENGTH 128
#endif
#ifndef COAP_DTLS_MAX_PSK_IDENTITY
#define COAP_DTLS_MAX_PSK_IDENTITY 64
#endif
#ifndef COAP_DTLS_MAX_PSK
#define COAP_DTLS_MAX_PSK 64
#endif

typedef enum coap_dtls_role_t {
COAP_DTLS_ROLE_CLIENT, /**< Internal function invoked for client */
28 changes: 14 additions & 14 deletions tools/sdk/esp32/include/coap/libcoap/include/coap3/coap_event.h
@@ -24,34 +24,34 @@
* Scalar type to represent different events, e.g. DTLS events or
* retransmission timeouts.
*/
typedef unsigned int coap_event_t;

typedef enum coap_event_t {
/**
* (D)TLS events for COAP_PROTO_DTLS and COAP_PROTO_TLS
*/
#define COAP_EVENT_DTLS_CLOSED 0x0000
#define COAP_EVENT_DTLS_CONNECTED 0x01DE
#define COAP_EVENT_DTLS_RENEGOTIATE 0x01DF
#define COAP_EVENT_DTLS_ERROR 0x0200
COAP_EVENT_DTLS_CLOSED = 0x0000,
COAP_EVENT_DTLS_CONNECTED = 0x01DE,
COAP_EVENT_DTLS_RENEGOTIATE = 0x01DF,
COAP_EVENT_DTLS_ERROR = 0x0200,

/**
* TCP events for COAP_PROTO_TCP and COAP_PROTO_TLS
*/
#define COAP_EVENT_TCP_CONNECTED 0x1001
#define COAP_EVENT_TCP_CLOSED 0x1002
#define COAP_EVENT_TCP_FAILED 0x1003
COAP_EVENT_TCP_CONNECTED = 0x1001,
COAP_EVENT_TCP_CLOSED = 0x1002,
COAP_EVENT_TCP_FAILED = 0x1003,

/**
* CSM exchange events for reliable protocols only
*/
#define COAP_EVENT_SESSION_CONNECTED 0x2001
#define COAP_EVENT_SESSION_CLOSED 0x2002
#define COAP_EVENT_SESSION_FAILED 0x2003
COAP_EVENT_SESSION_CONNECTED = 0x2001,
COAP_EVENT_SESSION_CLOSED = 0x2002,
COAP_EVENT_SESSION_FAILED = 0x2003,

/**
* BLOCK2 receive errors
* (Q-)BLOCK receive errors
*/
#define COAP_EVENT_PARTIAL_BLOCK 0x3001
COAP_EVENT_PARTIAL_BLOCK = 0x3001
} coap_event_t;
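Because coap_event_t is now a proper enum rather than a scalar typedef plus #define constants, a registered handler can switch over the events and let the compiler warn about unhandled enumerators. A minimal sketch, assuming libcoap 3.x's coap_event_handler_t/coap_register_event_handler() API shipped with this component (the reactions in the branches are illustrative):

```cpp
#include <coap3/coap.h>

// Sketch: react to (D)TLS and session lifecycle events.
static int event_handler(coap_session_t *session, const coap_event_t event) {
  (void)session;
  switch (event) {
  case COAP_EVENT_DTLS_CONNECTED:
    // Handshake finished: safe to start sending requests.
    break;
  case COAP_EVENT_SESSION_CLOSED:
  case COAP_EVENT_TCP_CLOSED:
    // Transport torn down: schedule a reconnect.
    break;
  default:
    break;
  }
  return 0;
}

void install_event_handler(coap_context_t *ctx) {
  coap_register_event_handler(ctx, event_handler);
}
```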

/**
* Type for event handler functions that can be registered with a CoAP
@@ -88,7 +88,11 @@ COAP_STATIC_INLINE uint64_t coap_ticks_to_rt_us(coap_tick_t t) {
#elif defined(RIOT_VERSION)
#include <xtimer.h>

#ifdef XTIMER_HZ
#define COAP_TICKS_PER_SECOND (XTIMER_HZ)
#else /* XTIMER_HZ */
#define COAP_TICKS_PER_SECOND (XTIMER_HZ_BASE)
#endif /* XTIMER_HZ */

typedef uint64_t coap_tick_t;
typedef int64_t coap_tick_diff_t;
1 change: 1 addition & 0 deletions tools/sdk/esp32/include/coap/libcoap/include/coap3/net.h
@@ -15,6 +15,7 @@
#include <stdlib.h>
#include <string.h>
#ifndef _WIN32
#include <sys/select.h>
#include <sys/time.h>
#endif
#include <time.h>
1 change: 0 additions & 1 deletion tools/sdk/esp32/include/coap/libcoap/include/coap3/pdu.h
@@ -299,7 +299,6 @@ typedef enum coap_pdu_code_t {
COAP_REQUEST_CODE_PATCH = COAP_REQUEST_PATCH,
COAP_REQUEST_CODE_IPATCH = COAP_REQUEST_IPATCH,

COAP_RESPONSE_CODE_OK = COAP_RESPONSE_CODE(200),
COAP_RESPONSE_CODE_CREATED = COAP_RESPONSE_CODE(201),
COAP_RESPONSE_CODE_DELETED = COAP_RESPONSE_CODE(202),
COAP_RESPONSE_CODE_VALID = COAP_RESPONSE_CODE(203),
@@ -83,7 +83,8 @@ typedef void (*coap_method_handler_t)
* variable of coap_str_const_t has to point to constant text, or point to data
* within the allocated coap_str_const_t parameter.
*
* @param uri_path The string URI path of the new resource.
* @param uri_path The string URI path of the new resource. The leading '/' is
* not normally required - e.g. just "full/path/for/resource".
* @param flags Flags for memory management (in particular release of
* memory). Possible values:@n
*
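A minimal sketch of the documented convention (no leading '/' in uri_path), assuming libcoap 3.x's coap_make_str_const(), coap_resource_init(), and coap_register_handler() helpers; hnd_get_sensor is a hypothetical GET handler defined elsewhere:

```cpp
#include <coap3/coap.h>

// Hypothetical GET handler, matching the coap_method_handler_t signature.
extern void hnd_get_sensor(coap_resource_t *resource, coap_session_t *session,
                           const coap_pdu_t *request,
                           const coap_string_t *query, coap_pdu_t *response);

void register_sensor_resource(coap_context_t *ctx) {
  // Note: no leading '/', as the updated documentation recommends.
  coap_resource_t *r =
      coap_resource_init(coap_make_str_const("full/path/for/resource"), 0);
  coap_register_handler(r, COAP_REQUEST_GET, hnd_get_sensor);
  coap_add_resource(ctx, r);
}
```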
5 changes: 4 additions & 1 deletion tools/sdk/esp32/include/config/sdkconfig.h
@@ -26,6 +26,7 @@
#define CONFIG_BOOTLOADER_FLASH_XMC_SUPPORT 1
#define CONFIG_ESPTOOLPY_BAUD_OTHER_VAL 115200
#define CONFIG_ESPTOOLPY_FLASHMODE_DIO 1
#define CONFIG_ESPTOOLPY_FLASH_SAMPLE_MODE_STR 1
#define CONFIG_ESPTOOLPY_FLASHMODE "dio"
#define CONFIG_ESPTOOLPY_FLASHFREQ_40M 1
#define CONFIG_ESPTOOLPY_FLASHFREQ "40m"
@@ -369,6 +370,7 @@
#define CONFIG_LWIP_GARP_TMR_INTERVAL 60
#define CONFIG_LWIP_TCPIP_RECVMBOX_SIZE 32
#define CONFIG_LWIP_DHCP_RESTORE_LAST_IP 1
#define CONFIG_LWIP_DHCP_OPTIONS_LEN 68
#define CONFIG_LWIP_DHCPS 1
#define CONFIG_LWIP_DHCPS_LEASE_UNIT 60
#define CONFIG_LWIP_DHCPS_MAX_STATION_NUM 8
@@ -480,6 +482,7 @@
#define CONFIG_MDNS_TASK_AFFINITY 0x0
#define CONFIG_MDNS_SERVICE_ADD_TIMEOUT_MS 2000
#define CONFIG_MDNS_TIMER_PERIOD_MS 100
#define CONFIG_MDNS_MULTIPLE_INSTANCE 1
#define CONFIG_MQTT_PROTOCOL_311 1
#define CONFIG_MQTT_TRANSPORT_SSL 1
#define CONFIG_MQTT_TRANSPORT_WEBSOCKET 1
@@ -675,5 +678,5 @@
#define CONFIG_ULP_COPROC_ENABLED CONFIG_ESP32_ULP_COPROC_ENABLED
#define CONFIG_ULP_COPROC_RESERVE_MEM CONFIG_ESP32_ULP_COPROC_RESERVE_MEM
#define CONFIG_WARN_WRITE_STRINGS CONFIG_COMPILER_WARN_WRITE_STRINGS
#define CONFIG_ARDUINO_IDF_COMMIT "3e370c4296"
#define CONFIG_ARDUINO_IDF_COMMIT "b86fe0c66c"
#define CONFIG_ARDUINO_IDF_BRANCH "master"
25 changes: 22 additions & 3 deletions tools/sdk/esp32/include/driver/include/driver/rmt.h
@@ -856,16 +856,35 @@ esp_err_t rmt_remove_channel_from_group(rmt_channel_t channel);

#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
/**
* @brief Set loop count for RMT TX channel
* @brief Set loop count threshold value for RMT TX channel
*
* When tx loop count reaches this value, an ISR callback will notify the user
*
* @param channel RMT channel
* @param count loop count
* @param count loop count, 1 ~ 1023
* @return
* - ESP_ERR_INVALID_ARG Parameter error
* - ESP_OK Success
*/
esp_err_t rmt_set_tx_loop_count(rmt_channel_t channel, uint32_t count);
#endif

/**
* @brief Enable or disable automatically stopping RMT transmission when the loop count reaches the threshold.
*
* - When the loop auto-stop feature is enabled, RMT transmission halts once the loop count reaches the threshold
* - When disabled, RMT transmission continues indefinitely until halted by the user
*
* @note The auto-stop feature is implemented in hardware on particular targets (i.e. those with SOC_RMT_SUPPORT_TX_LOOP_AUTOSTOP defined).
* Otherwise, the auto-stop feature is implemented in software via the interrupt.
*
* @param channel RMT channel
* @param en enable bit
* @return
* - ESP_ERR_INVALID_ARG Parameter error
* - ESP_OK Success
*/
esp_err_t rmt_enable_tx_loop_autostop(rmt_channel_t channel, bool en);
#endif // SOC_RMT_SUPPORT_TX_LOOP_COUNT

/**
* @brief Reset RMT TX/RX memory index.
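A minimal sketch pairing the loop-count threshold with the new auto-stop switch, assuming a TX channel already configured in loop mode (loop_en = true in rmt_config_t) and an item buffer prepared elsewhere:

```cpp
#include "driver/rmt.h"
#include "esp_err.h"

// Sketch: transmit a looped pattern that stops itself after 100 iterations.
esp_err_t start_bounded_loop_tx(rmt_channel_t ch,
                                const rmt_item32_t *items, int n_items) {
  // Threshold must lie in 1 ~ 1023; an ISR callback fires when it is reached.
  ESP_ERROR_CHECK(rmt_set_tx_loop_count(ch, 100));
  // Halt transmission at the threshold (hardware-assisted on targets with
  // SOC_RMT_SUPPORT_TX_LOOP_AUTOSTOP, interrupt-driven otherwise).
  ESP_ERROR_CHECK(rmt_enable_tx_loop_autostop(ch, true));
  return rmt_write_items(ch, items, n_items, false /* do not block */);
}
```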
19 changes: 14 additions & 5 deletions tools/sdk/esp32/include/esp-face/include/dl_define.hpp
@@ -10,7 +10,7 @@
#define DL_LOG_LAYER_LATENCY 0 /*<! - 1: print the latency of each parts of layer */
/*<! - 0: mute */

#if CONFIG_SPIRAM_SUPPORT || CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S3_SPIRAM_SUPPORT
#if CONFIG_SPIRAM_SUPPORT || CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT || CONFIG_ESP32S3_SPIRAM_SUPPORT
#define DL_SPIRAM_SUPPORT 1
#else
#define DL_SPIRAM_SUPPORT 0
@@ -83,8 +83,17 @@ namespace dl

typedef enum
{
PADDING_VALID, /*<! no padding >*/
PADDING_SAME, /*<! SAME in TensorFlow style >*/
PADDING_SAME_MXNET /*<! SAME in MXNET style >*/
PADDING_NOT_SET,
PADDING_VALID, /*<! no padding >*/
PADDING_SAME_BEGIN, /*<! SAME in MXNET style >*/
PADDING_SAME_END, /*<! SAME in TensorFlow style >*/
} padding_type_t;
} // namespace dl

typedef enum
{
CONSTANT,
EDGE,
REFLECT,
SYMMETRIC,
} padding_mode_t;
} // namespace dl
61 changes: 60 additions & 1 deletion tools/sdk/esp32/include/esp-face/include/image/dl_image.hpp
@@ -370,11 +370,70 @@ namespace dl
*/
uint32_t get_moving_point_number(uint8_t *f1, uint8_t *f2, const uint32_t height, const uint32_t width, const uint32_t stride, const uint32_t threshold = 5);


/**
* @brief Apply an affine transformation to an image.
*
* @tparam T
* @param input the input image.
* @param output the output image.
* @param M_inv the inverse transformation matrix.
*/
template <typename T>
void warp_affine(dl::Tensor<T> *input, dl::Tensor<T> *output, dl::math::Matrix<float> *M_inv);

/**
* @brief Apply an affine transformation to an image.
*
* @tparam T
* @param input the pointer of the input image.
* @param shape the shape of the input image.
* @param output the output image.
* @param M_inv the inverse transformation matrix.
*/
template <typename T>
void warp_affine(uint16_t *input, std::vector<int> shape, dl::Tensor<T> *output, dl::math::Matrix<float> *M_inv);

/**
* @brief Get the otsu thresh object.
*
* @param image the gray image.
* @return uint8_t the otsu thresh.
*/
uint8_t get_otsu_thresh(Tensor<uint8_t> &image);

/**
* @brief Convert RGB image to gray image
*
* @param image input image
* @param bgr true: the image is in BGR format
* false: the image is in RGB format
* @return Tensor<uint8_t>* output image in gray format
*/
Tensor<uint8_t> *rgb2gray(Tensor<uint8_t> &image, bool bgr = false);

/**
* @brief Convert RGB image to LAB image
*
* @param image input image
* @param bgr true: the image is in BGR format
* false: the image is in RGB format
* @param fast true: use the fast algorithm, but the accuracy will be reduced
* false: do not use the fast algorithm
* @return Tensor<uint8_t>* output image in LAB format
*/
Tensor<uint8_t> *rgb2lab(Tensor<uint8_t> &image, bool bgr = false, bool fast = true);

/**
* @brief Convert RGB image to HSV image
*
* @param image input image
* @param bgr true: the image is in BGR format
* false: the image is in RGB format
* @param fast true: use the fast algorithm, but the accuracy will be reduced
* false: do not use the fast algorithm
* @return Tensor<uint8_t>* output image in HSV format
*/
Tensor<uint8_t> *rgb2hsv(Tensor<uint8_t> &image, bool bgr = false, bool fast = true);

} // namespace image
} // namespace dl
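A minimal sketch of the new color-space helpers, assuming an RGB888 Tensor<uint8_t> input and that the caller is responsible for deleting the returned tensors:

```cpp
#include "dl_image.hpp"

void demo_color_spaces(dl::Tensor<uint8_t> &rgb) {
  // Grayscale conversion, then an Otsu threshold over the result.
  dl::Tensor<uint8_t> *gray = dl::image::rgb2gray(rgb, /*bgr=*/false);
  uint8_t thresh = dl::image::get_otsu_thresh(*gray);

  // HSV conversion using the fast, lower-accuracy path.
  dl::Tensor<uint8_t> *hsv = dl::image::rgb2hsv(rgb, /*bgr=*/false, /*fast=*/true);

  (void)thresh;
  delete gray; // assumption: ownership of the returned tensors passes to the caller
  delete hsv;
}
```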
@@ -25,7 +25,8 @@ namespace dl
const int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of add2d >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of add2d >*/

public:
/**
@@ -35,19 +36,21 @@ namespace dl
* @param activation activation of add2d, if you don't specify anything, no activation is applied
* @param name name of add2d
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
Add2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = NULL, bool inplace = false) : Layer(name), activation(activation), output_exponent(output_exponent), output(NULL)
{
this->inplace = inplace;
}
Add2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = "Add2D", bool inplace = false) : Layer(name),
activation(activation),
output_exponent(output_exponent),
output(NULL),
inplace(inplace),
output_shape({}) {}

/**
* @brief Destroy the Add2D object
*/
~Add2D()
{
if((!this->inplace) && (this->output != NULL))
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
@@ -59,10 +62,12 @@ namespace dl
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1)
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
this->output_shape = input0.shape;

if (!this->inplace)
{
@@ -78,6 +83,11 @@ namespace dl
{
this->output = &input0;
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -105,7 +115,11 @@ namespace dl
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@@ -116,6 +130,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::add2d(*this->output, input0, input1, this->activation, assign_core, this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "add2d");
}
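A minimal usage sketch for the reworked Add2D layer in inplace mode, where the sum overwrites the first input; the header name is assumed from the class name:

```cpp
#include "dl_layer_add2d.hpp" // header name assumed

// Sketch: elementwise a += b, requantized to output_exponent.
void add_inplace(dl::Tensor<int8_t> &a, dl::Tensor<int8_t> &b) {
  dl::layer::Add2D<int8_t> add(/*output_exponent=*/-6, /*activation=*/NULL,
                               "add1", /*inplace=*/true);
  add.build(a, b, /*print_shape=*/true); // prints "add1 | <shape>"
  dl::Tensor<int8_t> &out = add.call(a, b); // `out` aliases `a` when inplace
  (void)out;
}
```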
@@ -24,40 +24,50 @@ namespace dl
std::vector<int> filter_shape; /*<! filter shape in [filter_height, filter_width] >*/
const int stride_y; /*<! stride in height >*/
const int stride_x; /*<! stride in width >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
Tensor<feature_t> *output; /*<! output ptr of AvgPool2D >*/
Tensor<feature_t> *output; /*<! output ptr of AvgPool2D >*/
std::vector<int> output_shape; /*<! output shape of AvgPool2D >*/

public:

/**
* @brief Construct a new AvgPool2D object.
*
* @param output_exponent exponent of output
* @param filter_shape filter shape in [filter_height, filter_width]
* @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
* - PADDING_VALID means no padding
* PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input,
* - PADDING_SAME results padding in TensorFlow style
* - PADDING_SAME_MXNET results padding in MXNET style
* - PADDING_SAME_END results in TensorFlow-style padding
* - PADDING_SAME_BEGIN results in MXNET-style padding
* - PADDING_NOT_SET means padding with the specific "padding" value below.
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
* the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
* @param stride_y stride in height
* @param stride_x stride in width
* @param name name of layer
*/
AvgPool2D(const int output_exponent,
const std::vector<int> filter_shape,
const padding_type_t padding_type = PADDING_VALID,
std::vector<int> padding = {},
const int stride_y = 1,
const int stride_x = 1,
const char *name = NULL) : Layer(name),
output_exponent(output_exponent),
filter_shape(filter_shape),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type)
const char *name = "AvgPool2D") : Layer(name),
output_exponent(output_exponent),
filter_shape(filter_shape),
padding_type(padding_type),
padding(padding),
stride_y(stride_y),
stride_x(stride_x),
output_shape({})
{
this->output = new Tensor<feature_t>;
if (this->padding_type == PADDING_NOT_SET)
{
assert(this->padding.size() == 4);
}
}

/**
@@ -66,7 +76,7 @@ namespace dl
*/
~AvgPool2D()
{
if(this->output != NULL)
if (this->output != NULL)
{
delete this->output;
}
@@ -76,20 +86,31 @@ namespace dl
* @brief Update output shape and padding.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
std::vector<int> output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
this->output->set_shape(output_shape);
assert(input.shape.size() == 3);

this->output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);

this->padding = nn::get_pad_size(output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
input.set_padding_size(this->padding);
if (this->padding_type != PADDING_NOT_SET)
{
this->padding = nn::get_pad_size(this->output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
}

this->output->free_element();
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
* @brief Get the output
@@ -108,15 +129,18 @@ namespace dl
* @param autoload_enable one of true or false,
* - true: load input and output from PSRAM to CACHE automatically
* - false: do not
* @param assign_core not effective yet
* @return AvgPool2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
{
DL_LOG_LAYER_LATENCY_INIT();

DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
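A minimal sketch of the new PADDING_NOT_SET mode, where the [top, bottom, left, right] padding is supplied explicitly instead of being derived from the padding type; the header name and quantization parameters are illustrative:

```cpp
#include "dl_layer_avg_pool2d.hpp" // header name assumed

void run_pool(dl::Tensor<int8_t> &input) { // input: [height, width, channels]
  dl::layer::AvgPool2D<int8_t> pool(/*output_exponent=*/-7,
                                    /*filter_shape=*/{2, 2},
                                    dl::PADDING_NOT_SET,
                                    /*padding=*/{1, 1, 1, 1}, // top, bottom, left, right
                                    /*stride_y=*/2,
                                    /*stride_x=*/2,
                                    "pool1");
  pool.build(input, /*print_shape=*/true);
  dl::Tensor<int8_t> &out = pool.call(input);
  (void)out;
}
```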

@@ -1,6 +1,7 @@
#pragma once
#include "dl_tool.hpp"
#include "dl_tool_cache.hpp"
#include <iostream>

namespace dl
{
139 changes: 139 additions & 0 deletions tools/sdk/esp32/include/esp-face/include/layer/dl_layer_concat.hpp
@@ -0,0 +1,139 @@
#pragma once

#include <assert.h>
#include <vector>

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"
#include "dl_nn_concat.hpp"

namespace dl
{
namespace layer
{
/**
* @brief Concat(input1, input2, input3, ...).
*
* @tparam feature_t support all kinds of integer and float data type
*/
template <typename feature_t>
class Concat : Layer
{
private:
int output_exponent; /*<! exponent of output >*/
int axis; /*<! The axis along which the Tensor will be concatenated. >*/
Tensor<feature_t> *output; /*<! output ptr of Concat >*/
std::vector<int> output_shape; /*<! output shape of Concat >*/
public:
/**
* @brief Construct a new Concat object.
*
* @param name name of layer
* @param axis The axis along which the Tensor will be concatenated.
*/
Concat(int axis, const char *name = "Concat") : Layer(name), axis(axis), output_shape({})
{
this->output = new Tensor<feature_t>;
}

/**
* @brief Destroy the Concat object
*/
~Concat()
{
if (this->output != NULL)
{
delete this->output;
}
}

/**
* @brief Collect inputs' channel and memory offset, called in Model.build().
*
* @param args pointers of concatenated Tensor
* @param print_shape whether to print the output shape.
*/
void build(std::vector<Tensor<feature_t> *> args, bool print_shape = false)
{
assert(args.size() > 1);
int shape_size = args[0]->shape.size();

if (this->axis < 0)
{
this->axis = shape_size + this->axis;
}
assert((this->axis < shape_size) && (this->axis > -1));

int output_shape_axis = args[0]->shape[this->axis];

for (int i = 1; i < args.size(); i++)
{
assert(shape_size == args[i]->shape.size());
assert(args[i]->exponent == args[i - 1]->exponent);
output_shape_axis += args[i]->shape[this->axis];

for (int j = 0; j < shape_size; j++)
{
if (j != this->axis)
{
assert(args[i]->shape[j] == args[i - 1]->shape[j]);
}
}
}

this->output_exponent = args[0]->exponent;
this->output_shape = args[0]->shape;
this->output_shape[this->axis] = output_shape_axis;

this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
this->output->free_element();

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
* @brief Call Concat operation
*
* @param inputs the pointers of inputs
* @param free_inputs true: free the inputs after call
* false: do not free inputs
* @return Tensor<feature_t>& concat result
*/
Tensor<feature_t> &call(std::vector<Tensor<feature_t> *> inputs, bool free_inputs = false)
{
DL_LOG_LAYER_LATENCY_INIT();

DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

DL_LOG_LAYER_LATENCY_START();
nn::concat(*this->output, inputs, this->axis, free_inputs);
DL_LOG_LAYER_LATENCY_END(this->name, "concat");
return *this->output;
}

/**
* @brief Get the output
*
* @return Tensor<feature_t>& Concat result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
};
} // namespace layer
} // namespace dl
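A minimal usage sketch for the new Concat layer, assuming two int8 tensors that share an exponent and agree on every dimension except the channel axis:

```cpp
#include "dl_layer_concat.hpp"

void run_concat(dl::Tensor<int8_t> &a, dl::Tensor<int8_t> &b) {
  // axis = -1 concatenates along the last (channel) dimension.
  dl::layer::Concat<int8_t> concat(/*axis=*/-1, "concat1");
  concat.build({&a, &b}, /*print_shape=*/true);
  // free_inputs = false keeps `a` and `b` alive after the call.
  dl::Tensor<int8_t> &out = concat.call({&a, &b}, /*free_inputs=*/false);
  (void)out;
}
```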
@@ -13,57 +13,70 @@ namespace dl
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
* - int16_t: for int16 quantization and int8 per-channel quantization
* - int8_t: for int8 per-tensor quantization
*/
template <typename feature_t>
template <typename feature_t, typename bias_t = feature_t>
class Conv2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const Filter<feature_t> *filter; /*<! filter of Conv2D >*/
const int stride_y; /*<! stride in height >*/
const int stride_x; /*<! stride in width >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET >*/
const Bias<feature_t> *bias; /*<! bias of Conv2D, if you don't specify anything, no bias is added >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
const Bias<bias_t> *bias; /*<! bias of Conv2D, if you don't specify anything, no bias is added >*/
const Activation<feature_t> *activation; /*<! activation of Conv2D, if you don't specify anything, no activation is applied >*/
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
Tensor<feature_t> *output; /*<! output ptr of Conv2D >*/
Tensor<feature_t> *output; /*<! output ptr of Conv2D >*/
std::vector<int> output_shape; /*<! output shape of Conv2D >*/

public:

/**
* @brief Construct a new Conv2D object.
*
* @param output_exponent exponent of output
* @param filter filter of Conv2D
* @param bias bias of Conv2D, if you don't specify anything, no bias is added
* @param activation activation of Conv2D, if you don't specify anything, no activation is applied
* @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
* - PADDING_VALID means no padding
* PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input,
* - PADDING_SAME results padding in TensorFlow style
* - PADDING_SAME_MXNET results padding in MXNET style
* - PADDING_SAME_END results in TensorFlow-style padding
* - PADDING_SAME_BEGIN results in MXNET-style padding
* - PADDING_NOT_SET means padding with the specific "padding" value below.
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
* the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
* @param stride_y stride in height
* @param stride_x stride in width
* @param name name of layer
*/
Conv2D(const int output_exponent,
const Filter<feature_t> *filter,
const Bias<feature_t> *bias = NULL,
const Bias<bias_t> *bias = NULL,
const Activation<feature_t> *activation = NULL,
const padding_type_t padding_type = PADDING_VALID,
std::vector<int> padding = {},
const int stride_y = 1,
const int stride_x = 1,
const char *name = NULL) : Layer(name),
output_exponent(output_exponent),
filter(filter),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type),
bias(bias),
activation(activation)
const char *name = "Conv2D") : Layer(name),
output_exponent(output_exponent),
filter(filter),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type),
bias(bias),
activation(activation),
padding(padding),
output_shape({})
{
this->output = new Tensor<feature_t>;
if (this->padding_type == PADDING_NOT_SET)
{
assert(this->padding.size() == 4);
}
}

/**
@@ -82,19 +95,30 @@ namespace dl
* @brief Update output padding and input padding.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);
assert(this->filter->shape.size() == 4);
assert(input.shape[2] == this->filter->shape[2]);

std::vector<int> output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, true);
this->output->set_shape(output_shape);
this->output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, true, this->padding);
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
this->output->free_element();
if (this->padding_type != PADDING_NOT_SET)
{
this->padding = nn::get_pad_size(this->output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
}

this->padding = nn::get_pad_size(output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
input.set_padding_size(this->padding);
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -122,7 +146,11 @@ namespace dl
DL_LOG_LAYER_LATENCY_INIT();

DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@@ -153,5 +181,6 @@ namespace dl
dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
}
};

} // namespace layer
} // namespace dl
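A minimal sketch of the new bias_t template parameter: int8 per-channel quantization keeps int8_t features while carrying an int16_t bias. The coefficient objects are assumed to come from generated model data:

```cpp
#include "dl_layer_conv2d.hpp"

extern const dl::Filter<int8_t> conv1_filter;   // assumed model coefficients
extern const dl::Bias<int16_t> conv1_bias;      // int16 bias for int8 per-channel
extern const dl::Activation<int8_t> conv1_relu; // assumed activation object

void run_conv(dl::Tensor<int8_t> &input) {
  dl::layer::Conv2D<int8_t, int16_t> conv(/*output_exponent=*/-6,
                                          &conv1_filter, &conv1_bias, &conv1_relu,
                                          dl::PADDING_SAME_END,
                                          /*padding=*/{},
                                          /*stride_y=*/1, /*stride_x=*/1,
                                          "conv1");
  conv.build(input, /*print_shape=*/true);
  dl::Tensor<int8_t> &out = conv.call(input);
  (void)out;
}
```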
@@ -13,64 +13,77 @@ namespace dl
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
* - int16_t: for int16 quantization and int8 per-channel quantization
* - int8_t: for int8 per-tensor quantization
*/
template <typename feature_t>
template <typename feature_t, typename bias_t = feature_t>
class DepthwiseConv2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const Filter<feature_t> *filter; /*<! filter of DepthwiseConv2D >*/
const int stride_y; /*<! stride in height >*/
const int stride_x; /*<! stride in width >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET >*/
const Bias<feature_t> *bias; /*<! bias of DepthwiseConv2D, if you don't specify anything, no bias is added >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
const Bias<bias_t> *bias; /*<! bias of DepthwiseConv2D, if you don't specify anything, no bias is added >*/
const Activation<feature_t> *activation; /*<! activation of DepthwiseConv2D, if you don't specify anything, no activation is applied >*/
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
Tensor<feature_t> *output; /*<! output ptr of DepthwiseConv2D >*/
std::vector<int> output_shape; /*<! output shape of DepthwiseConv2D >*/

public:

/**
* @brief Construct a new DepthwiseConv2D object.
*
* @param output_exponent exponent of output
* @param filter filter of DepthwiseConv2D
* @param bias bias of DepthwiseConv2D, if you don't specify anything, no bias is added
* @param activation activation of DepthwiseConv2D, if you don't specify anything, no activation is applied
* @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
* - PADDING_VALID means no padding
* PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input
* - PADDING_SAME results padding in TensorFlow style
* - PADDING_SAME_MXNET results padding in MXNET style
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input,
* - PADDING_SAME_END results in TensorFlow-style padding
* - PADDING_SAME_BEGIN results in MXNET-style padding
* - PADDING_NOT_SET means padding with the specific "padding" value below.
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
* the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
* @param stride_y - stride in height
* @param stride_x - stride in width
* @param name name of layer
*/
DepthwiseConv2D(const int output_exponent,
const Filter<feature_t> *filter,
const Bias<feature_t> *bias = NULL,
const Bias<bias_t> *bias = NULL,
const Activation<feature_t> *activation = NULL,
const padding_type_t padding_type = PADDING_VALID,
std::vector<int> padding = {},
const int stride_y = 1,
const int stride_x = 1,
const char *name = NULL) : Layer(name),
output_exponent(output_exponent),
filter(filter),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type),
bias(bias),
activation(activation)
const char *name = "DepthwiseConv2D") : Layer(name),
output_exponent(output_exponent),
filter(filter),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type),
bias(bias),
activation(activation),
padding(padding),
output_shape({})
{
this->output = new Tensor<feature_t>;
if (this->padding_type == PADDING_NOT_SET)
{
assert(this->padding.size() == 4);
}
}

/**
* @brief Destroy the DepthwiseConv2D object.
*
*/
~DepthwiseConv2D()
~DepthwiseConv2D()
{
if (this->output != NULL)
{
@@ -82,19 +95,31 @@ namespace dl
* @brief Update output shape and padding.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);
assert(this->filter->shape.size() == 4);
assert(input.shape[2] == this->filter->shape[2]);

std::vector<int> output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
this->output->set_shape(output_shape);
this->output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);

this->padding = nn::get_pad_size(output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
input.set_padding_size(this->padding);
if (this->padding_type != PADDING_NOT_SET)
{
this->padding = nn::get_pad_size(this->output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
}
this->output->free_element();

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -122,7 +147,12 @@ namespace dl
DL_LOG_LAYER_LATENCY_INIT();

DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}

this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@@ -0,0 +1,128 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
* @brief
*
* @tparam feature_t
*/
template <typename feature_t>
class ExpandDims : public Layer
{
private:
std::vector<int> output_shape; /*<! output shape of ExpandDims >*/
std::vector<int> axis; /*<! position where the new axis is placed. >*/
Tensor<feature_t> *output; /*<! output ptr of ExpandDims >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/

public:
int output_exponent;

/**
* @brief Construct a new ExpandDims object
*
* @param axis position where the new axis is placed.
* @param name name of layer
* @param inplace true: the output will store to input
* false: the output will store to a separate memory
*/
ExpandDims(std::vector<int> axis, const char *name = "ExpandDims", bool inplace = false) : Layer(name),
axis(axis), inplace(inplace), output_shape({})
{
}

/**
* @brief Destroy the ExpandDims object
*
*/
~ExpandDims()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}

/**
* @brief Update output shape.
*
* @param input as an input.
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(this->output_shape);
this->output->expand_dims(this->axis);
this->output->free_element();
}
else
{
this->output = &input;
this->output->set_shape(this->output_shape);
this->output->expand_dims(this->axis);
}
this->output_shape = this->output->shape;

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
* @brief Get the output
*
* @return Tensor<feature_t>& ExpandDims result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}

/**
* @brief Call ExpandDims operation
*
* @param input
* @return Tensor<feature_t>& ExpandDims result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();

if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->set_shape(this->output_shape);
this->output->copy_element(input, true);
DL_LOG_LAYER_LATENCY_END(this->name, "ExpandDims");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_shape(this->output_shape);
DL_LOG_LAYER_LATENCY_END(this->name, "ExpandDims");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
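A minimal usage sketch for ExpandDims in inplace mode, inserting a leading axis so a [height, width, channels] tensor becomes [1, height, width, channels]; the header name is assumed from the class name:

```cpp
#include "dl_layer_expand_dims.hpp" // header name assumed

void add_batch_axis(dl::Tensor<int8_t> &input) {
  dl::layer::ExpandDims<int8_t> expand(/*axis=*/{0}, "expand",
                                       /*inplace=*/true);
  expand.build(input, /*print_shape=*/true);
  dl::Tensor<int8_t> &out = expand.call(input); // aliases `input` when inplace
  (void)out;
}
```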
120 changes: 120 additions & 0 deletions tools/sdk/esp32/include/esp-face/include/layer/dl_layer_flatten.hpp
@@ -0,0 +1,120 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
* @brief
*
* @tparam feature_t
*/
template <typename feature_t>
class Flatten : public Layer
{
private:
int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of Flatten >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of Flatten >*/

public:
/**
* @brief Construct a new Flatten object
*
* @param name name of layer
* @param inplace true: the output will store to input0
* false: the output will store to a separate memory
*/
Flatten(const char *name = "Flatten", bool inplace = false) : Layer(name), inplace(inplace), output_shape({})
{}

/**
* @brief Destroy the Flatten object
*
*/
~Flatten()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}

/**
* @brief Update output shape.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
this->output_shape = {input.get_size()};
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
this->output->set_shape(this->output_shape);
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
* @brief Get the output
*
* @return Tensor<feature_t>& Flatten result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}

/**
* @brief Call Flatten operation.
*
* @param input as an input
* @return Tensor<feature_t>& Flatten result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();

if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->flatten();
this->output->copy_element(input, true);
DL_LOG_LAYER_LATENCY_END(this->name, "flatten");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->flatten();
DL_LOG_LAYER_LATENCY_END(this->name, "flatten");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
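A minimal usage sketch for the new Flatten layer, collapsing an [h, w, c] tensor to a rank-1 tensor of h * w * c elements, e.g. ahead of a fully connected head:

```cpp
#include "dl_layer_flatten.hpp"

void flatten_features(dl::Tensor<int8_t> &input) {
  dl::layer::Flatten<int8_t> flatten("flatten", /*inplace=*/false);
  flatten.build(input, /*print_shape=*/true);
  dl::Tensor<int8_t> &out = flatten.call(input);
  (void)out;
}
```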
@@ -0,0 +1,167 @@
#pragma once

#include "dl_nn_fully_connected.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
* @brief Activation(FullyConnected(input, filter) + bias).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
* - int16_t: for int16 quantization and int8 per-channel quantization
* - int8_t: for int8 per-tensor quantization
*/
template <typename feature_t, typename bias_t = feature_t>
class FullyConnected : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const bool flatten; /*<! true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ...., xn, output_dim] >*/
const Filter<feature_t> *filter; /*<! filter of FullyConnected >*/
const Bias<bias_t> *bias; /*<! bias of FullyConnected, if you don't specify anything, no bias is added >*/
const Activation<feature_t> *activation; /*<! activation of FullyConnected, if you don't specify anything, no activation is applied >*/
Tensor<feature_t> *output; /*<! output ptr of FullyConnected >*/
std::vector<int> output_shape; /*<! output shape of FullyConnected >*/

public:
/**
* @brief Construct a new FullyConnected object.
*
* @param output_exponent exponent of output
* @param filter filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ...., xn, output_dim]
* @param name name of layer
*/
FullyConnected(const int output_exponent,
const Filter<feature_t> *filter,
const Bias<bias_t> *bias = NULL,
const Activation<feature_t> *activation = NULL,
const bool flatten = true,
const char *name = "FullyConnected") : Layer(name),
output_exponent(output_exponent),
flatten(flatten),
filter(filter),
bias(bias),
activation(activation),
output_shape({})
{
this->output = new Tensor<feature_t>;
}

/**
* @brief Destroy the FullyConnected object.
*
*/
~FullyConnected()
{
if (this->output != NULL)
{
delete this->output;
}
}

/**
* @brief Update output shape and exponent.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(this->filter->shape.size() == 4);
assert(this->filter->shape[0] == 1);
assert(this->filter->shape[1] == 1);
if (this->flatten)
{
assert(input.get_size() == this->filter->shape[2]);
this->output_shape = {this->filter->shape[3]};
}
else
{
assert(input.shape.back() == this->filter->shape[2]);
this->output_shape = input.shape;
this->output_shape[this->output_shape.size() - 1] = this->filter->shape[3];
}
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
this->output->free_element();

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
* @brief Get the output
*
* @return Tensor<feature_t>& FullyConnected result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}

/**
* @brief Call FullyConnected operation
*
* @param input as an input.
* @param autoload_enable one of true or false,
* - true: load input and output from PSRAM to CACHE automatically
* - false: do not
* @param assign_core not effective yet
* @return FullyConnected result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, bool autoload_enable = false, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();

DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

if (autoload_enable)
{
dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
(uint32_t)(input.element), input.get_size() * sizeof(feature_t));
}

DL_LOG_LAYER_LATENCY_START();
nn::fully_connected(*this->output, input, *(this->filter), this->bias, this->activation, this->flatten, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "fully_connected");
return *this->output;
}

/**
* @brief Preload the filter to Cache.
* NOTE: Call this layer's preload() before the previous layer's call(), so that the filter can be loaded while the previous layer is still computing.
*/
void preload()
{
size_t size = sizeof(feature_t);
int shape_size = this->filter->shape.size();
for (int i = 0; i < shape_size; ++i)
{
size *= filter->shape[i];
}
dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
}
};
} // namespace layer
} // namespace dl
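
To make the lifecycle above concrete, a hedged sketch of driving FullyConnected; the Filter/Bias constructor arguments (element pointer, exponent, shape) are assumptions based on how they are consumed above, not confirmed signatures:

#include "dl_layer_fully_connected.hpp"

void fully_connected_example(dl::Tensor<int16_t> &input) // e.g. shape {1, 1, 16}
{
    static const int16_t filter_element[16 * 10] = {0}; // placeholder weights
    static const int16_t bias_element[10] = {0};        // placeholder bias

    dl::Filter<int16_t> filter(filter_element, -8, {1, 1, 16, 10}); // assumed ctor
    dl::Bias<int16_t> bias(bias_element, -8, {10});                 // assumed ctor

    // flatten=true: input is flattened to 16 elements, output shape is {10}
    dl::layer::FullyConnected<int16_t> fc(-7, &filter, &bias, NULL, true, "fc");
    fc.build(input);
    fc.preload(); // load weights into cache while earlier layers still run
    fc.call(input);
}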
@@ -20,17 +20,19 @@ namespace dl
class GlobalAveragePool2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of GlobalAveragePool2D >*/
const int output_exponent; /*<! exponent of output >*/
std::vector<int> output_shape; /*<! output shape of GlobalAveragePool2D >*/
Tensor<feature_t> *output; /*<! output ptr of GlobalAveragePool2D >*/
public:
/**
* @brief Construct a new GlobalAveragePool2D object.
*
* @param output_exponent exponent of output
* @param name name of layer
*/
GlobalAveragePool2D(const int output_exponent, const char *name = NULL) : Layer(name),
output_exponent(output_exponent)
GlobalAveragePool2D(const int output_exponent, const char *name = "GlobalAveragePool2D") : Layer(name),
output_exponent(output_exponent),
output_shape({})

{
this->output = new Tensor<feature_t>;
@@ -52,17 +54,26 @@ namespace dl
* @brief Update output shape.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);

std::vector<int> output_shape(input.shape.size(), 1);
output_shape[2] = input.shape[2];
this->output->set_shape(output_shape);
this->output_shape = output_shape;
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
this->output->free_element();

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -90,7 +101,11 @@ namespace dl
DL_LOG_LAYER_LATENCY_INIT();

DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
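
The change above (free_element() in build(), a shape-guarded malloc_element() in call()) is a deferred-allocation pattern: build() only records geometry, and the buffer is (re)allocated when the layer actually runs. A generic sketch of the same guard, using only the Tensor API visible in this diff:

#include <vector>

template <typename T>
void prepare_output(dl::Tensor<T> *output, const std::vector<int> &output_shape, int exponent)
{
    if (output->shape != output_shape) // geometry changed since build()
    {
        output->set_shape(output_shape);
    }
    output->malloc_element(); // allocate only at call() time
    output->set_exponent(exponent);
}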

@@ -20,15 +20,15 @@ namespace dl
class GlobalMaxPool2D : public Layer
{
private:
Tensor<feature_t> *output; /*<! output ptr of GlobalMaxPool2D >*/
Tensor<feature_t> *output; /*<! output ptr of GlobalMaxPool2D >*/
std::vector<int> output_shape; /*<! output shape of GlobalMaxPool2D >*/
public:

/**
* @brief Construct a new GlobalMaxPool2D object.
*
* @param name name of layer
*/
GlobalMaxPool2D(const char *name = NULL) : Layer(name)
GlobalMaxPool2D(const char *name = "GlobalMaxPool2D") : Layer(name), output_shape({})
{
this->output = new Tensor<feature_t>;
}
@@ -49,17 +49,26 @@ namespace dl
* @brief Update output shape and exponent.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);
this->output->set_exponent(input.exponent);

std::vector<int> output_shape(input.shape.size(), 1);
output_shape[2] = input.shape[2];
this->output->set_shape(output_shape);
this->output_shape = output_shape;
this->output->set_shape(this->output_shape);
this->output->free_element();

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -87,7 +96,11 @@ namespace dl
DL_LOG_LAYER_LATENCY_INIT();

DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
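
Unlike GlobalAveragePool2D, this layer takes no output_exponent: with a shared exponent e, real values are q * 2^e, and max() is monotonic, so the maximum of the quantized values is exactly the quantized maximum and the input scale carries over. A tiny illustration (not from the source):

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>

void max_preserves_scale()
{
    const float scale = std::pow(2.0f, -7); // shared exponent -7
    int16_t a = 100, b = 57;                // quantized values
    // max of the real values equals the real value of the quantized max
    assert(std::max(a * scale, b * scale) == std::max(a, b) * scale);
}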

@@ -2,7 +2,7 @@

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_LeakyReLU.hpp"
#include "dl_nn_leakyrelu.hpp"
#include "dl_layer_base.hpp"

namespace dl
@@ -20,23 +20,23 @@ namespace dl
class LeakyReLU : public Layer
{
private:
feature_t activation_alpha; /*<! quantized alpha >*/
int activation_exponent; /*<! exponent of quantized alpha >*/
Tensor<feature_t> *output; /*<! output ptr of leakyrelu>*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
feature_t activation_alpha; /*<! quantized alpha >*/
int activation_exponent; /*<! exponent of quantized alpha >*/
Tensor<feature_t> *output; /*<! output ptr of leakyrelu>*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of leakyrelu >*/
public:

/**
* @brief Construct a new LeakyReLU object
*
* @param activation_alpha quantized alpha
* @param activation_exponent exponent of quantized alpha
* @param name name of leakyrelu
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
LeakyReLU(const int activation_alpha, const int activation_exponent, const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
LeakyReLU(const int activation_alpha, const int activation_exponent, const char *name = "LeakyReLU", bool inplace = false) : Layer(name), output(NULL), output_shape({})
{
this->activation_alpha = activation_alpha;
this->activation_exponent = activation_exponent;
@@ -47,7 +47,7 @@ namespace dl
* @brief Destroy the LeakyReLU object
*
*/
~LeakyReLU()
~LeakyReLU()
{
if ((!this->inplace) && (this->output != NULL))
{
@@ -59,24 +59,32 @@ namespace dl
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
if(!this->inplace)
this->output_shape = input.shape;
if (!this->inplace)
{
if(this->output != NULL)
if (this->output == NULL) // allocate the output tensor on first build
{
this->output = new Tensor<feature_t>;
}
this->output->set_shape(input.shape);
}
this->output->set_shape(this->output_shape);
this->output->set_exponent(input.exponent);
this->output->free_element();
}
else
{
this->output = &input;
this->output->set_shape(this->output_shape);
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}

}

/**
@@ -100,10 +108,14 @@ namespace dl
{
DL_LOG_LAYER_LATENCY_INIT();

if(!this->inplace)
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@@ -114,6 +126,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::leakyrelu<true>(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
}
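
The (activation_alpha, activation_exponent) pair encodes the real alpha as activation_alpha * 2^activation_exponent; the quantization step below is an assumption based on those parameter names, not code from this PR:

#include <cmath>

// alpha is reconstructed as quantize_alpha(alpha, e) * 2^e
int quantize_alpha(float alpha, int exponent)
{
    return static_cast<int>(std::round(alpha * std::pow(2.0f, -exponent)));
}

// quantize_alpha(0.1f, -10) == 102, so LeakyReLU<int16_t>(102, -10) applies
// alpha of roughly 102 * 2^-10 = 0.0996 to negative inputs.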
46 changes: 32 additions & 14 deletions tools/sdk/esp32/include/esp-face/include/layer/dl_layer_max2d.hpp
@@ -22,28 +22,28 @@ namespace dl
class Max2D : public Layer
{
private:
Tensor<feature_t> *output; /*<! output ptr of max2d >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
Tensor<feature_t> *output; /*<! output ptr of max2d >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of max2d >*/
public:

/**
* @brief Construct a new Max2D object.
*
* @param name name of max2d
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
Max2D(const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
Max2D(const char *name = "Max2D", bool inplace = false) : Layer(name),
output(NULL), inplace(inplace), output_shape({})
{
this->inplace = inplace;
}

/**
* @brief Destroy the Max2D object
*
*/
~Max2D()
~Max2D()
{
if ((!this->inplace) && (this->output != NULL))
{
@@ -58,24 +58,34 @@ namespace dl
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1)
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);
this->output_shape = input0.shape;

if(!this->inplace)
if (!this->inplace)
{
if(this->output != NULL)
if (this->output == NULL) // allocate the output tensor on first build
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input0.shape);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input0;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -100,10 +110,14 @@ namespace dl
{
DL_LOG_LAYER_LATENCY_INIT();

if(!this->inplace)
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input0.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@@ -114,6 +128,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::max2d<true>(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "max2d");
}
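
A minimal Max2D usage sketch (Min2D below is symmetric); the call() signature is assumed to take the two inputs the same way build() does:

void max2d_example(dl::Tensor<int16_t> &input0, dl::Tensor<int16_t> &input1)
{
    dl::layer::Max2D<int16_t> max2d("max2d", true); // inplace=true
    max2d.build(input0, input1); // asserts matching shapes and exponents
    dl::Tensor<int16_t> &out = max2d.call(input0, input1);
    (void)out; // with inplace=true, out aliases input0; no extra buffer
}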
@@ -23,44 +23,54 @@ namespace dl
std::vector<int> filter_shape; /*<! filter shape in [filter_height, filter_width] >*/
const int stride_y; /*<! stride in height >*/
const int stride_x; /*<! stride in width >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
Tensor<feature_t> *output; /*<! output ptr of MaxPool2D >*/
std::vector<int> output_shape; /*<! output shape of MaxPool2D >*/

public:

/**
* @brief Construct a new MaxPool2D object.
*
* @param filter_shape filter shape in [filter_height, filter_width]
* @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
* - PADDING_VALID means no padding
* PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input,
* - PADDING_SAME results padding in TensorFlow style
* - PADDING_SAME_MXNET results padding in MXNET style
- PADDING_SAME_END results in TensorFlow-style padding
- PADDING_SAME_BEGIN results in MXNET-style padding
* - PADDING_NOT_SET means padding with the specific "padding" value below.
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
* the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
* @param stride_y stride in height
* @param stride_x stride in width
* @param name name of layer
*/
MaxPool2D(const std::vector<int> filter_shape,
const padding_type_t padding_type = PADDING_VALID,
std::vector<int> padding = {},
const int stride_y = 1,
const int stride_x = 1,
const char *name = NULL) : Layer(name),
filter_shape(filter_shape),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type)
const char *name = "MaxPool2D") : Layer(name),
filter_shape(filter_shape),
padding_type(padding_type),
padding(padding),
stride_y(stride_y),
stride_x(stride_x),
output_shape({})
{
this->output = new Tensor<feature_t>;
if (this->padding_type == PADDING_NOT_SET)
{
assert(this->padding.size() == 4);
}
}

/**
* @brief Destroy the MaxPool2D object.
*
*/
~MaxPool2D()
~MaxPool2D()
{
if (this->output != NULL)
{
@@ -72,18 +82,29 @@ namespace dl
* @brief Update output shape and padding.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);

this->output->set_exponent(input.exponent);
std::vector<int> output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
this->output->set_shape(output_shape);
this->output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
this->output->set_shape(this->output_shape);

this->padding = nn::get_pad_size(output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
input.set_padding_size(this->padding);
if (this->padding_type != PADDING_NOT_SET)
{
this->padding = nn::get_pad_size(this->output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
}
this->output->free_element();

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -111,7 +132,11 @@ namespace dl
DL_LOG_LAYER_LATENCY_INIT();

DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
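
The geometry this layer delegates to nn::get_output_shape() follows the standard pooling formulas; the helpers below restate them for reference (assumed equivalents, not the library's code):

// PADDING_VALID: no padding, the window must fit entirely inside the input
int valid_out(int in, int filter, int stride) { return (in - filter) / stride + 1; }

// PADDING_SAME_END / PADDING_SAME_BEGIN: zero-pad so that out = ceil(in / stride)
int same_out(int in, int stride) { return (in + stride - 1) / stride; }

// Example: a 28x28 input with a 2x2 filter and stride 2 gives
//   valid_out(28, 2, 2) == 14 and same_out(28, 2) == 14 (no padding needed);
// with stride 3, valid_out(28, 2, 3) == 9 while same_out(28, 3) == 10.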

53 changes: 35 additions & 18 deletions tools/sdk/esp32/include/esp-face/include/layer/dl_layer_min2d.hpp
@@ -22,28 +22,28 @@ namespace dl
class Min2D : public Layer
{
private:
Tensor<feature_t> *output; /*<! output of ptr min2d>*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
public:

Tensor<feature_t> *output; /*<! output of ptr min2d>*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of min2d >*/
public:
/**
* @brief Construct a new Min2D object
*
* @param name name of min2d
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
Min2D(const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
{
this->inplace = inplace;
}
Min2D(const char *name = "Min2D", bool inplace = false) : Layer(name),
output(NULL),
inplace(inplace),
output_shape({}) {}

/**
* @brief Destroy the Min2D object
*
*/
~Min2D()
~Min2D()
{
if ((!this->inplace) && (this->output != NULL))
{
@@ -58,25 +58,34 @@ namespace dl
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1)
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);
this->output_shape = input0.shape;

if(!this->inplace)
if (!this->inplace)
{
if(this->output != NULL)
if (this->output == NULL) // allocate the output tensor on first build
{
this->output = new Tensor<feature_t>;
}
this->output->set_shape(input0.shape);
this->output->set_shape(this->output_shape);
this->output->set_exponent(input0.exponent);
this->output->free_element();
}
else
{
this->output = &input0;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -101,10 +110,14 @@ namespace dl
{
DL_LOG_LAYER_LATENCY_INIT();

if(!this->inplace)
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input0.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@@ -115,6 +128,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::min2d<true>(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "min2d");
}
55 changes: 39 additions & 16 deletions tools/sdk/esp32/include/esp-face/include/layer/dl_layer_mul2d.hpp
@@ -21,33 +21,38 @@ namespace dl
class Mul2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const int output_exponent; /*<! exponent of output >*/
const Activation<feature_t> *activation; /*<! activation of Mul2D, if you don't specify anything, no activation is applied >*/
Tensor<feature_t> *output; /*<! output ptr of Mul2D >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
Tensor<feature_t> *output; /*<! output ptr of Mul2D >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of Mul2D >*/
public:
const int output_exponent; /*<! exponent of output >*/

/**
* @brief Construct a new Mul2D object.
*
* @param output_exponent exponent of output
* @param activation activation of Mul2D, if you don't specify anything, no activation is applied
* @param name name of layer
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
Mul2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = NULL, bool inplace = false) : Layer(name),
output_exponent(output_exponent),activation(activation), output(NULL)
Mul2D(const int output_exponent,
const Activation<feature_t> *activation = NULL,
const char *name = "Mul2D",
bool inplace = false) : Layer(name),
output_exponent(output_exponent),
activation(activation),
output(NULL),
inplace(inplace),
output_shape({})
{
this->inplace = inplace;
}

/**
* @brief Destroy the Multiply2D object.
*/
~Mul2D()
~Mul2D()
{
if ((!this->inplace) && (this->output != NULL))
{
@@ -61,24 +66,34 @@ namespace dl
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1)
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
this->output_shape = input0.shape;

if (!this->inplace)
{
if(this->output != NULL)
if (this->output == NULL) // allocate the output tensor on first build
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input0.shape);
this->output->set_shape(this->output_shape);
this->output->free_element();
}

else
{
this->output = &input0;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -106,7 +121,11 @@ namespace dl
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@@ -117,6 +136,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::mul2d<true>(*this->output, input0, input1, this->activation, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "mul2d");
}
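
Mul2D carries an output_exponent while Max2D/Min2D do not because multiplication changes the scale: for a = qa * 2^ea and b = qb * 2^eb, a*b = (qa*qb) * 2^(ea+eb), so the product must be rescaled to the requested exponent. An illustrative, truncating requantization (real kernels also round and saturate):

#include <cstdint>

int16_t mul_requantize(int16_t qa, int ea, int16_t qb, int eb, int e_out)
{
    int32_t prod = static_cast<int32_t>(qa) * qb; // exponent is now ea + eb
    int shift = e_out - (ea + eb);                // rescale to 2^e_out
    return static_cast<int16_t>(shift >= 0 ? prod >> shift : prod << -shift);
}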
49 changes: 35 additions & 14 deletions tools/sdk/esp32/include/esp-face/include/layer/dl_layer_prelu.hpp
@@ -24,30 +24,35 @@ namespace dl
int activation_exponent; /*<! exponent of quantized alpha elements >*/
Tensor<feature_t> *output; /*<! output ptr of prelu >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of prelu >*/
public:

/**
* @brief Construct a new PReLU object
*
* @param activation_element quantized alpha elements along channel axis
* @param activation_exponent exponent of quantized alpha elements
* @param name name of prelu
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
PReLU(const feature_t *activation_element, const int activation_exponent = 0, const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
PReLU(const feature_t *activation_element,
const int activation_exponent = 0,
const char *name = NULL,
bool inplace = "PReLU") : Layer(name),
activation_element(activation_element),
activation_exponent(activation_exponent),
output(NULL),
inplace(inplace),
output_shape({})
{
this->activation_element = activation_element;
this->activation_exponent = activation_exponent;
this->inplace = inplace;
}

/**
* @brief Destroy the PReLU object
*
*/
~PReLU()
~PReLU()
{
if ((!this->inplace) && (this->output != NULL))
{
@@ -59,23 +64,31 @@ namespace dl
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
if(!this->inplace)
this->output_shape = input.shape;
if (!this->inplace)
{
if(this->output != NULL)
if (this->output == NULL) // allocate the output tensor on first build
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(input.exponent);
this->output->set_shape(input.shape);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -99,11 +112,15 @@ namespace dl
{
DL_LOG_LAYER_LATENCY_INIT();

if(!this->inplace)
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->set_exponent(input.exponent);
this->output->apply_element();
this->output->malloc_element();
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

DL_LOG_LAYER_LATENCY_START();
@@ -113,6 +130,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
}
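
PReLU generalizes LeakyReLU by carrying one quantized alpha per channel. A hedged usage sketch (the alpha values are hypothetical; the channel count must match input.shape[2]):

static const int16_t alphas[3] = {102, 51, 26}; // per-channel alphas, exponent -10

void prelu_example(dl::Tensor<int16_t> &input) // e.g. shape {H, W, 3}
{
    dl::layer::PReLU<int16_t> prelu(alphas, -10, "prelu");
    prelu.build(input);
    prelu.call(input);
}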
45 changes: 30 additions & 15 deletions tools/sdk/esp32/include/esp-face/include/layer/dl_layer_relu.hpp
@@ -21,29 +21,28 @@ namespace dl
class ReLU : public Layer
{
private:
Tensor<feature_t> *output; /*<! output ptr of relu >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
Tensor<feature_t> *output; /*<! output ptr of relu >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of relu >*/
public:


/**
* @brief Construct a new ReLU object
*
* @param name name of relu
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
ReLU(const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
ReLU(const char *name = "ReLU", bool inplace = false) : Layer(name),
output(NULL), inplace(inplace), output_shape({})
{
this->inplace = inplace;
}

/**
* @brief Destroy the ReLU object
*
*/
~ReLU()
~ReLU()
{
if ((!this->inplace) && (this->output != NULL))
{
@@ -55,23 +54,31 @@ namespace dl
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
if(!this->inplace)
this->output_shape = input.shape;
if (!this->inplace)
{
if(this->output != NULL)
if (this->output == NULL) // allocate the output tensor on first build
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(input.exponent);
this->output->set_shape(input.shape);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@@ -95,10 +102,14 @@ namespace dl
{
DL_LOG_LAYER_LATENCY_INIT();

if(!this->inplace)
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@@ -109,6 +120,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::relu(*this->output, input, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "relu");
}
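
All of these layers share the same lifecycle: construct once, build() when the input geometry is known, then call() per inference. A hedged end-to-end sketch chaining two of the layers from this PR (the Flatten header name is assumed; dl_layer_relu.hpp is named above):

#include "dl_layer_relu.hpp"
#include "dl_layer_flatten.hpp" // assumed header name

void forward(dl::Tensor<int16_t> &x)
{
    static dl::layer::ReLU<int16_t> relu("relu", true); // inplace
    static dl::layer::Flatten<int16_t> flatten("flatten");
    static bool built = false;
    if (!built)
    {
        relu.build(x);                    // inplace: output aliases x
        flatten.build(relu.get_output()); // shape known after relu.build()
        built = true;
    }
    relu.call(x);
    flatten.call(relu.get_output());
}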