Skip to content

Commit 69dbeae

Browse files
committed
examples/natmod/deepcraft: Add code changes.
Signed-off-by: NikhitaR-IFX <nikhita.rajasekhar@infineon.com>
1 parent 446d370 commit 69dbeae

4 files changed

Lines changed: 50 additions & 205 deletions

File tree

examples/natmod/deepcraft/Makefile

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,18 @@ ARCH = armv7emsp
1313
# Link with libm.a and libgcc.a from the toolchain
1414
LINK_RUNTIME = 1
1515

# Map sqrtf onto the compiler builtin and disable errno/trap handling so no
# libm sqrtf object is pulled into the freestanding native module.
CFLAGS += -Dsqrtf=__builtin_sqrtf -fno-math-errno -fno-trapping-math -fno-builtin-logf -ffreestanding
# -fdata-sections (NOT --data-sections, which GCC rejects) gives each datum its
# own section so the linker can drop unreferenced ones with --gc-sections.
CFLAGS += -fdata-sections
# If -lm is required:
# Hide every symbol coming from static libraries (e.g. libm) so they cannot
# clash with or leak into the dynamic module's export table.
LDFLAGS += -Wl,--exclude-libs,ALL
#CFLAGS += -Dlogf=__builtin_logf -ffreestanding -fno-math-errno -fno-trapping-math -ffast-math
1628
# Include to get the rules for compiling and linking the module
1729
include $(MPY_DIR)/py/dynruntime.mk
1830

examples/natmod/deepcraft/dc_mp_iface.c

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,34 @@
11
#include "py/dynruntime.h"
22

33

4+
#if !defined(__linux__)
// Freestanding (non-Linux) builds of this native module have no libc, but the
// compiler may still emit calls to memcpy/memset for struct copies and array
// initialisation.  Route them through the MicroPython runtime function table.
// memmove_ also satisfies memcpy's contract: memmove permits overlapping
// regions, a superset of what memcpy requires.
void *memcpy(void *dst, const void *src, size_t n) {
    return mp_fun_table.memmove_(dst, src, n);
}
void *memset(void *s, int c, size_t n) {
    return mp_fun_table.memset_(s, c, n);
}
/*float logf(float x) {
    // Optional: You can define a very basic approximation, or just return x
    // to make the code compile and defer real computation
    return x; // Placeholder – replace with approximation if needed
}*/
#endif
18+
19+
// Backing storage for errno inside the native module.  Some libc/compiler
// runtime objects reach errno through __errno() (newlib, used by the ARM
// bare-metal target) or __errno_location() (glibc, used when built for
// Linux); provide whichever accessor matches the target so those references
// resolve without linking a full libc.
int native_errno = 0;
#if defined(__linux__)
int *__errno_location(void)
#else
int *__errno(void)
#endif
{
    return &native_errno;
}
28+
429
#include "examples/natmod/deepcraft/mp_src.c"
530

31+
632
typedef struct _dc_obj_t {
733
mp_obj_base_t base;
834
} dc_obj_t;

examples/natmod/deepcraft/model.c

Lines changed: 6 additions & 199 deletions
Original file line numberDiff line numberDiff line change
@@ -1,197 +1,3 @@
1-
/*
2-
* DEEPCRAFT Studio 5.2.2102+2abb7d94b69921b66d282c11929c83b8adc99268
3-
* Copyright © 2023- Imagimob AB, All Rights Reserved.
4-
*
5-
* Generated at 12/09/2024 06:20:58 UTC. Any changes will be lost.
6-
*
7-
* Model ID d30ff2e4-3b55-4b8f-b3e5-ab5513979b3b
8-
*
9-
* Memory Size Efficiency
10-
* Buffers 33600 bytes (RAM) 86 %
11-
* State 8392 bytes (RAM) 100 %
12-
* Readonly 155056 bytes (Flash) 100 %
13-
*
14-
* Backend tensorflow
15-
* Keras Version 2.15.0
16-
* Backend Model Type Sequential
17-
* Backend Model Name conv2d-medium-speed-0
18-
*
19-
* Class Index | Symbol Label
20-
* 0 | unlabelled
21-
* 1 | baby_cry
22-
*
23-
* Layer Shape Type Function
24-
* Sliding Window (data points) [512] float dequeue
25-
* window_shape = [512]
26-
* stride = 160
27-
* buffer_multiplier = 1
28-
* Hann smoothing [512] float dequeue
29-
* sym = True
30-
* Real Discrete Fourier Transform [257,2] float dequeue
31-
* axis = 0
32-
* Frobenius norm [257] float dequeue
33-
* axis = 0
34-
* Mel Filterbank [20] float dequeue
35-
* num_filters = 20
36-
* sample_rate = 16000
37-
* f_low = 300
38-
* f_high = 8000
39-
* htk = True
40-
* librosa = False
41-
* Clip [20] float dequeue
42-
* min = 0.000316227766016
43-
* max = 3.40282347E+38
44-
* Logarithm [20] float dequeue
45-
* base = 0
46-
* Sliding Window (data points) [60,20] float dequeue
47-
* window_shape = [60,20]
48-
* stride = 660
49-
* buffer_multiplier = 1
50-
* Input Layer [60,20] float dequeue
51-
* shape = [60,20]
52-
* Reshape [60,20,1] float dequeue
53-
* shape = [60,20,1]
54-
* trainable = True
55-
* Convolution 2D [30,10,12] float dequeue
56-
* filters = 12
57-
* kernel_size = [5,5]
58-
* strides = [2,2]
59-
* padding = same
60-
* activation = linear
61-
* use_bias = False
62-
* trainable = True
63-
* weight = float[5,5,1,12]
64-
* Batch Normalization [30,10,12] float dequeue
65-
* epsilon = 0.001
66-
* trainable = True
67-
* scale = True
68-
* center = True
69-
* axis = 3
70-
* gamma = float[12]
71-
* beta = float[12]
72-
* mean = float[12]
73-
* variance = float[12]
74-
* Activation [30,10,12] float dequeue
75-
* activation = relu
76-
* trainable = True
77-
* Convolution 2D [15,5,24] float dequeue
78-
* filters = 24
79-
* kernel_size = [5,5]
80-
* strides = [2,2]
81-
* padding = same
82-
* activation = linear
83-
* use_bias = False
84-
* trainable = True
85-
* weight = float[5,5,12,24]
86-
* Convolution 2D [8,3,24] float dequeue
87-
* filters = 24
88-
* kernel_size = [5,5]
89-
* strides = [2,2]
90-
* padding = same
91-
* activation = linear
92-
* use_bias = False
93-
* trainable = True
94-
* weight = float[5,5,24,24]
95-
* Batch Normalization [8,3,24] float dequeue
96-
* epsilon = 0.001
97-
* trainable = True
98-
* scale = True
99-
* center = True
100-
* axis = 3
101-
* gamma = float[24]
102-
* beta = float[24]
103-
* mean = float[24]
104-
* variance = float[24]
105-
* Activation [8,3,24] float dequeue
106-
* activation = relu
107-
* trainable = True
108-
* Convolution 2D [8,3,32] float dequeue
109-
* filters = 32
110-
* kernel_size = [3,3]
111-
* strides = [1,1]
112-
* padding = same
113-
* activation = linear
114-
* use_bias = False
115-
* trainable = True
116-
* weight = float[3,3,24,32]
117-
* Convolution 2D [8,3,32] float dequeue
118-
* filters = 32
119-
* kernel_size = [3,3]
120-
* strides = [1,1]
121-
* padding = same
122-
* activation = linear
123-
* use_bias = False
124-
* trainable = True
125-
* weight = float[3,3,32,32]
126-
* Batch Normalization [8,3,32] float dequeue
127-
* epsilon = 0.001
128-
* trainable = True
129-
* scale = True
130-
* center = True
131-
* axis = 3
132-
* gamma = float[32]
133-
* beta = float[32]
134-
* mean = float[32]
135-
* variance = float[32]
136-
* Activation [8,3,32] float dequeue
137-
* activation = relu
138-
* trainable = True
139-
* Global average pooling 2D [32] float dequeue
140-
* Dense [2] float dequeue
141-
* units = 2
142-
* use_bias = True
143-
* activation = linear
144-
* trainable = True
145-
* weight = float[32,2]
146-
* bias = float[2]
147-
* Activation [2] float dequeue
148-
* activation = softmax
149-
* trainable = True
150-
*
151-
* Exported functions:
152-
*
153-
* int IMAI_dequeue(float *restrict data_out)
154-
* Description: Dequeue features. RET_SUCCESS (0) on success, RET_NODATA (-1) if no data is available, RET_NOMEM (-2) on internal memory error
155-
* Parameter data_out is Output of size float[2].
156-
*
157-
* int IMAI_enqueue(const float *restrict data_in)
158-
* Description: Enqueue features. Returns SUCCESS (0) on success, else RET_NOMEM (-2) when low on memory.
159-
* Parameter data_in is Input of size float[1].
160-
*
161-
* void IMAI_init(void)
162-
* Description: Initializes buffers to initial state. This function also works as a reset function.
163-
*
164-
*
165-
* Disclaimer:
166-
* The generated code relies on the optimizations done by the C compiler.
167-
* For example many for-loops of length 1 must be removed by the optimizer.
168-
* This can only be done if the functions are inlined and simplified.
169-
* Check disassembly if unsure.
170-
* tl;dr Compile using gcc with -O3 or -Ofast
171-
*/
172-
173-
/*
174-
* Tensorflow Test Set
175-
*
176-
* (ACC) Accuracy 82.509 %
177-
* (F1S) F1 Score 83.636 %
178-
*
179-
* Name of class (unlabelled) baby_cry
180-
* (TP) True Positive or Correct Positive Prediction 622 62
181-
* (FN) False Negative or Incorrect Negative Prediction 94 51
182-
* (FP) False Positive or Incorrect Positive Prediction 51 94
183-
* (TN) True Negative or Correct Negative Prediction 62 622
184-
* (TPR) True Positive Rate or Sensitivity, Recall 86.87 % 54.87 %
185-
* (TNR) True Negative Rate or Specificity, Selectivity 54.87 % 86.87 %
186-
* (PPV) Positive Predictive Value or Precision 92.42 % 39.74 %
187-
* (NPV) Negative Predictive Value 39.74 % 92.42 %
188-
* (FNR) False Negative Rate or Miss Rate 13.13 % 45.13 %
189-
* (FPR) False Positive Rate or Fall-Out 45.13 % 13.13 %
190-
* (FDR) False Discovery Rate 7.58 % 60.26 %
191-
* (FOR) False Omission Rate 60.26 % 7.58 %
192-
* (F1S) F1 Score 89.56 % 46.10 %
193-
*/
194-
1951
#include <float.h>
1962
#include <math.h>
1973
#include <stdint.h>
@@ -204,7 +10,7 @@ int8_t _buffer[33600];
20410
int8_t _state[8392];
20511

20612
// Parameters
207-
static const uint32_t _K4[] = {
13+
const uint32_t _K4[] = {
20814
0x00000000, 0x381e87c4, 0x391e863b, 0x39b25423, 0x3a1e8019, 0x3a77a0f6, 0x3ab2449b, 0x3af29a52,
20915
0x3b1e6790, 0x3b487014, 0x3b776514, 0x3b95a260, 0x3bb2068a, 0x3bd0ddef, 0x3bf2275e, 0x3c0af0c6,
21016
0x3c1e058c, 0x3c325144, 0x3c47d325, 0x3c5e8a59, 0x3c767600, 0x3c87ca96, 0x3c94f373, 0x3ca2b513,
@@ -5159,7 +4965,7 @@ typedef struct
51594965
#define CBUFFER_NOMEM -1
51604966

51614967
// Initializes a cbuffer handle with given memory and size.
5162-
inline void cbuffer_init(cbuffer_t *dest, void *mem, int size) {
4968+
static inline void cbuffer_init(cbuffer_t *dest, void *mem, int size) {
51634969
dest->buf = mem;
51644970
dest->size = size;
51654971
dest->used = 0;
@@ -5168,7 +4974,7 @@ inline void cbuffer_init(cbuffer_t *dest, void *mem, int size) {
51684974
}
51694975

51704976
// Returns the number of free bytes in buffer.
5171-
inline int cbuffer_get_free(cbuffer_t *buf) {
4977+
static inline int cbuffer_get_free(cbuffer_t *buf) {
51724978
return buf->size - buf->used;
51734979
}
51744980

@@ -8267,7 +8073,7 @@ int IMAI_dequeue(float *restrict data_out) {
82678073
loge_f32(_K13, 20, _K14);
82688074
__RETURN_ERROR_BREAK_EMPTY(fixwin_enqueuef32(_K16, _K14));
82698075
}
8270-
__RETURN_ERROR(fixwin_dequeuef32(_K16, _K15, 33));
8076+
/*__RETURN_ERROR(fixwin_dequeuef32(_K16, _K15, 33));
82718077
conv2d_f32(_K15, _K18, _K17, 12, 5, 5, 2, 2, 60, 20, 1, 1, 2, 1, 2);
82728078
mul_f32(_K17, _K20, 1, 1, 1, 300, 12, _K22);
82738079
add_f32(_K22, _K21, 1, 1, 1, 300, 12, _K19);
@@ -8285,7 +8091,7 @@ int IMAI_dequeue(float *restrict data_out) {
82858091
globav2d_f32(_K41, 3, 8, 32, _K42);
82868092
dott_f32(_K43, _K42, _K44, 32, 2, 1);
82878093
add_f32(_K44, _K45, 1, 1, 1, 1, 2, _K46);
8288-
softmax_f32(_K46, 2, data_out);
8094+
softmax_f32(_K46, 2, data_out);*/
82898095
return 0;
82908096
}
82918097

@@ -8298,3 +8104,4 @@ void IMAI_init(void) {
82988104
fixwin_initf32(_K3, 4, 512);
82998105
fixwin_initf32(_K16, 80, 60);
83008106
}
8107+

examples/natmod/deepcraft/mp_src.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,12 @@
44

55
mp_obj_t init(void){
66
//assign();
7-
//IMAI_init();
7+
IMAI_init();
88
return mp_const_none;
99
}
1010

1111
mp_obj_t enqueue(const mp_obj_t data_in_obj){
12-
/*float data_in[IMAI_DATA_IN_COUNT];
12+
float data_in[IMAI_DATA_IN_COUNT];
1313
mp_obj_t *data_in_items;
1414
size_t len;
1515
mp_obj_get_array(data_in_obj, &len, &data_in_items);
@@ -20,12 +20,12 @@ mp_obj_t enqueue(const mp_obj_t data_in_obj){
2020
data_in[i] = mp_obj_get_float(data_in_items[i]);
2121
}
2222
int result = IMAI_enqueue(data_in);
23-
return MP_OBJ_NEW_SMALL_INT(result);*/
23+
return MP_OBJ_NEW_SMALL_INT(result);
2424
return mp_const_none;
2525
}
2626

2727
mp_obj_t dequeue(mp_obj_t data_out_obj) {
28-
/*mp_buffer_info_t buf_info;
28+
mp_buffer_info_t buf_info;
2929
mp_get_buffer(data_out_obj, &buf_info, MP_BUFFER_WRITE);
3030
float *data_out = (float *)buf_info.buf;
3131
int result = IMAI_dequeue(data_out);
@@ -36,8 +36,8 @@ mp_obj_t dequeue(mp_obj_t data_out_obj) {
3636
} else if (result == -2) {
3737
mp_raise_ValueError(MP_ERROR_TEXT("Internal memory allocation error"));
3838
}
39-
return MP_OBJ_NEW_SMALL_INT(result);*/
40-
return mp_const_none;
39+
return MP_OBJ_NEW_SMALL_INT(result);
40+
//return mp_const_none;
4141
}
4242

4343

0 commit comments

Comments
 (0)