@@ -1,3 +1,4 @@
+#include "R.h"
 #include "ggml-backend-impl.h"
 #include "ggml-alloc.h"
 #include "ggml-impl.h"
@@ -231,7 +232,7 @@ void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst
     //printf("dst: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", dst->name, (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], (int)dst->nb[0], (int)dst->nb[1], (int)dst->nb[2], (int)dst->nb[3]);
     GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

-    // fprintf(stderr, "cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src));
+    // Rprintf( "cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src));

     if (src == dst) {
         return;
@@ -246,7 +247,7 @@ void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst
     } else {
         // shouldn't be hit when copying from/to CPU
         #ifndef NDEBUG
-        fprintf(stderr, "ggml_backend_tensor_copy: neither cpy_tensor_from nor cpy_tensor_to "
+        Rprintf( "ggml_backend_tensor_copy: neither cpy_tensor_from nor cpy_tensor_to "
                         "are implemented for %s and %s, falling back to get/set\n", src->name, dst->name);
         #endif
         size_t nbytes = ggml_nbytes(src);
@@ -312,7 +313,7 @@ void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml
     snprintf(ggml_backend_registry[id].name, sizeof(ggml_backend_registry[id].name), "%s", name);

 #ifndef NDEBUG
-    fprintf(stderr, "%s: registered backend %s\n", __func__, name);
+    Rprintf( "%s: registered backend %s\n", __func__, name);
 #endif

     ggml_backend_registry_count++;
@@ -355,7 +356,7 @@ ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str)
     size_t backend_i = ggml_backend_reg_find_by_name(backend_name);

     if (backend_i == SIZE_MAX) {
-        fprintf(stderr, "%s: backend %s not found\n", __func__, backend_name);
+        Rprintf( "%s: backend %s not found\n", __func__, backend_name);
         return NULL;
     }

@@ -510,7 +511,7 @@ static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_
     void * ptr;
     int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size);
     if (result != 0) {
-        fprintf(stderr, "failed to allocate HBM buffer of size %zu\n", size);
+        Rprintf( "failed to allocate HBM buffer of size %zu\n", size);
         return NULL;
     }

@@ -842,13 +843,13 @@ static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgra
     for (int i = 0; i < graph->n_nodes; i++) {
         if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
             ggml_backend_t split_backend = get_allocr_backend(sched, sched->splits[cur_split].tallocr);
-            fprintf(stderr, "\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend),
+            Rprintf( "\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend),
                 sched->splits[cur_split].n_inputs);
             for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
-                fprintf(stderr, "[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
+                Rprintf( "[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
                     fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
             }
-            fprintf(stderr, "\n");
+            Rprintf( "\n");
             cur_split++;
         }
         struct ggml_tensor * node = graph->nodes[i];
@@ -857,7 +858,7 @@ static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgra
         }
         ggml_tallocr_t node_allocr = node_allocr(node);
         ggml_backend_t node_backend = node_allocr ? get_allocr_backend(sched, node_allocr) : NULL; // FIXME:
-        fprintf(stderr, "node #%3d (%10.10s): %20.20s (%4.4s) [%4.4s %8.8s]:", i, ggml_op_name(node->op), node->name,
+        Rprintf( "node #%3d (%10.10s): %20.20s (%4.4s) [%4.4s %8.8s]:", i, ggml_op_name(node->op), node->name,
             fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", GET_CAUSE(node));
         for (int j = 0; j < GGML_MAX_SRC; j++) {
             struct ggml_tensor * src = node->src[j];
@@ -866,10 +867,10 @@ static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgra
             }
             ggml_tallocr_t src_allocr = node_allocr(src);
             ggml_backend_t src_backend = src_allocr ? get_allocr_backend(sched, src_allocr) : NULL;
-            fprintf(stderr, " %20.20s (%4.4s) [%4.4s %8.8s]", src->name,
+            Rprintf( " %20.20s (%4.4s) [%4.4s %8.8s]", src->name,
                 fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
         }
-        fprintf(stderr, "\n");
+        Rprintf( "\n");
     }
 }

@@ -1049,15 +1050,15 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
     sched->splits[cur_split].i_end = graph->n_nodes;
     sched->n_splits = cur_split + 1;

-    //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout);
+    //Rprintf( "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout);

 #if 1
     // sanity check: all sources should have the same backend as the node
     for (int i = 0; i < graph->n_nodes; i++) {
         struct ggml_tensor * node = graph->nodes[i];
         ggml_tallocr_t node_allocr = node_allocr(node);
         if (node_allocr == NULL) {
-            fprintf(stderr, "!!!!!!! %s has no backend\n", node->name);
+            Rprintf( "!!!!!!! %s has no backend\n", node->name);
         }
         for (int j = 0; j < GGML_MAX_SRC; j++) {
             struct ggml_tensor * src = node->src[j];
@@ -1066,7 +1067,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
             }
             ggml_tallocr_t src_allocr = node_allocr(src);
             if (src_allocr != node_allocr /* && src_backend != NULL */) { // ignore nulls for now
-                fprintf(stderr, "!!!! %s has backend %s, src %d (%s) has backend %s\n",
+                Rprintf( "!!!! %s has backend %s, src %d (%s) has backend %s\n",
                     node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
                     j, src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL");
             }
@@ -1122,15 +1123,15 @@ static void sched_compute_splits(ggml_backend_sched_t sched) {
             struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_backend_prio(sched, split_backend)];
             if (input->buffer == NULL) {
                 if (input->view_src == NULL) {
-                    fprintf(stderr, "input %s has no buffer and no view_src\n", input->name);
-                    exit(1);
+                    Rprintf( "input %s has no buffer and no view_src\n", input->name);
+                    Rf_error("whispercpp error");
                 }
                 // FIXME: may need to use the sched buffer instead
                 ggml_backend_view_init(input->view_src->buffer, input);
             }
             if (input_cpy->buffer == NULL) {
-                fprintf(stderr, "input_cpy %s has no buffer\n", input_cpy->name);
-                exit(1);
+                Rprintf( "input_cpy %s has no buffer\n", input_cpy->name);
+                Rf_error("whispercpp error");
             }
             //GGML_ASSERT(input->buffer->backend != input_cpy->buffer->backend);
             //GGML_ASSERT(input_cpy->buffer->backend == split_backend);
@@ -1155,10 +1156,10 @@ static void sched_compute_splits(ggml_backend_sched_t sched) {

 #if 0
     // per-backend timings
-    fprintf(stderr, "sched_compute_splits times (%d splits):\n", sched->n_splits);
+    Rprintf( "sched_compute_splits times (%d splits):\n", sched->n_splits);
     for (int i = 0; i < sched->n_backends; i++) {
         if (copy_us[i] > 0 || compute_us[i] > 0) {
-            fprintf(stderr, "\t%5.5s: %lu us copy, %lu us compute\n", ggml_backend_name(sched->backends[i]), copy_us[i], compute_us[i]);
+            Rprintf( "\t%5.5s: %lu us copy, %lu us compute\n", ggml_backend_name(sched->backends[i]), copy_us[i], compute_us[i]);
         }
     }
 #endif
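
For reference, a minimal standalone sketch (not part of the diff above) of the substitution pattern these hunks apply: R's C API declares Rprintf()/REprintf() in R_ext/Print.h and Rf_error() in R_ext/Error.h, all pulled in via R.h, and these replace fprintf(stderr, ...) and exit() in compiled code shipped inside an R package. The function name below is hypothetical and purely illustrative.

#include <R.h>   /* brings in R_ext/Print.h (Rprintf, REprintf) and R_ext/Error.h (Rf_error) */

/* Illustrative only: the fprintf -> Rprintf and exit -> Rf_error mapping used in the diff. */
static void report_alloc_failure(size_t size) {
    /* instead of: fprintf(stderr, "failed to allocate %zu bytes\n", size); */
    Rprintf("failed to allocate %zu bytes\n", size);   /* REprintf() would target R's error stream */
    /* instead of: exit(1); -- package code must not terminate the R process */
    Rf_error("whispercpp error");                      /* jumps back to R's error handler */
}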