@@ -363,6 +363,8 @@ function retrieveModelResponseExtraction (tags, body) {
   tags['openai.response.parent'] = body.parent
   tags['openai.response.root'] = body.root
 
+  if (!body.permission) return
+
   tags['openai.response.permission.id'] = body.permission[0].id
   tags['openai.response.permission.created'] = body.permission[0].created
   tags['openai.response.permission.allow_create_engine'] = body.permission[0].allow_create_engine
@@ -382,10 +384,14 @@ function commonLookupFineTuneRequestExtraction (tags, body) {
 }
 
 function listModelsResponseExtraction (tags, body) {
+  if (!body.data) return
+
   tags['openai.response.count'] = body.data.length
 }
 
 function commonImageResponseExtraction (tags, body) {
+  if (!body.data) return
+
   tags['openai.response.images_count'] = body.data.length
 
   for (let i = 0; i < body.data.length; i++) {
@@ -400,7 +406,7 @@ function createAudioResponseExtraction (tags, body) {
   tags['openai.response.text'] = body.text
   tags['openai.response.language'] = body.language
   tags['openai.response.duration'] = body.duration
-  tags['openai.response.segments_count'] = body.segments.length
+  tags['openai.response.segments_count'] = defensiveArrayLength(body.segments)
 }
 
 function createFineTuneRequestExtraction (tags, body) {
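
The defensiveArrayLength helper introduced by the new lines above is defined elsewhere in this change and is not shown in these hunks. A minimal sketch of what such a guard could look like, assuming it only needs to tolerate a missing array (illustrative, not the actual definition from the patch):

// Hypothetical sketch: return the length only when the value is present,
// so tagging never throws on a missing or null field.
function defensiveArrayLength (maybeArray) {
  if (maybeArray) return maybeArray.length
}
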
@@ -417,21 +423,24 @@ function createFineTuneRequestExtraction (tags, body) {
 }
 
 function commonFineTuneResponseExtraction (tags, body) {
-  tags['openai.response.events_count'] = body.events.length
+  tags['openai.response.events_count'] = defensiveArrayLength(body.events)
   tags['openai.response.fine_tuned_model'] = body.fine_tuned_model
-  tags['openai.response.hyperparams.n_epochs'] = body.hyperparams.n_epochs
-  tags['openai.response.hyperparams.batch_size'] = body.hyperparams.batch_size
-  tags['openai.response.hyperparams.prompt_loss_weight'] = body.hyperparams.prompt_loss_weight
-  tags['openai.response.hyperparams.learning_rate_multiplier'] = body.hyperparams.learning_rate_multiplier
-  tags['openai.response.training_files_count'] = body.training_files.length
-  tags['openai.response.result_files_count'] = body.result_files.length
-  tags['openai.response.validation_files_count'] = body.validation_files.length
+  if (body.hyperparams) {
+    tags['openai.response.hyperparams.n_epochs'] = body.hyperparams.n_epochs
+    tags['openai.response.hyperparams.batch_size'] = body.hyperparams.batch_size
+    tags['openai.response.hyperparams.prompt_loss_weight'] = body.hyperparams.prompt_loss_weight
+    tags['openai.response.hyperparams.learning_rate_multiplier'] = body.hyperparams.learning_rate_multiplier
+  }
+  tags['openai.response.training_files_count'] = defensiveArrayLength(body.training_files)
+  tags['openai.response.result_files_count'] = defensiveArrayLength(body.result_files)
+  tags['openai.response.validation_files_count'] = defensiveArrayLength(body.validation_files)
   tags['openai.response.updated_at'] = body.updated_at
   tags['openai.response.status'] = body.status
 }
 
 // the OpenAI package appears to stream the content download then provide it all as a singular string
 function downloadFileResponseExtraction (tags, body) {
+  if (!body.file) return
   tags['openai.response.total_bytes'] = body.file.length
 }
 
@@ -472,20 +481,26 @@ function createRetrieveFileResponseExtraction (tags, body) {
 function createEmbeddingResponseExtraction (tags, body) {
   usageExtraction(tags, body)
 
+  if (!body.data) return
+
   tags['openai.response.embeddings_count'] = body.data.length
   for (let i = 0; i < body.data.length; i++) {
     tags[`openai.response.embedding.${i}.embedding_length`] = body.data[i].embedding.length
   }
 }
 
 function commonListCountResponseExtraction (tags, body) {
+  if (!body.data) return
   tags['openai.response.count'] = body.data.length
 }
 
 // TODO: Is there ever more than one entry in body.results?
 function createModerationResponseExtraction (tags, body) {
   tags['openai.response.id'] = body.id
   // tags[`openai.response.model`] = body.model // redundant, already extracted globally
+
+  if (!body.results) return
+
   tags['openai.response.flagged'] = body.results[0].flagged
 
   for (const [category, match] of Object.entries(body.results[0].categories)) {
@@ -501,6 +516,8 @@ function createModerationResponseExtraction (tags, body) {
 function commonCreateResponseExtraction (tags, body, store) {
   usageExtraction(tags, body)
 
+  if (!body.choices) return
+
   tags['openai.response.choices_count'] = body.choices.length
 
   store.choices = body.choices
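
Taken together, these guards let the extraction functions tolerate response bodies that omit the expected fields, for example an error payload that carries no data, permission, results or choices arrays. A small illustrative usage, repeating one patched extractor from the hunks above so the snippet stands on its own (the error body shown is only an example shape):

// The patched extractor from the second hunk, repeated here for a self-contained example.
function listModelsResponseExtraction (tags, body) {
  if (!body.data) return

  tags['openai.response.count'] = body.data.length
}

const tags = {}
// A body with no `data` field; previously this threw a TypeError when
// reading body.data.length, now the function simply returns early.
listModelsResponseExtraction(tags, { error: { message: 'Rate limit reached' } })
console.log(tags) // {}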