
onestop

ArgsParser

Bases: Tap

Args parser for preprocessing.py

Note: for fixation data, the X_IA_DWELL_TIME columns, for X in
[total, min, max, part_total, part_min, part_max],
are computed from the CURRENT_FIX_DURATION column.

Note: documentation was generated automatically. Please check the source code for more info.

Args:

    SURPRISAL_MODELS (list[str]): Models to extract surprisal from
    unique_item_columns (list[str]): Columns that make up a unique item
    (Path | None): Path to question difficulty data from Prolific
    mode (Mode): Whether to use interest area or fixation data

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
class ArgsParser(Tap):
    """Args parser for preprocessing.py

        Note, for fixation data, the X_IA_DWELL_TIME, for X in
        [total, min, max, part_total, part_min, part_max]
        columns are computed based on the CURRENT_FIX_DURATION column.

        Note, documentation was generated automatically. Please check the source code for more info.
    Args:
        SURPRISAL_MODELS (list[str]): Models to extract surprisal from
        unique_item_columns (list[str]): columns that make up a unique item
         (Path | None): Path to question difficulty data from prolific
        mode (Mode): whether to use interest area or fixation data
    """

    SURPRISAL_MODELS: list[str] = [
        'gpt2',
    ]  # Models to shift surprisal for

    onestopqa_path: Path = Path('metadata/onestop_qa.json')
    mode: Mode = Mode.IA  # whether to use interest area or fixation data
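
ArgsParser is a Tap class, so the command-line flags are derived from the typed attributes above. A minimal usage sketch (mirroring the call made later in dataset_specific_processing; the flag values here are illustrative):

cfg = ArgsParser().parse_args(
    [
        '--mode', 'ia',  # Tap converts the string into the Mode enum
        '--SURPRISAL_MODELS', 'gpt2',
        '--onestopqa_path', 'metadata/onestop_qa.json',
    ]
)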

Mode

Bases: Enum

Enum for processing mode. Defines whether to process interest area (IA) or fixation data.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
class Mode(Enum):
    """
    Enum for processing mode.
    Defines whether to process interest area (IA) or fixation data.
    """

    IA = 'ia'
    FIXATION = 'fixations'
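
Because the member values are the plain strings 'ia' and 'fixations', a Mode can be recovered from the string identifiers used elsewhere in the pipeline via standard Enum value lookup:

assert Mode('ia') is Mode.IA
assert Mode('fixations') is Mode.FIXATION
assert Mode.FIXATION.value == 'fixations'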

OneStopProcessor

Bases: DatasetProcessor

Processor for OneStop dataset

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
class OneStopProcessor(DatasetProcessor):
    """Processor for OneStop dataset"""

    def get_column_map(self, data_type: DataType) -> dict:
        """Get column mapping for OneStop dataset"""
        return {
            # Empty for now as it's handled in onestop processing
        }

    def get_columns_to_keep(self) -> list:
        """Get list of columns to keep after filtering"""
        return list(
            set(
                list(Fields)
                + BaseModelArgs().word_features
                + BaseModelArgs().eye_features
                + BaseModelArgs().fixation_features
                + BEyeLSTMArgs().fixation_features
                + BEyeLSTMArgs().eye_features
                + BEyeLSTMArgs().word_features
                + ['unique_trial_id']
                + self.data_args.groupby_columns
                + [
                    'CURRENT_FIX_NEAREST_INTEREST_AREA_DISTANCE',
                    'IA_FIRST_RUN_LANDING_POSITION',
                    'IA_LAST_RUN_LANDING_POSITION',
                    'NEXT_SAC_START_Y',
                    'NEXT_SAC_END_X',
                    'NEXT_SAC_END_Y',
                    'NEXT_SAC_START_X',
                    'ptb_pos',
                    'is_content_word',
                    'LengthCategory',
                    'is_reg_sum',
                    'is_progressive_sum',
                    'IA_REGRESSION_IN_COUNT_sum',
                    'normalized_outgoing_regression_count',
                    'normalized_outgoing_progressive_count',
                    'normalized_incoming_regression_count',
                    'LengthCategory_normalized_IA_DWELL_TIME',
                    'universal_pos_normalized_IA_DWELL_TIME',
                    'LengthCategory_normalized_IA_FIRST_FIXATION_DURATION',
                    'universal_pos_normalized_IA_FIRST_FIXATION_DURATION',
                    'entity_type',
                ]
            )
        )

    def dataset_specific_processing(
        self, data_dict: dict[str, pd.DataFrame]
    ) -> dict[str, pd.DataFrame]:
        """OneStop-specific processing steps"""
        surprisal_models = ['gpt2']

        for data_type in [DataType.IA, DataType.FIXATIONS]:
            if data_type not in data_dict or data_dict[data_type] is None:
                continue

            df = data_dict[data_type]

            args = [
                '--mode',
                data_type,
                '--SURPRISAL_MODELS',
                *surprisal_models,
                '--onestopqa_path',
                str(self.data_args.onestopqa_path),
            ]
            cfg = ArgsParser().parse_args(args)
            if data_type == DataType.IA:
                df = self.query_onestop_data(df, query=self.data_args.ia_query)
            elif data_type == DataType.FIXATIONS:
                df = self.query_onestop_data(df, query=self.data_args.fixation_query)

            df = df.drop(columns=['ptb_pos']).rename(columns={'Reduced_POS': 'ptb_pos'})
            df = our_processing(df=df, args=cfg)

            # add unique_trial_id column
            df['unique_trial_id'] = (
                df['participant_id'].astype(str)
                + '_'
                + df['unique_paragraph_id'].astype(str)
                + '_'
                + df['repeated_reading_trial'].astype(str)
                + '_'
                + df['practice_trial'].astype(str)
            )

            # add is_correct column
            df['is_correct'] = (df.selected_answer == 'A').astype(int)

            df[Fields.LEVEL] = (
                df[Fields.LEVEL].replace({'Adv': 1, 'Ele': 0}).astype(int)
            )
            if data_type == DataType.IA:
                df['head_direction'] = df['distance_to_head'] > 0
                df['head_direction'] = df['head_direction'].astype(int)

            data_dict[data_type] = df

        data_dict['fixations'] = self.add_ia_report_features_to_fixation_data(
            data_dict['ia'], data_dict['fixations']
        )

        for data_type in [DataType.IA, DataType.FIXATIONS]:
            data_dict[data_type] = add_missing_features(
                et_data=data_dict[data_type],
                trial_groupby_columns=self.data_args.groupby_columns,
                mode=data_type,
            )

        trial_level_features = compute_trial_level_features(
            raw_fixation_data=data_dict[DataType.FIXATIONS],
            raw_ia_data=data_dict[DataType.IA],
            trial_groupby_columns=self.data_args.groupby_columns,
            processed_data_path=self.data_args.processed_data_path,
        )
        data_dict[DataType.TRIAL_LEVEL] = trial_level_features

        return data_dict

    def query_onestop_data(self, data: pd.DataFrame, query: str | None) -> pd.DataFrame:
        """Process the raw data by applying a query"""
        if query is not None:
            data = data.query(query)
            logger.info(f'Number of rows after query ({query}): {len(data)}')
        else:
            logger.info('***** No query! *****')
        return data

    def add_ia_report_features_to_fixation_data(
        self, ia_df: pd.DataFrame, fix_df: pd.DataFrame
    ) -> pd.DataFrame:
        """
        Merge per‑IA (interest‑area) features into the fixation‑level data.

        Result: one row per fixation, enriched with IA‑level attributes.
        """
        # --- 1. Unify IA‑ID column name ----------------------------------------
        ia_df = ia_df.rename(
            columns={
                Fields.IA_DATA_IA_ID_COL_NAME: Fields.FIXATION_REPORT_IA_ID_COL_NAME
            }
        )

        # --- 2. Build the list of IA features we plan to add -------------------
        ia_features = (
            BEyeLSTMArgs().ia_features_to_add_to_fixation_data
            + BaseModelArgs().ia_features_to_add_to_fixation_data
            + PLMASfArgs().ia_features_to_add_to_fixation_data
            + ['entity_type']
        )

        required_cols = (
            self.data_args.groupby_columns
            + [Fields.FIXATION_REPORT_IA_ID_COL_NAME]
            + ia_features
        )
        ia_df = ia_df[list(set(required_cols))]

        # --- 3. Drop columns that also exist in fixation table -----------------
        merge_keys = set(
            self.data_args.groupby_columns + [Fields.FIXATION_REPORT_IA_ID_COL_NAME]
        )
        dup_cols = (set(fix_df.columns) & set(ia_df.columns)) - merge_keys
        ia_df = ia_df.drop(columns=list(dup_cols))

        # --- 4. Clean nuisance column -----------------------------------------
        if 'normalized_part_ID' in fix_df.columns:
            if fix_df['normalized_part_ID'].isna().any():
                logger.warning('normalized_part_ID contains NaNs; dropping it.')
            fix_df = fix_df.drop(columns='normalized_part_ID')

        # --- 5. Merge ----------------------------------------------------------
        enriched_fix_df = fix_df.merge(
            ia_df,
            on=list(merge_keys),
            how='left',
            validate='many_to_one',
        )

        num_of_words_in_trials_series = ia_df.groupby(
            self.data_args.groupby_columns,
        ).apply(len)
        num_of_words_in_trials_series.name = 'num_of_words_in_trial'
        merge_keys = set(self.data_args.groupby_columns)
        enriched_fix_df = enriched_fix_df.merge(
            num_of_words_in_trials_series,
            on=self.data_args.groupby_columns,
            how='left',
        )

        return enriched_fix_df

add_ia_report_features_to_fixation_data(ia_df, fix_df)

Merge per‑IA (interest‑area) features into the fixation‑level data.

Result: one row per fixation, enriched with IA‑level attributes.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def add_ia_report_features_to_fixation_data(
    self, ia_df: pd.DataFrame, fix_df: pd.DataFrame
) -> pd.DataFrame:
    """
    Merge per‑IA (interest‑area) features into the fixation‑level data.

    Result: one row per fixation, enriched with IA‑level attributes.
    """
    # --- 1. Unify IA‑ID column name ----------------------------------------
    ia_df = ia_df.rename(
        columns={
            Fields.IA_DATA_IA_ID_COL_NAME: Fields.FIXATION_REPORT_IA_ID_COL_NAME
        }
    )

    # --- 2. Build the list of IA features we plan to add -------------------
    ia_features = (
        BEyeLSTMArgs().ia_features_to_add_to_fixation_data
        + BaseModelArgs().ia_features_to_add_to_fixation_data
        + PLMASfArgs().ia_features_to_add_to_fixation_data
        + ['entity_type']
    )

    required_cols = (
        self.data_args.groupby_columns
        + [Fields.FIXATION_REPORT_IA_ID_COL_NAME]
        + ia_features
    )
    ia_df = ia_df[list(set(required_cols))]

    # --- 3. Drop columns that also exist in fixation table -----------------
    merge_keys = set(
        self.data_args.groupby_columns + [Fields.FIXATION_REPORT_IA_ID_COL_NAME]
    )
    dup_cols = (set(fix_df.columns) & set(ia_df.columns)) - merge_keys
    ia_df = ia_df.drop(columns=list(dup_cols))

    # --- 4. Clean nuisance column -----------------------------------------
    if 'normalized_part_ID' in fix_df.columns:
        if fix_df['normalized_part_ID'].isna().any():
            logger.warning('normalized_part_ID contains NaNs; dropping it.')
        fix_df = fix_df.drop(columns='normalized_part_ID')

    # --- 5. Merge ----------------------------------------------------------
    enriched_fix_df = fix_df.merge(
        ia_df,
        on=list(merge_keys),
        how='left',
        validate='many_to_one',
    )

    num_of_words_in_trials_series = ia_df.groupby(
        self.data_args.groupby_columns,
    ).apply(len)
    num_of_words_in_trials_series.name = 'num_of_words_in_trial'
    merge_keys = set(self.data_args.groupby_columns)
    enriched_fix_df = enriched_fix_df.merge(
        num_of_words_in_trials_series,
        on=self.data_args.groupby_columns,
        how='left',
    )

    return enriched_fix_df
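
The core of the method is a left, many-to-one merge: every fixation row is kept and picks up the attributes of the interest area it belongs to. A self-contained toy sketch of that pattern (column names and values invented for illustration):

import pandas as pd

ia = pd.DataFrame({
    'participant_id': [1, 1],
    'ia_id': [0, 1],
    'entity_type': ['PERSON', 'ORG'],
})
fix = pd.DataFrame({
    'participant_id': [1, 1, 1],
    'ia_id': [0, 0, 1],  # two fixations on IA 0, one on IA 1
    'CURRENT_FIX_DURATION': [180, 120, 240],
})

# how='left' keeps one row per fixation; validate='many_to_one' raises
# pandas.errors.MergeError if the IA table has duplicate merge keys.
enriched = fix.merge(
    ia, on=['participant_id', 'ia_id'], how='left', validate='many_to_one'
)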

dataset_specific_processing(data_dict)

OneStop-specific processing steps

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def dataset_specific_processing(
    self, data_dict: dict[str, pd.DataFrame]
) -> dict[str, pd.DataFrame]:
    """OneStop-specific processing steps"""
    surprisal_models = ['gpt2']

    for data_type in [DataType.IA, DataType.FIXATIONS]:
        if data_type not in data_dict or data_dict[data_type] is None:
            continue

        df = data_dict[data_type]

        args = [
            '--mode',
            data_type,
            '--SURPRISAL_MODELS',
            *surprisal_models,
            '--onestopqa_path',
            str(self.data_args.onestopqa_path),
        ]
        cfg = ArgsParser().parse_args(args)
        if data_type == DataType.IA:
            df = self.query_onestop_data(df, query=self.data_args.ia_query)
        elif data_type == DataType.FIXATIONS:
            df = self.query_onestop_data(df, query=self.data_args.fixation_query)

        df = df.drop(columns=['ptb_pos']).rename(columns={'Reduced_POS': 'ptb_pos'})
        df = our_processing(df=df, args=cfg)

        # add unique_trial_id column
        df['unique_trial_id'] = (
            df['participant_id'].astype(str)
            + '_'
            + df['unique_paragraph_id'].astype(str)
            + '_'
            + df['repeated_reading_trial'].astype(str)
            + '_'
            + df['practice_trial'].astype(str)
        )

        # add is_correct column
        df['is_correct'] = (df.selected_answer == 'A').astype(int)

        df[Fields.LEVEL] = (
            df[Fields.LEVEL].replace({'Adv': 1, 'Ele': 0}).astype(int)
        )
        if data_type == DataType.IA:
            df['head_direction'] = df['distance_to_head'] > 0
            df['head_direction'] = df['head_direction'].astype(int)

        data_dict[data_type] = df

    data_dict['fixations'] = self.add_ia_report_features_to_fixation_data(
        data_dict['ia'], data_dict['fixations']
    )

    for data_type in [DataType.IA, DataType.FIXATIONS]:
        data_dict[data_type] = add_missing_features(
            et_data=data_dict[data_type],
            trial_groupby_columns=self.data_args.groupby_columns,
            mode=data_type,
        )

    trial_level_features = compute_trial_level_features(
        raw_fixation_data=data_dict[DataType.FIXATIONS],
        raw_ia_data=data_dict[DataType.IA],
        trial_groupby_columns=self.data_args.groupby_columns,
        processed_data_path=self.data_args.processed_data_path,
    )
    data_dict[DataType.TRIAL_LEVEL] = trial_level_features

    return data_dict
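
The unique_trial_id is plain string concatenation over four columns. A toy illustration (the values are invented; real participant and paragraph IDs follow the dataset's own conventions):

import pandas as pd

df = pd.DataFrame({
    'participant_id': ['l1_101'],
    'unique_paragraph_id': ['1_2_Adv_3'],
    'repeated_reading_trial': [0],
    'practice_trial': [0],
})
df['unique_trial_id'] = (
    df['participant_id'].astype(str)
    + '_' + df['unique_paragraph_id'].astype(str)
    + '_' + df['repeated_reading_trial'].astype(str)
    + '_' + df['practice_trial'].astype(str)
)
# df['unique_trial_id'][0] == 'l1_101_1_2_Adv_3_0_0'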

get_column_map(data_type)

Get column mapping for OneStop dataset

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def get_column_map(self, data_type: DataType) -> dict:
    """Get column mapping for OneStop dataset"""
    return {
        # Empty for now as it's handled in onestop processing
    }

get_columns_to_keep()

Get list of columns to keep after filtering

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def get_columns_to_keep(self) -> list:
    """Get list of columns to keep after filtering"""
    return list(
        set(
            list(Fields)
            + BaseModelArgs().word_features
            + BaseModelArgs().eye_features
            + BaseModelArgs().fixation_features
            + BEyeLSTMArgs().fixation_features
            + BEyeLSTMArgs().eye_features
            + BEyeLSTMArgs().word_features
            + ['unique_trial_id']
            + self.data_args.groupby_columns
            + [
                'CURRENT_FIX_NEAREST_INTEREST_AREA_DISTANCE',
                'IA_FIRST_RUN_LANDING_POSITION',
                'IA_LAST_RUN_LANDING_POSITION',
                'NEXT_SAC_START_Y',
                'NEXT_SAC_END_X',
                'NEXT_SAC_END_Y',
                'NEXT_SAC_START_X',
                'ptb_pos',
                'is_content_word',
                'LengthCategory',
                'is_reg_sum',
                'is_progressive_sum',
                'IA_REGRESSION_IN_COUNT_sum',
                'normalized_outgoing_regression_count',
                'normalized_outgoing_progressive_count',
                'normalized_incoming_regression_count',
                'LengthCategory_normalized_IA_DWELL_TIME',
                'universal_pos_normalized_IA_DWELL_TIME',
                'LengthCategory_normalized_IA_FIRST_FIXATION_DURATION',
                'universal_pos_normalized_IA_FIRST_FIXATION_DURATION',
                'entity_type',
            ]
        )
    )

query_onestop_data(data, query)

Process the raw data by applying a query

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def query_onestop_data(self, data: pd.DataFrame, query: str | None) -> pd.DataFrame:
    """Process the raw data by applying a query"""
    if query is not None:
        data = data.query(query)
        logger.info(f'Number of rows after query ({query}): {len(data)}')
    else:
        logger.info('***** No query! *****')
    return data
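
DataFrame.query evaluates a boolean expression over column names, so ia_query and fixation_query are ordinary pandas query strings. A standalone sketch (the filter expression is illustrative, not the project's actual query):

import pandas as pd

df = pd.DataFrame({'practice_trial': [0, 1], 'IA_DWELL_TIME': [250, 310]})
filtered = df.query('practice_trial == 0 and IA_DWELL_TIME > 0')
# keeps only the first row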

add_additional_metrics(df)

Add additional metrics to the DataFrame.

Adds columns for regression rate, total skip, and a content-word indicator.

Parameters:

    df (DataFrame): Input DataFrame (required).

Returns:

    pd.DataFrame: DataFrame with added metrics.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def add_additional_metrics(df: pd.DataFrame) -> pd.DataFrame:
    """
    Add additional metrics to the DataFrame.

    Adds columns for regression rate, total skip, and a content-word indicator.

    Args:
        df (pd.DataFrame): Input DataFrame

    Returns:
        pd.DataFrame: DataFrame with added metrics
    """

    logger.info('Adding additional metrics...')
    df['regression_rate'] = df['IA_REGRESSION_OUT_FULL_COUNT'] / df['IA_RUN_COUNT']
    df['total_skip'] = df['IA_DWELL_TIME'] == 0
    df['is_content_word'] = df['universal_pos'].apply(is_content_word)

    return df
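
Note that regression_rate divides by IA_RUN_COUNT, which is 0 for skipped words; pandas division then yields NaN (0/0) or inf (nonzero/0) rather than raising. A minimal sketch of the edge case:

import pandas as pd

df = pd.DataFrame({
    'IA_REGRESSION_OUT_FULL_COUNT': [1, 0],
    'IA_RUN_COUNT': [2, 0],
})
rate = df['IA_REGRESSION_OUT_FULL_COUNT'] / df['IA_RUN_COUNT']
# rate -> [0.5, NaN]; a nonzero numerator over 0 would give inf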

add_unique_paragraph_id(df)

Add unique paragraph ID to the DataFrame.

Creates a new column 'unique_paragraph_id' by combining article_batch, article_id, difficulty_level, and paragraph_id.

Parameters:

    df (DataFrame): Input DataFrame (required).

Returns:

    pd.DataFrame: DataFrame with added unique paragraph ID.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def add_unique_paragraph_id(df: pd.DataFrame) -> pd.DataFrame:
    """
    Add unique paragraph ID to the DataFrame.

    Creates a new column 'unique_paragraph_id' by combining article_batch,
    article_id, difficulty_level, and paragraph_id.

    Args:
        df (pd.DataFrame): Input DataFrame

    Returns:
        pd.DataFrame: DataFrame with added unique paragraph ID
    """
    logger.info('Adding unique paragraph id...')
    df['unique_paragraph_id'] = (
        df[['article_batch', 'article_id', 'difficulty_level', 'paragraph_id']]
        .astype(str)
        .apply('_'.join, axis=1)
    )
    return df
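
The joining idiom is row-wise: each row's four values are cast to strings and joined with underscores. For example (values invented):

import pandas as pd

df = pd.DataFrame({
    'article_batch': [1],
    'article_id': [2],
    'difficulty_level': ['Adv'],
    'paragraph_id': [3],
})
df['unique_paragraph_id'] = (
    df[['article_batch', 'article_id', 'difficulty_level', 'paragraph_id']]
    .astype(str)
    .apply('_'.join, axis=1)
)
# df['unique_paragraph_id'][0] == '1_2_Adv_3'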

adjust_indexing(df, args)

Adjust indexing to be 0-indexed.

Subtracts 1 from specified columns based on whether in IA or FIXATION mode.

Parameters:

    df (DataFrame): Input DataFrame (required).
    args (ArgsParser): Contains mode configuration (required).

Returns:

    pd.DataFrame: DataFrame with adjusted indexing.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def adjust_indexing(df: pd.DataFrame, args: ArgsParser) -> pd.DataFrame:
    """
    Adjust indexing to be 0-indexed.

    Subtracts 1 from specified columns based on whether in IA or FIXATION mode.

    Args:
        df (pd.DataFrame): Input DataFrame
        args (ArgsParser): Contains mode configuration

    Returns:
        pd.DataFrame: DataFrame with adjusted indexing
    """
    if args.mode == Mode.IA:
        subtract_one_fields = [IA_ID_COL]
    elif args.mode == Mode.FIXATION:
        subtract_one_fields = [
            FIXATION_ID_COL,
            NEXT_FIXATION_ID_COL,
        ]
    else:
        raise ValueError(f'Unknown mode: {args.mode}')

    df[subtract_one_fields] -= 1
    logger.info('%s values adjusted to be 0-indexed.', subtract_one_fields)
    return df

compute_normalized_features(df, duration_col, ia_field)

Calculate normalized versions of key metrics.

Adds columns for:

- Normalized dwell times (total and by part)
- Normalized word positions (total and by part)
- Reverse indices from end

Parameters:

    df (DataFrame): Input DataFrame (required).
    duration_col (str): Column name for duration values (required).
    ia_field (str): Column name for word/fixation index (required).

Returns:

    pd.DataFrame: DataFrame with normalized metrics.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def compute_normalized_features(
    df: pd.DataFrame, duration_col: str, ia_field: str
) -> pd.DataFrame:
    """
    Calculate normalized versions of key metrics.

    Adds columns for:
    - Normalized dwell times (total and by part)
    - Normalized word positions (total and by part)
    - Reverse indices from end

    Args:
        df (pd.DataFrame): Input DataFrame
        duration_col (str): Column name for duration values
        ia_field (str): Column name for word/fixation index

    Returns:
        pd.DataFrame: DataFrame with normalized metrics
    """
    logger.info('Computing normalized dwell time, and normalized word indices...')
    df = df.assign(
        normalized_ID=(df[ia_field] - df.min_IA_ID) / (df.max_IA_ID - df.min_IA_ID),
    ).copy()
    return df
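
The normalization is plain min-max scaling of the word or fixation index within its trial, using the min_IA_ID and max_IA_ID columns added earlier by compute_span_level_metrics:

# normalized_ID = (IA_ID - min_IA_ID) / (max_IA_ID - min_IA_ID)
# With min_IA_ID = 0 and max_IA_ID = 40, word 10 maps to 10 / 40 = 0.25;
# the first unit in a trial maps to 0.0 and the last to 1.0.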

compute_span_level_metrics(df, ia_field, mode, duration_col)

Calculate aggregated metrics for different text spans.

Computes:

- Total dwell time per trial/span
- Min/max word indices per trial/span
- For fixations: count per span
- Normalizes indices to start at 0

Parameters:

    df (DataFrame): Input DataFrame (required).
    ia_field (str): Column name for word/fixation index (required).
    mode (Mode): IA or FIXATION processing mode (required).
    duration_col (str): Column name for duration values (required).

Returns:

    pd.DataFrame: DataFrame with added span-level metrics.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def compute_span_level_metrics(
    df: pd.DataFrame, ia_field: str, mode: Mode, duration_col: str
) -> pd.DataFrame:
    """
    Calculate aggregated metrics for different text spans.

    Computes:
    - Total dwell time per trial/span
    - Min/max word indices per trial/span
    - For fixations: count per span
    - Normalizes indices to start at 0

    Args:
        df (pd.DataFrame): Input DataFrame
        ia_field (str): Column name for word/fixation index
        mode (Mode): IA or FIXATION processing mode
        duration_col (str): Column name for duration values

    Returns:
        pd.DataFrame: DataFrame with added span-level metrics
    """
    logger.info('Computing span-level metrics...')

    group_by_fields = [
        'participant_id',
        'unique_paragraph_id',
        'repeated_reading_trial',
    ]

    # Fix trials where ID does not start at 0
    if mode == Mode.IA:
        temp_max_per_trial = df.groupby(group_by_fields).agg(
            min_IA_ID=pd.NamedAgg(column=ia_field, aggfunc='min'),
            max_IA_ID=pd.NamedAgg(column=ia_field, aggfunc='max'),
        )
        non_zero_min_ia_id_trials = temp_max_per_trial[
            temp_max_per_trial['min_IA_ID'] != 0
        ]
        logger.info(
            'Number of trials where min_IA_ID is not zero: %d out of %d trials.',
            len(non_zero_min_ia_id_trials),
            len(temp_max_per_trial),
        )
        df = df.merge(
            temp_max_per_trial,
            on=group_by_fields,
            validate='m:1',
            suffixes=(None, '_y'),
        )
        logger.info('Shifting IA_ID to start at 0...')
        df[ia_field] -= df['min_IA_ID']
        df.drop(columns=['min_IA_ID', 'max_IA_ID'], inplace=True)

    max_per_trial = df.groupby(group_by_fields).agg(
        total_IA_DWELL_TIME=pd.NamedAgg(column=duration_col, aggfunc='sum'),
        min_IA_ID=pd.NamedAgg(column=ia_field, aggfunc='min'),
        max_IA_ID=pd.NamedAgg(column=ia_field, aggfunc='max'),
    )
    df = df.merge(
        max_per_trial, on=group_by_fields, validate='m:1', suffixes=(None, '_y')
    )
    return df
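
pd.NamedAgg pairs a source column with an aggregation function under an output name; the result is then merged back so every row carries its trial-level aggregates. A minimal standalone sketch of the pattern (toy data):

import pandas as pd

df = pd.DataFrame({
    'trial': [1, 1, 2],
    'IA_ID': [3, 7, 0],
    'IA_DWELL_TIME': [200, 150, 300],
})
per_trial = df.groupby('trial').agg(
    total_IA_DWELL_TIME=pd.NamedAgg(column='IA_DWELL_TIME', aggfunc='sum'),
    min_IA_ID=pd.NamedAgg(column='IA_ID', aggfunc='min'),
    max_IA_ID=pd.NamedAgg(column='IA_ID', aggfunc='max'),
)
# validate='m:1' asserts each row matches exactly one trial aggregate
df = df.merge(per_trial, on='trial', validate='m:1')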

compute_start_end_line(df)

Compute for each word whether it is the first/last word in the line (not the sentence!).

This function adds two new columns to the input DataFrame: 'start_of_line' and 'end_of_line'. A word is considered to be at the start of a line if its 'IA_LEFT' value is smaller than the previous word's. A word is considered to be at the end of a line if its 'IA_LEFT' value is larger than the next word's.

Parameters:

    df (pd.DataFrame): Input DataFrame. Must contain the columns 'participant_id', 'unique_paragraph_id', 'repeated_reading_trial', and 'IA_LEFT' (required).

Returns:

    pd.DataFrame: The input DataFrame with two new columns: 'start_of_line' and 'end_of_line'.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def compute_start_end_line(df: pd.DataFrame) -> pd.DataFrame:
    """
    Compute for each word whether it is the first/last word in the line (not the sentence!).

    This function adds two new columns to the input DataFrame: 'start_of_line' and 'end_of_line'.
    A word is considered to be at the start of a line if its
        'IA_LEFT' value is smaller than the previous word's.
    A word is considered to be at the end of a line if its
        'IA_LEFT' value is larger than the next word's.

    Parameters:
    df (pd.DataFrame): Input DataFrame. Must contain the columns 'participant_id',
        'unique_paragraph_id', 'repeated_reading_trial', and 'IA_LEFT'.

    Returns:
    pd.DataFrame: The input DataFrame with two new columns: 'start_of_line' and 'end_of_line'.
    """

    logger.info('Adding start_of_line and end_of_line columns...')
    grouped_df = df.groupby(
        ['participant_id', 'unique_paragraph_id', 'repeated_reading_trial']
    )
    df['start_of_line'] = (
        grouped_df['IA_LEFT'].shift(periods=1, fill_value=1000000) > df['IA_LEFT']
    )
    df['end_of_line'] = (
        grouped_df['IA_LEFT'].shift(periods=-1, fill_value=-1) < df['IA_LEFT']
    )
    return df
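
The detection relies on the horizontal word coordinate IA_LEFT: within a trial, a drop relative to the previous word marks a line start, and a drop relative to the next word marks a line end. On toy coordinates (one trial, two lines):

import pandas as pd

df = pd.DataFrame({'trial': [1] * 5, 'IA_LEFT': [100, 300, 500, 120, 340]})
g = df.groupby('trial')
# The fill_value makes the first word a line start and the last a line end.
df['start_of_line'] = g['IA_LEFT'].shift(1, fill_value=10**6) > df['IA_LEFT']
df['end_of_line'] = g['IA_LEFT'].shift(-1, fill_value=-1) < df['IA_LEFT']
# start_of_line -> [True, False, False, True, False]
# end_of_line   -> [False, False, True, False, True]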

convert_to_float_features(df, args)

Convert specified columns to float type.

Handles missing values and dots by replacing them with None before conversion. Different columns are processed based on whether in IA or FIXATION mode.

Parameters:

    df (DataFrame): Input DataFrame (required).
    args (ArgsParser): Contains mode configuration (required).

Returns:

    pd.DataFrame: DataFrame with converted float columns.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def convert_to_float_features(df: pd.DataFrame, args: ArgsParser) -> pd.DataFrame:
    """
    Convert specified columns to float type.

    Handles missing values and dots by replacing them with None before conversion.
    Different columns are processed based on whether in IA or FIXATION mode.

    Args:
        df (pd.DataFrame): Input DataFrame
        args (ArgsParser): Contains mode configuration

    Returns:
        pd.DataFrame: DataFrame with converted float columns
    """
    if args.mode == Mode.IA:
        to_float_features = [
            'IA_AVERAGE_FIX_PUPIL_SIZE',
            'IA_DWELL_TIME_%',
            'IA_FIXATION_%',
            'IA_FIRST_RUN_FIXATION_%',
            'IA_FIRST_SACCADE_AMPLITUDE',
            'IA_FIRST_SACCADE_ANGLE',
            'IA_LAST_RUN_FIXATION_%',
            'IA_LAST_SACCADE_AMPLITUDE',
            'IA_LAST_SACCADE_ANGLE',
            'IA_FIRST_RUN_LANDING_POSITION',
            'IA_LAST_RUN_LANDING_POSITION',
        ]
    elif args.mode == Mode.FIXATION:
        to_float_features = [
            FIXATION_ID_COL,
            NEXT_FIXATION_ID_COL,
            'NEXT_FIX_ANGLE',
            'PREVIOUS_FIX_ANGLE',
            'NEXT_FIX_DISTANCE',
            'PREVIOUS_FIX_DISTANCE',
            'NEXT_SAC_AMPLITUDE',
            'NEXT_SAC_ANGLE',
            'NEXT_SAC_AVG_VELOCITY',
            'NEXT_SAC_PEAK_VELOCITY',
            'NEXT_SAC_END_X',
            'NEXT_SAC_START_X',
            'NEXT_SAC_END_Y',
            'NEXT_SAC_START_Y',
        ]
    else:
        raise ValueError(f'Unknown mode: {args.mode}')
    df[to_float_features] = (
        df[to_float_features].replace(to_replace={'.': None}).astype(float)
    )
    logger.info(
        "%s fields converted to float, nan ('.') values replaced with None.",
        to_float_features,
    )
    return df

convert_to_int_features(df, args)

Convert specified columns to integer type.

Handles missing values and dots by replacing them with 0 before conversion. Different columns are processed based on whether in IA or FIXATION mode.

Parameters:

    df (DataFrame): Input DataFrame (required).
    args (ArgsParser): Contains mode configuration (required).

Returns:

    pd.DataFrame: DataFrame with converted integer columns.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def convert_to_int_features(df: pd.DataFrame, args: ArgsParser) -> pd.DataFrame:
    """
    Convert specified columns to integer type.

    Handles missing values and dots by replacing them with 0 before conversion.
    Different columns are processed based on whether in IA or FIXATION mode.

    Args:
        df (pd.DataFrame): Input DataFrame
        args (ArgsParser): Contains mode configuration

    Returns:
        pd.DataFrame: DataFrame with converted integer columns
    """
    # In general, only features that contain '.' or NaN are not automatically converted.

    to_int_features = [
        'article_batch',
        'article_id',
        'paragraph_id',
        'repeated_reading_trial',
        'practice_trial',
        # "question_preview",
    ]
    if args.mode == Mode.IA:
        to_int_features += [
            'IA_DWELL_TIME',
            'IA_FIRST_FIXATION_DURATION',
            'IA_REGRESSION_PATH_DURATION',
            'IA_FIRST_RUN_DWELL_TIME',
            'IA_FIXATION_COUNT',
            'IA_REGRESSION_IN_COUNT',
            'IA_REGRESSION_OUT_FULL_COUNT',
            'IA_RUN_COUNT',
            'IA_FIRST_FIXATION_VISITED_IA_COUNT',
            'IA_FIRST_RUN_FIXATION_COUNT',
            'IA_SKIP',
            'IA_REGRESSION_OUT_COUNT',
            'IA_SELECTIVE_REGRESSION_PATH_DURATION',
            'IA_SPILLOVER',
            'IA_LAST_FIXATION_DURATION',
            'IA_LAST_RUN_DWELL_TIME',
            'IA_LAST_RUN_FIXATION_COUNT',
            'IA_LEFT',
            'IA_TOP',
            'TRIAL_DWELL_TIME',
            'TRIAL_FIXATION_COUNT',
            'TRIAL_IA_COUNT',
            'TRIAL_INDEX',
            'TRIAL_TOTAL_VISITED_IA_COUNT',
            'IA_FIRST_FIX_PROGRESSIVE',
        ]
    elif args.mode == Mode.FIXATION:
        to_int_features += [
            FIXATION_ID_COL,
            NEXT_FIXATION_ID_COL,
            'CURRENT_FIX_DURATION',
            'CURRENT_FIX_PUPIL',
            'CURRENT_FIX_X',
            'CURRENT_FIX_Y',
            'CURRENT_FIX_INDEX',
            'NEXT_SAC_DURATION',
        ]
    df[to_int_features] = df[to_int_features].replace({'.': 0, np.nan: 0}).astype(int)
    logger.info(
        "%s fields converted to int, nan ('.') values replaced with 0.", to_int_features
    )
    return df
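
The eye-tracking reports encode missing values as the string '.' (as the log message notes), which blocks a direct astype(int); the replace-then-cast idiom handles both '.' and NaN. A standalone sketch:

import numpy as np
import pandas as pd

df = pd.DataFrame({'IA_DWELL_TIME': ['250', '.', np.nan]})
df[['IA_DWELL_TIME']] = (
    df[['IA_DWELL_TIME']].replace({'.': 0, np.nan: 0}).astype(int)
)
# df['IA_DWELL_TIME'] -> [250, 0, 0]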

drop_missing_fixation_data(df, args)

Drop rows with missing fixation data.

Drops rows with missing values in specified columns for FIXATION mode.

Parameters:

    df (DataFrame): Input DataFrame (required).
    args (ArgsParser): Contains mode configuration (required).

Returns:

    pd.DataFrame: DataFrame with dropped rows.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def drop_missing_fixation_data(df: pd.DataFrame, args: ArgsParser) -> pd.DataFrame:
    """
    Drop rows with missing fixation data.

    Drops rows with missing values in specified columns for FIXATION mode.

    Args:
        df (pd.DataFrame): Input DataFrame
        args (ArgsParser): Contains mode configuration

    Returns:
        pd.DataFrame: DataFrame with dropped rows
    """
    if args.mode == Mode.FIXATION:
        dropna_fields = [FIXATION_ID_COL, NEXT_FIXATION_ID_COL]
        df = df.dropna(subset=dropna_fields)
        logger.info(
            'After dropping rows with missing data in %s: %d records left in total.',
            dropna_fields,
            len(df),
        )
    return df

get_article_data(article_id, raw_text)

Retrieve article data from raw text by article ID.

Parameters:

    article_id (str): Article identifier to look up (required).
    raw_text (dict): Raw text data containing articles (required).

Returns:

    dict: Article data if found.

Raises:

    ValueError: If article ID not found.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def get_article_data(article_id: str, raw_text) -> dict:
    """
    Retrieve article data from raw text by article ID.

    Args:
        article_id (str): Article identifier to look up
        raw_text (dict): Raw text data containing articles

    Returns:
        dict: Article data if found

    Raises:
        ValueError: If article ID not found
    """
    for article in raw_text:
        if article['article_id'] == article_id:
            return article
    raise ValueError(f'Article id {article_id} not found')

get_constants_by_mode(mode)

Get constants based on processing mode.

Returns duration and IA field names based on whether in IA or FIXATION mode.

Parameters:

    mode (Mode): Processing mode, IA or FIXATION (required).

Returns:

    tuple[str, str]: Duration and IA field names.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def get_constants_by_mode(mode: Mode) -> tuple[str, str]:
    """
    Get constants based on processing mode.

    Returns duration and IA field names based on whether in IA or FIXATION mode.

    Args:
        mode (Mode): Processing mode (IA or FIXATION)

    Returns:
        tuple[str, str]: Duration and IA field names
    """
    duration_field = 'IA_DWELL_TIME' if mode == Mode.IA else 'CURRENT_FIX_DURATION'
    ia_field = IA_ID_COL if mode == Mode.IA else FIXATION_ID_COL

    return duration_field, ia_field

get_raw_text(args)

Load raw text data from OneStopQA JSON file.

Parameters:

    args (object): Configuration containing onestopqa_path (required).

Returns:

    dict: Raw text data from OneStopQA JSON.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def get_raw_text(args: object) -> dict:
    """
    Load raw text data from OneStopQA JSON file.

    Args:
        args: Configuration containing onestopqa_path

    Returns:
        dict: Raw text data from OneStopQA JSON
    """
    with open(
        file=args.onestopqa_path,
        mode='r',
        encoding='utf-8',
    ) as f:
        raw_text = json.load(f)
    return raw_text['data']

our_processing(df, args)

LaCC lab-specific processing pipeline for OneStop dataset.

Extends the public dataset with additional features including:

- Integer and float feature conversions
- Index adjustments
- Fixation data cleaning
- Unique paragraph ID addition
- Word span metrics computation
- Span-level metrics computation
- Feature normalization
- Question difficulty data integration
- Previous word metrics (for IA mode)
- Line position metrics (for IA mode)

Parameters:

    df (DataFrame): Input DataFrame from public preprocessing (required).
    args (ArgsParser): Configuration parameters (required).

Returns:

    pd.DataFrame: Extended DataFrame with LaCC lab features.

Source code in src/data/preprocessing/dataset_preprocessing/onestop.py
def our_processing(df: pd.DataFrame, args: ArgsParser) -> pd.DataFrame:
    """
    LaCC lab-specific processing pipeline for OneStop dataset.

    Extends the public dataset with additional features including:
    - Integer and float feature conversions
    - Index adjustments
    - Fixation data cleaning
    - Unique paragraph ID addition
    - Word span metrics computation
    - Span-level metrics computation
    - Feature normalization
    - Question difficulty data integration
    - Previous word metrics (for IA mode)
    - Line position metrics (for IA mode)

    Args:
        df (pd.DataFrame): Input DataFrame from public preprocessing
        args (ArgsParser): Configuration parameters

    Returns:
        pd.DataFrame: Extended DataFrame with LaCC lab features
    """

    duration_field, ia_field = get_constants_by_mode(args.mode)

    df = convert_to_int_features(df, args)
    df = convert_to_float_features(df, args)
    df = adjust_indexing(df, args)
    df = drop_missing_fixation_data(df, args)
    df = add_unique_paragraph_id(df)
    df = compute_span_level_metrics(df, ia_field, args.mode, duration_field)
    df = compute_normalized_features(df, duration_field, ia_field)
    if args.mode == Mode.IA:
        df = compute_start_end_line(df)
        df = add_additional_metrics(df)

    return df
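
Putting the pieces together, our_processing is driven the same way dataset_specific_processing drives it: build a cfg with ArgsParser, then hand over the raw report. A hedged end-to-end sketch (assumes raw_ia_df is an already-loaded raw IA report):

cfg = ArgsParser().parse_args(
    [
        '--mode', 'ia',
        '--SURPRISAL_MODELS', 'gpt2',
        '--onestopqa_path', 'metadata/onestop_qa.json',
    ]
)
processed = our_processing(df=raw_ia_df, args=cfg)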