//! This module provides the functionality to scrape and gather all the results from the upstream
//! search engines and then remove duplicate results.

use super::user_agent::random_user_agent;
use crate::handler::paths::{file_path, FileType};
use crate::models::{
    aggregation_models::{EngineErrorInfo, SearchResult, SearchResults},
    engine_models::{EngineError, EngineHandler},
};
use error_stack::Report;
use rand::Rng;
use regex::Regex;
use std::{
    collections::HashMap,
    io::BufReader,
    time::Duration,
};
use std::{fs::File, io::BufRead};
use tokio::task::JoinHandle;

/// Alias for a long type annotation
type FutureVec = Vec<JoinHandle<Result<HashMap<String, SearchResult>, Report<EngineError>>>>;

/// The function aggregates the scraped results from the user-selected upstream search engines.
/// These engines can be chosen either from the user interface (UI) or from the configuration file.
/// The code handles this process by matching the selected search engines and adding them to a vector.
/// This vector is then used to spawn an asynchronous task for each engine using `tokio::spawn`, which
/// returns a join handle (a future). These futures are awaited in another loop. Once the results are
/// collected, they are filtered
/// to remove any errors and ensure only proper results are included. If an error is encountered, it is
/// sent to the UI along with the name of the engine and the type of error. This information is finally
/// placed in the returned `SearchResults` struct.
///
/// Additionally, the function eliminates duplicate results. If the same result is returned by multiple
/// engines, the engine names are combined to indicate that the result was fetched from all of these
/// upstream engines. After this, all the data in the `HashMap` is moved into a struct that contains all
/// the aggregated results in a vector. Furthermore, the query used is also added to the struct. This step
/// is necessary to ensure that the search bar in the UI remains populated even when searched from the query URL.
///
/// Overall, this function serves to aggregate scraped results from user-selected search engines, handling errors,
/// removing duplicates, and organizing the data for display in the UI.
///
/// # Example:
///
/// If you search from a URL like `https://127.0.0.1/search?q=huston`, then the search bar should
/// contain the word `huston` and not remain empty.
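///
/// A minimal usage sketch (hedged: the engine name and argument values below are purely
/// illustrative, and `EngineHandler::new` returning an `Option` is an assumption):
///
/// ```rust,ignore
/// // Select an upstream engine (the name here is illustrative).
/// let engines: Vec<EngineHandler> = vec![EngineHandler::new("duckduckgo").unwrap()];
///
/// // Fetch page 1 for the query with no random delay, debug mode on,
/// // a 30-second request timeout, and safe search disabled.
/// let results: SearchResults =
///     aggregate("huston", 1, false, true, &engines, 30, 0).await?;
/// ```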
///
/// # Arguments
///
/// * `query` - Accepts a string to query with the above upstream search engines.
/// * `page` - Accepts a u32 page number.
/// * `random_delay` - Accepts a boolean value to add a random delay before making the request.
/// * `debug` - Accepts a boolean value to enable or disable debug mode option.
/// * `upstream_search_engines` - Accepts a vector of search engine names that were selected by the
///   user through the UI or the config file.
/// * `request_timeout` - Accepts a time (secs) as a value which controls the server request timeout.
/// * `safe_search` - Accepts a u8 value which determines the safe search level applied to the results.
///
/// # Errors
///
/// Returns a reqwest or scraping-selector error if any error occurs in the results function of
/// either `searx` or `duckduckgo` (or both); otherwise returns a `SearchResults` struct
/// containing appropriate values.
pub async fn aggregate(
    query: &str,
    page: u32,
    random_delay: bool,
    debug: bool,
    upstream_search_engines: &[EngineHandler],
    request_timeout: u8,
    safe_search: u8,
) -> Result<SearchResults, Box<dyn std::error::Error>> {
    let user_agent: &str = random_user_agent();

    // Add a random delay before making the request, either when explicitly enabled or
    // whenever debug mode is off.
    if random_delay || !debug {
        let mut rng = rand::thread_rng();
        let delay_secs = rng.gen_range(1..10);
        tokio::time::sleep(Duration::from_secs(delay_secs)).await;
    }

    let mut names: Vec<&str> = Vec::with_capacity(upstream_search_engines.len());

    // create tasks for upstream result fetching
    let mut tasks: FutureVec = FutureVec::new();

    for engine_handler in upstream_search_engines {
        let (name, search_engine) = engine_handler.to_owned().into_name_engine();
        names.push(name);
        let query: String = query.to_owned();
        tasks.push(tokio::spawn(async move {
            search_engine
                .results(
                    &query,
                    page,
                    user_agent,
                    request_timeout,
                    safe_search,
                )
                .await
        }));
    }

    // get upstream responses
    let mut responses = Vec::with_capacity(tasks.len());

    for task in tasks {
        if let Ok(result) = task.await {
            responses.push(result)
        }
    }

    // aggregate search results, removing duplicates and handling errors the upstream engines returned
    let mut result_map: HashMap<String, SearchResult> = HashMap::new();
    let mut engine_errors_info: Vec<EngineErrorInfo> = Vec::new();

    let mut handle_error = |error: &Report<EngineError>, engine_name: &'static str| {
        log::error!("Engine Error: {:?}", error);
        engine_errors_info.push(EngineErrorInfo::new(
            error.downcast_ref::<EngineError>().unwrap(),
            engine_name,
        ));
    };

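    // Drain the responses; `responses` and `names` were filled in the same order,
    // so popping both from the back keeps each response paired with its engine
    // (assuming every spawned task joined successfully).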
    while let (Some(response), Some(engine)) = (responses.pop(), names.pop()) {

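        // The first successful response seeds the result map wholesale;
        // subsequent responses are merged into it below.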
        if result_map.is_empty() {
            match response {
                Ok(results) => {
                    result_map = results.clone();
                }
                Err(error) => {
                    handle_error(&error, engine);
                }
            }
            continue;
        }

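        // Merge this engine's results: on a duplicate URL, record the extra engine
        // name on the existing result; otherwise insert the new result as-is.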
        match response {
            Ok(result) => {
                result.into_iter().for_each(|(key, value)| {
                    result_map
                        .entry(key)
                        .and_modify(|result| {
                            result.add_engines(engine);
                        })
                        .or_insert_with(|| -> SearchResult { value });
                });
            }
            Err(error) => {
                handle_error(&error, engine);
            }
        }
    }

    if safe_search >= 3 {
        let mut blacklist_map: HashMap<String, SearchResult> = HashMap::new();
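        // First move blocklist matches out of the results, then move any allowlist
        // matches back in, so the allowlist overrides the blocklist.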
        filter_with_lists(
            &mut result_map,
            &mut blacklist_map,
            file_path(FileType::BlockList)?,
        )?;

        filter_with_lists(
            &mut blacklist_map,
            &mut result_map,
            file_path(FileType::AllowList)?,
        )?;

        drop(blacklist_map);
    }

    let results: Vec<SearchResult> = result_map.into_values().collect();

    Ok(SearchResults::new(results, query, &engine_errors_info))
}

/// Filters a map of search results using a list of regex patterns.
///
/// # Arguments
///
/// * `map_to_be_filtered` - A mutable reference to a `HashMap` of search results to filter; matching results are removed from this map.
/// * `resultant_map` - A mutable reference to a `HashMap` to hold the filtered results.
/// * `file_path` - A `&str` representing the path to a file containing regex patterns to use for filtering.
///
/// # Errors
///
/// Returns an error if the file at `file_path` cannot be opened or read, or if a regex pattern is invalid.
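///
/// # Example
///
/// A minimal sketch, assuming a file at `./blocklist.txt` containing one regex pattern per
/// line (the path and the maps below are illustrative):
///
/// ```rust,ignore
/// let mut results: HashMap<String, SearchResult> = HashMap::new();
/// let mut blocked: HashMap<String, SearchResult> = HashMap::new();
///
/// // Moves every result whose URL, title, or description matches a pattern
/// // from `results` into `blocked`.
/// filter_with_lists(&mut results, &mut blocked, "./blocklist.txt")?;
/// ```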
pub fn filter_with_lists(
    map_to_be_filtered: &mut HashMap<String, SearchResult>,
    resultant_map: &mut HashMap<String, SearchResult>,
    file_path: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let reader = BufReader::new(File::open(file_path)?);

    for line in reader.lines() {
        let re = Regex::new(line?.trim())?;

        // Check each search result in the map against the regex pattern; on a match,
        // move the result from the original map to the resultant map. (The map is
        // cloned for iteration so entries can be removed from the original inside the loop.)
        for (url, search_result) in map_to_be_filtered.clone().into_iter() {
            if re.is_match(&url.to_lowercase())
                || re.is_match(&search_result.title.to_lowercase())
                || re.is_match(&search_result.description.to_lowercase())
            {
                if let Some(result) = map_to_be_filtered.remove(&url) {
                    resultant_map.insert(url, result);
                }
            }
        }
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use smallvec::smallvec;
    use std::collections::HashMap;
    use std::io::Write;
    use tempfile::NamedTempFile;

    #[test]
    fn test_filter_with_lists() -> Result<(), Box<dyn std::error::Error>> {
        // Create a map of search results to filter
        let mut map_to_be_filtered = HashMap::new();
        map_to_be_filtered.insert(
            "https://www.example.com".to_owned(),
            SearchResult {
                title: "Example Domain".to_owned(),
                url: "https://www.example.com".to_owned(),
                description: "This domain is for use in illustrative examples in documents."
                    .to_owned(),
                engine: smallvec!["Google".to_owned(), "Bing".to_owned()],
            },
        );
        map_to_be_filtered.insert(
            "https://www.rust-lang.org/".to_owned(),
            SearchResult {
                title: "Rust Programming Language".to_owned(),
                url: "https://www.rust-lang.org/".to_owned(),
                description: "A systems programming language that runs blazingly fast, prevents segfaults, and guarantees thread safety.".to_owned(),
                engine: smallvec!["Google".to_owned(), "DuckDuckGo".to_owned()],
            },
        );

        // Create a temporary file with regex patterns
        let mut file = NamedTempFile::new()?;
        writeln!(file, "example")?;
        writeln!(file, "rust")?;
        file.flush()?;

        let mut resultant_map = HashMap::new();
        filter_with_lists(
            &mut map_to_be_filtered,
            &mut resultant_map,
            file.path().to_str().unwrap(),
        )?;

        assert_eq!(resultant_map.len(), 2);
        assert!(resultant_map.contains_key("https://www.example.com"));
        assert!(resultant_map.contains_key("https://www.rust-lang.org/"));
        assert_eq!(map_to_be_filtered.len(), 0);

        Ok(())
    }

    #[test]
    fn test_filter_with_lists_wildcard() -> Result<(), Box<dyn std::error::Error>> {
        let mut map_to_be_filtered = HashMap::new();
        map_to_be_filtered.insert(
            "https://www.example.com".to_owned(),
            SearchResult {
                title: "Example Domain".to_owned(),
                url: "https://www.example.com".to_owned(),
                description: "This domain is for use in illustrative examples in documents."
                    .to_owned(),
                engine: smallvec!["Google".to_owned(), "Bing".to_owned()],
            },
        );
        map_to_be_filtered.insert(
            "https://www.rust-lang.org/".to_owned(),
            SearchResult {
                title: "Rust Programming Language".to_owned(),
                url: "https://www.rust-lang.org/".to_owned(),
                description: "A systems programming language that runs blazingly fast, prevents segfaults, and guarantees thread safety.".to_owned(),
                engine: smallvec!["Google".to_owned(), "DuckDuckGo".to_owned()],
            },
        );

        // Create a temporary file with a regex pattern containing a wildcard
        let mut file = NamedTempFile::new()?;
        writeln!(file, "ex.*le")?;
        file.flush()?;

        let mut resultant_map = HashMap::new();

        filter_with_lists(
            &mut map_to_be_filtered,
            &mut resultant_map,
            file.path().to_str().unwrap(),
        )?;

        assert_eq!(resultant_map.len(), 1);
        assert!(resultant_map.contains_key("https://www.example.com"));
        assert_eq!(map_to_be_filtered.len(), 1);
        assert!(map_to_be_filtered.contains_key("https://www.rust-lang.org/"));

        Ok(())
    }

    #[test]
    fn test_filter_with_lists_file_not_found() {
        let mut map_to_be_filtered = HashMap::new();

        let mut resultant_map = HashMap::new();

        // Call the `filter_with_lists` function with a non-existent file path
        let result = filter_with_lists(
            &mut map_to_be_filtered,
            &mut resultant_map,
            "non-existent-file.txt",
        );

        assert!(result.is_err());
    }

    #[test]
    fn test_filter_with_lists_invalid_regex() {
        let mut map_to_be_filtered = HashMap::new();
        map_to_be_filtered.insert(
            "https://www.example.com".to_owned(),
            SearchResult {
                title: "Example Domain".to_owned(),
                url: "https://www.example.com".to_owned(),
                description: "This domain is for use in illustrative examples in documents."
                    .to_owned(),
                engine: smallvec!["Google".to_owned(), "Bing".to_owned()],
            },
        );

        let mut resultant_map = HashMap::new();

        // Create a temporary file with an invalid regex pattern
        let mut file = NamedTempFile::new().unwrap();
        writeln!(file, "example(").unwrap();
        file.flush().unwrap();

        let result = filter_with_lists(
            &mut map_to_be_filtered,
            &mut resultant_map,
            file.path().to_str().unwrap(),
        );

        assert!(result.is_err());
    }
}