question:

def is_self_dividing_number(number: int) -> bool:
    """
    Checks if a number is a self-dividing number.

    A self-dividing number is a number that is divisible by every digit it contains.

    :param number: Positive integer to check.
    :return: Boolean indicating whether the number is self-dividing.

    Examples:
    >>> is_self_dividing_number(1)
    True
    >>> is_self_dividing_number(12)
    True
    >>> is_self_dividing_number(128)
    True
    >>> is_self_dividing_number(26)
    False
    >>> is_self_dividing_number(101)
    False
    """

Unit Tests

from solution import is_self_dividing_number


def test_is_self_dividing_number_single_digit():
    assert is_self_dividing_number(1) == True
    assert is_self_dividing_number(5) == True
    assert is_self_dividing_number(7) == True


def test_is_self_dividing_number_multiple_digits():
    assert is_self_dividing_number(12) == True
    assert is_self_dividing_number(128) == True
    assert is_self_dividing_number(36) == True
    assert is_self_dividing_number(101) == False
    assert is_self_dividing_number(26) == False


def test_is_self_dividing_number_with_zero_digit():
    assert is_self_dividing_number(10) == False
    assert is_self_dividing_number(20) == False
    assert is_self_dividing_number(30) == False


def test_is_self_dividing_number_edge_cases():
    assert is_self_dividing_number(10000) == False
    assert is_self_dividing_number(9999) == True
    assert is_self_dividing_number(1) == True
    assert is_self_dividing_number(11) == True
    assert is_self_dividing_number(21) == False
answer:

def is_self_dividing_number(number: int) -> bool:
    """
    Checks if a number is a self-dividing number.

    A self-dividing number is a number that is divisible by every digit it contains.

    :param number: Positive integer to check.
    :return: Boolean indicating whether the number is self-dividing.
    """
    original_number = number
    while number > 0:
        digit = number % 10
        if digit == 0 or original_number % digit != 0:
            return False
        number //= 10
    return True
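For comparison, a minimal string-based sketch of the same check; the function name `is_self_dividing_number_str` and the spot-check harness are illustrative additions, not part of the reference answer.

def is_self_dividing_number_str(number: int) -> bool:
    # Equivalent digit check via the decimal string representation;
    # a '0' digit immediately disqualifies the number.
    return all(d != "0" and number % int(d) == 0 for d in str(number))


if __name__ == "__main__":
    # Quick spot checks mirroring the doctests in the problem statement.
    for n in (1, 12, 128, 26, 101):
        assert is_self_dividing_number_str(n) == (n in (1, 12, 128))
    print("string-based variant agrees with the expected doctest results")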
question:

# Creating module `solution`

Code Implementation

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_files


def preprocess_data(reviews: np.ndarray, labels: np.ndarray) -> tuple:
    """
    Tokenizes and processes the text data, transforming it into numerical features
    using TF-IDF vectorization. Returns the vectorized features and corresponding labels.
    """
    # Handle any missing values in the reviews
    reviews = np.array([review if isinstance(review, str) else "" for review in reviews])

    vectorizer = TfidfVectorizer(stop_words="english")
    features = vectorizer.fit_transform(reviews)
    return features, labels


def logistic_regression(features: np.ndarray, labels: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """
    Initializes and trains a logistic regression model on the training features and labels.
    Predicts the sentiment for the given test features and returns the predictions.
    """
    model = LogisticRegression(random_state=42, max_iter=1000)
    model.fit(features, labels)
    predictions = model.predict(test_features)
    return predictions


def main() -> None:
    # Load IMDB dataset
    reviews_data = load_files('path/to/imdb/reviews', shuffle=True)
    reviews, labels = reviews_data.data, reviews_data.target

    # Preprocess the dataset
    features, labels = preprocess_data(reviews, labels)
    x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.25, random_state=42)

    # Train and predict
    predictions = logistic_regression(x_train, y_train, x_test)

    # Output results
    print(f"Accuracy: {accuracy_score(y_test, predictions)}")
    print(f"Confusion Matrix:\n{confusion_matrix(y_test, predictions)}")


if __name__ == "__main__":
    main()

Unit Tests

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from unittest.mock import patch


def test_preprocess_data():
    from solution import preprocess_data

    reviews = np.array(["Good movie!", "Bad film.", np.nan, "Awesome!", "Terrible..."])
    labels = np.array([1, 0, 0, 1, 0])

    # Preprocess the data
    features, processed_labels = preprocess_data(reviews, labels)

    # Verify the shape and ensure no NaNs remain in the reviews
    assert features.shape[0] == len(reviews), "Features shape mismatch"
    assert np.array_equal(labels, processed_labels), "Labels mismatch"


def test_logistic_regression():
    from solution import preprocess_data, logistic_regression

    # Sample data
    reviews = np.array(["Good movie!", "Bad film.", "Awesome!", "Terrible..."])
    labels = np.array([1, 0, 1, 0])

    # Preprocess the data
    features, processed_labels = preprocess_data(reviews, labels)
    x_train, x_test, y_train, y_test = train_test_split(features, processed_labels, test_size=0.25, random_state=42)

    # Train and predict
    predictions = logistic_regression(x_train, y_train, x_test)

    # Ensure predictions have the correct shape
    assert len(predictions) == len(y_test), "Predictions shape mismatch"


def test_main(monkeypatch):
    import solution
    from solution import main

    # Mocking load_files to prevent actual IO operations
    def mock_load_files(*args, **kwargs):
        return type('MockedData', (object,), {
            'data': np.array(["Good movie!", "Bad film.", "Awesome!", "Terrible..."]),
            'target': np.array([1, 0, 1, 0])
        })

    monkeypatch.setattr(solution, 'load_files', mock_load_files)

    with patch('builtins.print') as mocked_print:
        main()

    # Check if accuracy and confusion matrix were printed
    assert mocked_print.called, "Print not called"
    called_args = [call.args[0] for call in mocked_print.call_args_list]
    assert any("Accuracy" in arg for arg in called_args) and any("Confusion Matrix" in arg for arg in called_args), "Expected output not printed"
answer:

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_files


def preprocess_data(reviews: np.ndarray, labels: np.ndarray) -> tuple:
    """
    Tokenizes and processes the text data, transforming it into numerical features
    using TF-IDF vectorization. Returns the vectorized features and corresponding labels.
    """
    # Handle any missing values in the reviews
    reviews = np.array([review if isinstance(review, str) else "" for review in reviews])

    vectorizer = TfidfVectorizer(stop_words="english")
    features = vectorizer.fit_transform(reviews)
    return features, labels


def logistic_regression(features: np.ndarray, labels: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """
    Initializes and trains a logistic regression model on the training features and labels.
    Predicts the sentiment for the given test features and returns the predictions.
    """
    model = LogisticRegression(random_state=42, max_iter=1000)
    model.fit(features, labels)
    predictions = model.predict(test_features)
    return predictions


def main() -> None:
    # Load IMDB dataset
    reviews_data = load_files('path/to/imdb/reviews', shuffle=True)
    reviews, labels = reviews_data.data, reviews_data.target

    # Preprocess the dataset
    features, labels = preprocess_data(reviews, labels)
    x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.25, random_state=42)

    # Train and predict
    predictions = logistic_regression(x_train, y_train, x_test)

    # Output results
    print(f"Accuracy: {accuracy_score(y_test, predictions)}")
    print(f"Confusion Matrix:\n{confusion_matrix(y_test, predictions)}")


if __name__ == "__main__":
    main()
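As a sanity check, the pipeline can be exercised without the on-disk IMDB directory. The snippet below is a minimal sketch that reuses the module's own preprocess_data and logistic_regression functions on a small hand-made sample; the review strings and labels are hypothetical placeholders, not real IMDB data.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

from solution import preprocess_data, logistic_regression

# Hypothetical toy sample; a real run would use load_files on the IMDB directory.
reviews = np.array(["Great plot and acting", "Dull and far too long",
                    "Loved every minute", "A complete waste of time"])
labels = np.array([1, 0, 1, 0])

features, labels = preprocess_data(reviews, labels)
x_train, x_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.25, random_state=42)
predictions = logistic_regression(x_train, y_train, x_test)
print("toy-sample accuracy:", accuracy_score(y_test, predictions))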
question:

def max_subarray_sum(profits: list) -> int:
    """
    Returns the maximum sum of any contiguous subarray.
    """
    # Initialize both the maximum sum and the current sum to the first element.
    max_sum = current_sum = profits[0]

    # Iterate over the profits array starting from the second element.
    for profit in profits[1:]:
        # Update current_sum to the maximum of the current profit and current_sum + profit.
        current_sum = max(profit, current_sum + profit)
        # Update max_sum with the maximum of max_sum and current_sum.
        max_sum = max(max_sum, current_sum)

    return max_sum
answer:

def max_subarray_sum(profits):
    """
    Returns the maximum sum of any contiguous subarray.
    """
    # Initialize both the maximum sum and the current sum to the first element.
    max_sum = current_sum = profits[0]

    # Iterate over the profits array starting from the second element.
    for profit in profits[1:]:
        # Update current_sum to the maximum of the current profit and current_sum + profit.
        current_sum = max(profit, current_sum + profit)
        # Update max_sum with the maximum of max_sum and current_sum.
        max_sum = max(max_sum, current_sum)

    return max_sum
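A brief usage sketch of the answer above; the example arrays are illustrative and not taken from the problem statement.

if __name__ == "__main__":
    # Classic Kadane's-algorithm example: the best subarray is [4, -1, 2, 1] with sum 6.
    print(max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6
    # All-negative input: the least negative single element is the answer.
    print(max_subarray_sum([-8, -3, -6, -2, -5]))  # -2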
question:

def get_matrix_elements(matrix: list[list[int]], indices: list[tuple[int, int]]) -> list[any]:
    """
    Given a 2D matrix and a list of indices representing elements in the matrix,
    validate the indices and return the values of the elements at the specified
    indices. If an index is invalid (i.e., out of the matrix bounds), replace it
    with `None` in the output list.

    Parameters:
    matrix (list[list[int]]): A 2D matrix of integers.
    indices (list[tuple[int, int]]): A list of tuples, where each tuple contains
        two integers representing the row and column indices of the matrix.

    Returns:
    list[any]: A list of elements from the matrix at the specified indices or
        `None` if the index is invalid.

    Examples:
    >>> matrix = [
    ...     [1, 2, 3],
    ...     [4, 5, 6],
    ...     [7, 8, 9]
    ... ]
    >>> indices = [(0, 0), (1, 2), (3, 1), (-1, 0)]
    >>> get_matrix_elements(matrix, indices)
    [1, 6, None, None]
    """
answer:

def get_matrix_elements(matrix, indices):
    """
    This function returns the values of the elements at the specified indices.
    If an index is invalid (i.e., out of the matrix bounds), it replaces the
    value with None.

    Parameters:
    matrix (list[list[int]]): A 2D matrix of integers.
    indices (list[tuple[int, int]]): A list of tuples representing row and column indices.

    Returns:
    list[any]: A list of elements from the matrix at the specified indices or
        None if the index is invalid.
    """
    rows = len(matrix)
    cols = len(matrix[0]) if rows > 0 else 0

    def is_valid_index(i, j):
        return 0 <= i < rows and 0 <= j < cols

    result = []
    for (i, j) in indices:
        if is_valid_index(i, j):
            result.append(matrix[i][j])
        else:
            result.append(None)
    return result
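A short usage sketch mirroring the docstring example; the expected output is shown as a comment.

if __name__ == "__main__":
    matrix = [[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]]
    indices = [(0, 0), (1, 2), (3, 1), (-1, 0)]
    # (3, 1) is out of bounds, and (-1, 0) is rejected because negative
    # indices are treated as invalid rather than wrapped Python-style.
    print(get_matrix_elements(matrix, indices))  # [1, 6, None, None]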